element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
function | openshift/openshift-tests-private | e7a96ccd-e6a8-409a-a3ed-4d4f1d347bec | checkNodepoolRollingUpgradeIntermediateStatus | ['"fmt"', '"strconv"', '"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolRollingUpgradeIntermediateStatus(name string) bool {
// during a rolling upgrade the MachineDeployment should report a non-zero number of unavailable replicas
infraID, err := h.getInfraID()
o.Expect(err).ShouldNot(o.HaveOccurred())
cond := `-ojsonpath={.status.unavailableReplicas}`
unavailableNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "machinedeployment", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(unavailableNum).ShouldNot(o.BeEmpty())
num, err := strconv.Atoi(unavailableNum)
o.Expect(err).ShouldNot(o.HaveOccurred())
if num <= 0 {
return false
}
// get machinesets.cluster.x-k8s.io according to nodepool
machinesetCAPI := "machinesets.cluster.x-k8s.io"
labelFilter := "cluster.x-k8s.io/cluster-name=" + infraID
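// the hypershift.openshift.io/nodePool annotation on a CAPI MachineSet holds "<hostedcluster-namespace>/<nodepool-name>"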
format := `-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].metadata.name}`
cond = fmt.Sprintf(format, h.namespace, name)
machinesets, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, machinesetCAPI, "-l", labelFilter, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(machinesets).ShouldNot(o.BeEmpty())
// a new machineset should be created, so number of machinesets should be 2
if len(strings.Split(machinesets, " ")) <= 1 {
return false
}
return true
} | hypershift | |||
function | openshift/openshift-tests-private | c66d81c1-a2f1-4bd3-8741-6fdd24a48dab | pollCheckNodepoolRollingUpgradeComplete | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckNodepoolRollingUpgradeComplete(name string) func() bool {
return func() bool {
return h.checkNodepoolRollingUpgradeComplete(name)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 2bf40dde-7564-4251-ae13-6733c78b9053 | checkNodepoolRollingUpgradeComplete | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolRollingUpgradeComplete(name string) bool {
if !h.checkNodepoolRollingUpgradeCompleteByMachineDeployment(name) {
e2e.Logf("checkNodepoolRollingUpgradeCompleteByMachineDeployment false")
return false
}
if !h.checkNodePoolReady(name) {
e2e.Logf("checkNodePoolReady false")
return false
}
if !h.checkNodepoolHostedClusterNodeReady(name) {
e2e.Logf("checkNodepoolHostedClusterNodeReady false")
return false
}
return true
} | hypershift | ||||
function | openshift/openshift-tests-private | d10f7b0a-f85d-40b1-a5da-14ed284f6fb3 | getNodepoolReadyReplicas | ['"strconv"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getNodepoolReadyReplicas(name string) int {
// get the nodepool's ready replica count
replicas, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, "-ojsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
replicasNum, err := strconv.Atoi(replicas)
o.Expect(err).ShouldNot(o.HaveOccurred())
return replicasNum
} | hypershift | |||
function | openshift/openshift-tests-private | 39136275-7fcd-4319-8000-a5c8f4223a68 | getNodepoolHostedClusterReadyNodesNumber | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getNodepoolHostedClusterReadyNodesNumber(name string) int {
params := []string{"node", "--ignore-not-found", "-l", "hypershift.openshift.io/nodePool=" + name, `-ojsonpath={.items[*].status.conditions[?(@.type=="Ready")].status}`}
status, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("get").Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
readyNodeNum := strings.Count(status, "True")
return readyNodeNum
} | hypershift | |||
function | openshift/openshift-tests-private | 76a4f13e-0fd0-47ce-a776-63b167d29d76 | getNodepoolHostedClusterNodes | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getNodepoolHostedClusterNodes(name string) []string {
params := []string{"node", "--ignore-not-found", "-l", "hypershift.openshift.io/nodePool=" + name, `-ojsonpath={.items[*].metadata.name}`}
nameList, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(strings.TrimSpace(nameList)) <= 0 {
return []string{}
}
return strings.Split(nameList, " ")
} | hypershift | |||
function | openshift/openshift-tests-private | 1d19aec9-7c9a-45f3-bae2-f0045f7cb13b | getHostedClusterNodeInstanceType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedClusterNodeInstanceType(nodeName string) string {
params := []string{"node", nodeName, "--ignore-not-found", `-ojsonpath={.metadata.labels.beta\.kubernetes\.io/instance-type}`}
instanceType, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).ShouldNot(o.BeEmpty())
return instanceType
} | hypershift | ||||
function | openshift/openshift-tests-private | 80e0d41e-b999-4ba8-bc0d-16fd10db6d05 | checkNodepoolHostedClusterNodeReady | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolHostedClusterNodeReady(name string) bool {
replicasNum := h.getNodepoolReadyReplicas(name)
readyNodeNum := h.getNodepoolHostedClusterReadyNodesNumber(name)
return replicasNum == readyNodeNum
} | hypershift | ||||
function | openshift/openshift-tests-private | d58f7a69-8513-429c-b561-44097b50ec2f | checkNodepoolRollingUpgradeCompleteByMachineDeployment | ['"fmt"', '"strconv"', '"github.com/tidwall/gjson"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolRollingUpgradeCompleteByMachineDeployment(name string) bool {
// check machinedeployment status
cond := `-ojsonpath={.status}`
statusStr, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "machinedeployment", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(statusStr).ShouldNot(o.BeEmpty())
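// parse the whole .status object into a generic map so the individual replica counters can be read below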
status := gjson.Parse(statusStr).Value().(map[string]interface{})
var unavailable, replicas, ready, updated interface{}
var ok bool
//check unavailableReplicas should be zero
unavailable, ok = status["unavailableReplicas"]
o.Expect(ok).Should(o.BeTrue())
unavailableNum, err := strconv.Atoi(fmt.Sprint(unavailable))
o.Expect(err).ShouldNot(o.HaveOccurred())
if unavailableNum != 0 {
return false
}
//check replicas == ready == updated
replicas, ok = status["replicas"]
o.Expect(ok).Should(o.BeTrue())
replicaNum, err := strconv.Atoi(fmt.Sprint(replicas))
o.Expect(err).ShouldNot(o.HaveOccurred())
ready, ok = status["readyReplicas"]
o.Expect(ok).Should(o.BeTrue())
readyNum, err := strconv.Atoi(fmt.Sprint(ready))
o.Expect(err).ShouldNot(o.HaveOccurred())
updated, ok = status["updatedReplicas"]
o.Expect(ok).Should(o.BeTrue())
updatedNum, err := strconv.Atoi(fmt.Sprint(updated))
o.Expect(err).ShouldNot(o.HaveOccurred())
if replicaNum != readyNum || replicaNum != updatedNum {
return false
}
return true
} | hypershift | |||
function | openshift/openshift-tests-private | 6ec7d32e-a2fc-4061-9c6b-06872d2bdaa8 | checkNodepoolHostedClusterNodeInstanceType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolHostedClusterNodeInstanceType(npName string) bool {
expected := h.getAWSNodepoolInstanceType(npName)
replicas := h.getNodepoolReadyReplicas(npName)
nodes := h.getNodepoolHostedClusterNodes(npName)
o.Expect(len(nodes)).Should(o.Equal(replicas))
for _, name := range nodes {
instanceType := h.getHostedClusterNodeInstanceType(name)
if instanceType != expected {
e2e.Logf("hosted cluster node %s instanceType: %s is not expected %s", name, instanceType, expected)
return false
}
}
return true
} | hypershift | ||||
function | openshift/openshift-tests-private | f8c7d677-654d-4e6a-81b5-ad7dd137d9d0 | getCPEtcdLeaderAndFollowers | ['"fmt"', '"strconv"', '"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getCPEtcdLeaderAndFollowers() (string, []string, error) {
var leader string
var followers []string
etcdEndpointStatusCmd := "ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 endpoint status"
replicas := doOcpReq(h.oc, OcpGet, true, "-n", h.namespace+"-"+h.name, "sts", "etcd", `-ojsonpath={.spec.replicas}`)
totalNum, err := strconv.Atoi(replicas)
o.Expect(err).ShouldNot(o.HaveOccurred())
for i := 0; i < totalNum; i++ {
podName := "etcd-" + strconv.Itoa(i)
res, err := exutil.RemoteShPodWithBashSpecifyContainer(h.oc, h.namespace+"-"+h.name, podName, "etcd", etcdEndpointStatusCmd)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("endpoint status %s", res)
arr := strings.Split(res, ",")
o.Expect(len(arr) > 5).Should(o.BeTrue())
if strings.TrimSpace(arr[4]) == "true" {
if leader != "" {
return "", []string{}, fmt.Errorf("multiple leaders found error")
}
leader = podName
} else {
followers = append(followers, podName)
}
}
if leader == "" {
return "", []string{}, fmt.Errorf("no leader found error")
}
return leader, followers, nil
} | hypershift | |||
function | openshift/openshift-tests-private | df2f6b82-faaf-4ee1-925f-7aa2284a3666 | getEtcdNodeMapping | ['"strconv"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getEtcdNodeMapping() map[string]string {
replicas := doOcpReq(h.oc, OcpGet, true, "-n", h.namespace+"-"+h.name, "sts", "etcd", `-ojsonpath={.spec.replicas}`)
totalNum, err := strconv.Atoi(replicas)
o.Expect(err).ShouldNot(o.HaveOccurred())
etcdNodeMap := make(map[string]string, 1)
for i := 0; i < totalNum; i++ {
etcdPod := "etcd-" + strconv.Itoa(i)
node := doOcpReq(h.oc, OcpGet, true, "-n", h.namespace+"-"+h.name, "pod", etcdPod, `-ojsonpath={.spec.nodeName}`)
etcdNodeMap[etcdPod] = node
}
return etcdNodeMap
} | hypershift | |||
function | openshift/openshift-tests-private | 23c5714d-b676-4149-8490-aa5eb09fd0ae | isCPEtcdPodHealthy | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) isCPEtcdPodHealthy(podName string) bool {
etcdEndpointHealthCmd := "ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 endpoint health"
res, err := exutil.RemoteShPodWithBashSpecifyContainer(h.oc, h.namespace+"-"+h.name, podName, "etcd", etcdEndpointHealthCmd)
if err != nil {
e2e.Logf("CP ETCD %s is unhealthy with error : %s , \n res: %s", podName, err.Error(), res)
return false
}
if strings.Contains(res, "unhealthy") {
return false
}
return true
} | hypershift | |||
function | openshift/openshift-tests-private | 7000f729-fcca-4c9b-9bd6-916e4aad33a1 | getNodeNameByNodepool | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getNodeNameByNodepool(npName string) []string {
labelFilter := "hypershift.openshift.io/nodePool=" + npName
nodes := h.getHostedClusterNodeNameByLabelFilter(labelFilter)
return strings.Split(strings.TrimSpace(nodes), " ")
} | hypershift | |||
function | openshift/openshift-tests-private | 3099516c-5876-4812-b984-f5e1f8319694 | getUnstructuredNodePoolByName | ['"context"', '"fmt"', '"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"', '"k8s.io/apimachinery/pkg/runtime/schema"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getUnstructuredNodePoolByName(ctx context.Context, npName string) (*unstructured.Unstructured, error) {
// Obtain the GVR dynamically so the test does not hard-code an API version that may change in the future
npRESTMapping, err := h.oc.RESTMapper().RESTMapping(schema.GroupKind{
Group: "hypershift.openshift.io",
Kind: "NodePool",
})
if err != nil {
return nil, fmt.Errorf("error getting RESTMapping for hypershift.openshift.io/NodePool: %w", err)
}
npUnstructured, err := h.oc.AdminDynamicClient().Resource(npRESTMapping.Resource).Namespace(h.namespace).Get(ctx, npName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error getting NodePool/%s: %w", npName, err)
}
hcName, found, err := unstructured.NestedString(npUnstructured.Object, "spec", "clusterName")
if err != nil || !found {
return nil, fmt.Errorf("error extracting NodePool.spec.clusterName: %w", err)
}
if hcName != h.name {
return nil, fmt.Errorf("expect NodePool.spec.clusterName to be %s but found to be %s", h.name, hcName)
}
return npUnstructured, nil
} | hypershift | |||
function | openshift/openshift-tests-private | e3718793-6f28-4ae0-9011-799ad6884538 | getCurrentInfraMachineTemplatesByNodepool | ['"context"', '"errors"', '"fmt"', '"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"', '"k8s.io/apimachinery/pkg/runtime/schema"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getCurrentInfraMachineTemplatesByNodepool(ctx context.Context, npName string) (*unstructured.Unstructured, error) {
npUnstructured, err := h.getUnstructuredNodePoolByName(ctx, npName)
if err != nil {
return nil, fmt.Errorf("error getting unstructured NodePool %s: %w", npName, err)
}
platform, found, err := unstructured.NestedString(npUnstructured.Object, "spec", "platform", "type")
if err != nil || !found {
return nil, fmt.Errorf("error extracting NodePool.spec.platform.type: %w", err)
}
e2e.Logf("Found NodePool/%s platform = %s", npName, platform)
infraMachineTemplateKind, ok := platform2InfraMachineTemplateKind[platform]
if !ok {
return nil, fmt.Errorf("no infra machine template kind for platform %s. Available options: %v", platform, platform2InfraMachineTemplateKind)
}
e2e.Logf("Found infra machine template kind = %s", infraMachineTemplateKind)
infraMachineTemplateRESTMapping, err := h.oc.RESTMapper().RESTMapping(schema.GroupKind{
Group: capiInfraGroup,
Kind: infraMachineTemplateKind,
})
if err != nil {
return nil, fmt.Errorf("error getting RESTMapping for kind %s in group %s: %w", infraMachineTemplateKind, capiInfraGroup, err)
}
hcpNs := h.getHostedComponentNamespace()
if len(hcpNs) == 0 {
return nil, errors.New("empty hosted component namespace obtained from the hostedCluster object")
}
infraMachineTempName, ok := npUnstructured.GetAnnotations()[npInfraMachineTemplateAnnotationKey]
if !ok {
return nil, fmt.Errorf("annotation %s not found on NodePool %s", npInfraMachineTemplateAnnotationKey, npName)
}
e2e.Logf("Found infra machine template name = %s", infraMachineTempName)
infraMachineTempUnstructured, err := h.oc.AdminDynamicClient().Resource(infraMachineTemplateRESTMapping.Resource).Namespace(hcpNs).Get(ctx, infraMachineTempName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error getting infra machine templates %s: %w", infraMachineTempName, err)
}
e2e.Logf("Found infra machine template %s", infraMachineTempUnstructured.GetName())
return infraMachineTempUnstructured, nil
} | hypershift | |||
function | openshift/openshift-tests-private | abd40858-8dcd-415f-9323-508f0af7d7f2 | DebugHostedClusterNodeWithChroot | ['"fmt"', '"strings"', '"k8s.io/apiserver/pkg/storage/names"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) DebugHostedClusterNodeWithChroot(caseID string, nodeName string, cmd ...string) (string, error) {
newNamespace := names.SimpleNameGenerator.GenerateName(fmt.Sprintf("hypershift-%s-", caseID))
defer func() {
err := h.oc.AsAdmin().AsGuestKubeconf().Run("delete").Args("namespace", newNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpCreate).Args("namespace", newNamespace).Output()
if err != nil {
return "", err
}
res, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpGet).Args("ns/"+newNamespace, `-o=jsonpath={.metadata.labels.pod-security\.kubernetes\.io/enforce}`).Output()
if err != nil {
return "", err
}
if !strings.Contains(res, "privileged") {
_, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run("label").Args("ns/"+newNamespace, `security.openshift.io/scc.podSecurityLabelSync=false`, `pod-security.kubernetes.io/enforce=privileged`, `pod-security.kubernetes.io/audit=privileged`, `pod-security.kubernetes.io/warn=privileged`, "--overwrite").Output()
if err != nil {
return "", err
}
}
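// run the command on the node through a debug pod, chroot'ed into the host filesystem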
res, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args(append([]string{"node/" + nodeName, "--to-namespace=" + newNamespace, "-q", "--", "chroot", "/host"}, cmd...)...).Output()
return res, err
} | hypershift | |||
function | openshift/openshift-tests-private | 141c6bdb-1b4f-481a-a1cf-55ff543059cf | updateHostedClusterAndCheck | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) updateHostedClusterAndCheck(oc *exutil.CLI, updateFunc func() error, deployment string) {
oldVersion := doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.metadata.annotations.deployment\.kubernetes\.io/revision}`)
err := updateFunc()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.metadata.annotations.deployment\.kubernetes\.io/revision}`)
}, DefaultTimeout, DefaultTimeout/10).ShouldNot(o.Equal(oldVersion), deployment+" not restart")
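// wait until the deployment reports all replicas ready, i.e. .status.replicas equals .status.readyReplicas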
o.Eventually(func() int {
return strings.Compare(doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.status.replicas}`), doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.status.readyReplicas}`))
}, LongTimeout, LongTimeout/10).Should(o.Equal(0), deployment+" is not ready")
} | hypershift | |||
function | openshift/openshift-tests-private | e017b9d6-8d53-4b40-adb1-1c72c5d06bf6 | checkIDPConfigReady | ['"fmt"', '"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkIDPConfigReady(idpType IdentityProviderType, idpName string, secretName string) bool {
//check idpType by idpName
if idpType != doOcpReq(h.oc, OcpGet, false, "hostedcluster", h.name, "-n", h.namespace, "--ignore-not-found", fmt.Sprintf(`-ojsonpath={.spec.configuration.oauth.identityProviders[?(@.name=="%s")].type}`, idpName)) {
return false
}
//check configmap oauth-openshift
configYaml := doOcpReq(h.oc, OcpGet, false, "configmap", "oauth-openshift", "-n", h.namespace+"-"+h.name, "--ignore-not-found", `-ojsonpath={.data.config\.yaml}`)
if !strings.Contains(configYaml, fmt.Sprintf("name: %s", idpName)) {
return false
}
if !strings.Contains(configYaml, fmt.Sprintf("kind: %sIdentityProvider", idpType)) {
return false
}
//check secret name if secretName is not empty
if secretName != "" {
volumeName := doOcpReq(h.oc, OcpGet, false, "deploy", "oauth-openshift", "-n", h.namespace+"-"+h.name, "--ignore-not-found", fmt.Sprintf(`-ojsonpath={.spec.template.spec.volumes[?(@.secret.secretName=="%s")].name}`, secretName))
if !strings.Contains(volumeName, "idp-secret") {
return false
}
}
return true
} | hypershift | |||
function | openshift/openshift-tests-private | 0eda1d54-aa2d-403e-b3ed-130d5d9860f5 | pollCheckIDPConfigReady | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckIDPConfigReady(idpType IdentityProviderType, idpName string, secretName string) func() bool {
return func() bool {
return h.checkIDPConfigReady(idpType, idpName, secretName)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 87ee080e-4123-4939-87f7-2980a15a5ac5 | getEtcdEndpointStatus | ['"encoding/json"', '"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getEtcdEndpointStatus(endpoints ...string) (etcdEndpointStatusResult, error) {
var etcdEndpointStatusCmd string
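// with no explicit endpoints, query every cluster member via --cluster; otherwise query only the listed endpoints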
if len(endpoints) == 0 {
etcdEndpointStatusCmd = etcdCmdPrefixForHostedCluster + " --endpoints " + etcdLocalClientReqEndpoint + " endpoint status --cluster -w json"
} else {
etcdEndpointStatusCmd = etcdCmdPrefixForHostedCluster + " --endpoints " + strings.Join(endpoints, ",") + " endpoint status -w json"
}
endpointStatus := doOcpReq(h.oc, OcpExec, true, "-n", h.getHostedComponentNamespace(), "etcd-0", "-c", "etcd", "--", "bash", "-c", etcdEndpointStatusCmd)
e2e.Logf("Etcd endpoint status response = %s", endpointStatus)
var res etcdEndpointStatusResult
if err := json.Unmarshal([]byte(endpointStatus), &res); err != nil {
return nil, err
}
return res, nil
} | hypershift | |||
function | openshift/openshift-tests-private | 6eff62f4-4b21-4a79-897a-a687ffa82589 | getEtcdEndpointDbStatsByIdx | ['"errors"', '"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getEtcdEndpointDbStatsByIdx(idx int) (dbSize, dbSizeInUse int64, dbFragRatio float64, err error) {
var localEtcdEndpointStatus etcdEndpointStatusResult
etcdEndpoint := h.getEtcdDiscoveryEndpointForClientReqByIdx(idx)
if localEtcdEndpointStatus, err = h.getEtcdEndpointStatus(etcdEndpoint); err != nil {
return -1, -1, 0, fmt.Errorf("error querying local ETCD endpoint status: %w", err)
}
dbSize, dbSizeInUse = localEtcdEndpointStatus[0].Status.DbSize, localEtcdEndpointStatus[0].Status.DbSizeInUse
if dbSize == 0 {
return -1, -1, 0, errors.New("zero dbSize obtained from ETCD server's response")
}
if dbSizeInUse == 0 {
return -1, -1, 0, errors.New("zero dbSizeInUse obtained from ETCD server's response")
}
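// fragmentation ratio: the fraction of the on-disk DB size that is unused and reclaimable by defragmentation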
fragRatio := float64(dbSize-dbSizeInUse) / float64(dbSize)
e2e.Logf("Found ETCD endpoint %s: dbSize = %d, dbSizeInUse = %d, fragmentation ratio = %.2f", etcdEndpoint, dbSize, dbSizeInUse, fragRatio)
return dbSize, dbSizeInUse, fragRatio, nil
} | hypershift | |||
function | openshift/openshift-tests-private | 73d213fe-01c8-4ef5-8bbf-98b4fc434b13 | getEtcdDiscoveryEndpointForClientReqByIdx | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getEtcdDiscoveryEndpointForClientReqByIdx(idx int) string {
hcpNs := h.getHostedComponentNamespace()
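// per-pod client endpoint exposed by the headless etcd discovery service: etcd-<idx>.<service>.<namespace>.svc:<port>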
return fmt.Sprintf("etcd-%d.%s.%s.svc:%s", idx, etcdDiscoverySvcNameForHostedCluster, hcpNs, etcdClientReqPort)
} | hypershift | |||
function | openshift/openshift-tests-private | 28032c48-181c-42fb-a657-7e88a21d3b6a | checkHCSpecForAzureEtcdEncryption | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkHCSpecForAzureEtcdEncryption(expected azureKMSKey, isBackupKey bool) {
keyPath := "activeKey"
if isBackupKey {
keyPath = "backupKey"
}
keyName := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
fmt.Sprintf("-o=jsonpath={.spec.secretEncryption.kms.azure.%s.keyName}", keyPath))
o.Expect(keyName).To(o.Equal(expected.keyName))
keyVaultName := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
fmt.Sprintf("-o=jsonpath={.spec.secretEncryption.kms.azure.%s.keyVaultName}", keyPath))
o.Expect(keyVaultName).To(o.Equal(expected.keyVaultName))
keyVersion := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
fmt.Sprintf("-o=jsonpath={.spec.secretEncryption.kms.azure.%s.keyVersion}", keyPath))
o.Expect(keyVersion).To(o.Equal(expected.keyVersion))
} | hypershift | |||
function | openshift/openshift-tests-private | 3c0108cb-6cba-4fd5-a386-a1dfff1a05a9 | checkKASEncryptionConfiguration | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkKASEncryptionConfiguration() {
kasSecretEncryptionConfigSecret := doOcpReq(h.oc, OcpExtract, true,
fmt.Sprintf("secret/%s", kasEncryptionConfigSecretName), "-n", h.getHostedComponentNamespace(), "--to", "-")
o.Expect(kasSecretEncryptionConfigSecret).To(o.And(
o.ContainSubstring("secrets"),
o.ContainSubstring("configmaps"),
o.ContainSubstring("routes"),
o.ContainSubstring("oauthaccesstokens"),
o.ContainSubstring("oauthauthorizetokens"),
))
} | hypershift | |||
function | openshift/openshift-tests-private | 7390a902-c129-4d08-a0f1-0c8631f71a6f | checkSecretEncryptionDecryption | ['"fmt"', '"strings"', 'appsv1 "k8s.io/api/apps/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkSecretEncryptionDecryption(isEtcdEncrypted bool) {
var (
secretName = fmt.Sprintf("etcd-encryption-%s", strings.ToLower(exutil.RandStrDefault()))
secretNs = "default"
secretKey = "foo"
secretValue = "bar"
)
e2e.Logf("Creating secret/%s within ns/%s of the hosted cluster", secretName, secretNs)
doOcpReq(h.oc.AsGuestKubeconf(), OcpCreate, true, "secret", "generic", secretName,
"-n", secretNs, fmt.Sprintf("--from-literal=%s=%s", secretKey, secretValue))
e2e.Logf("Checking secret decryption")
decryptedSecretContent := doOcpReq(h.oc.AsGuestKubeconf(), OcpExtract, true,
fmt.Sprintf("secret/%s", secretName), "-n", secretNs, "--to", "-")
o.Expect(decryptedSecretContent).To(o.And(
o.ContainSubstring(secretKey),
o.ContainSubstring(secretValue),
))
// Unencrypted secrets look like the following:
// /kubernetes.io/secrets/default/test-secret.<secret-content>
// Encrypted secrets look like the following:
// /kubernetes.io/secrets/default/test-secret.k8s:enc:kms:v1:<EncryptionConfiguration-provider-name>:.<encrypted-content>
if !isEtcdEncrypted {
return
}
e2e.Logf("Checking ETCD encryption")
etcdCmd := fmt.Sprintf("%s --endpoints %s get /kubernetes.io/secrets/%s/%s | hexdump -C | awk -F '|' '{print $2}' OFS= ORS=",
etcdCmdPrefixForHostedCluster, etcdLocalClientReqEndpoint, secretNs, secretName)
encryptedSecretContent, err := exutil.RemoteShPodWithBashSpecifyContainer(h.oc, h.getHostedComponentNamespace(),
"etcd-0", "etcd", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get encrypted secret content within ETCD")
o.Expect(encryptedSecretContent).NotTo(o.BeEmpty(), "obtained empty encrypted secret content")
o.Expect(encryptedSecretContent).NotTo(o.ContainSubstring(secretValue))
e2e.Logf("Deleting secret")
_ = h.oc.AsGuestKubeconf().Run(OcpDelete).Args("secret", secretName, "-n", secretNs).Execute()
} | hypershift | |||
function | openshift/openshift-tests-private | 6d7740c0-b19f-429f-b11d-f83cf17eb5e5 | checkAzureEtcdEncryption | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkAzureEtcdEncryption(activeKey azureKMSKey, backupKey *azureKMSKey) {
e2e.Logf("Checking hc.spec.secretEncryption.kms.azure.activeKey")
h.checkHCSpecForAzureEtcdEncryption(activeKey, false)
if backupKey != nil {
e2e.Logf("Checking hc.spec.secretEncryption.kms.azure.backupKey")
h.checkHCSpecForAzureEtcdEncryption(*backupKey, true)
}
e2e.Logf("Checking the ValidAzureKMSConfig condition of the hc")
validAzureKMSConfigStatus := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
`-o=jsonpath={.status.conditions[?(@.type == "ValidAzureKMSConfig")].status}`)
o.Expect(validAzureKMSConfigStatus).To(o.Equal("True"))
e2e.Logf("Checking KAS EncryptionConfiguration")
h.checkKASEncryptionConfiguration()
e2e.Logf("Checking secret encryption/decryption within the hosted cluster")
h.checkSecretEncryptionDecryption(true)
} | hypershift | ||||
function | openshift/openshift-tests-private | cac3c05b-178c-4f35-9868-04f473d0b0fc | waitForKASDeployUpdate | ['"context"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) waitForKASDeployUpdate(ctx context.Context, oldResourceVersion string) {
kasDeployKindAndName := "deploy/kube-apiserver"
err := exutil.WaitForResourceUpdate(ctx, h.oc, DefaultTimeout/20, DefaultTimeout,
kasDeployKindAndName, h.getHostedComponentNamespace(), oldResourceVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to wait for KAS deployment to be updated")
} | hypershift | |||
function | openshift/openshift-tests-private | 5331fb9c-cb69-4e70-a1bb-bf089a6c4d79 | waitForKASDeployReady | ['"context"', '"k8s.io/apimachinery/pkg/fields"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) waitForKASDeployReady(ctx context.Context) {
kasDeployName := "kube-apiserver"
exutil.WaitForDeploymentsReady(ctx, func(ctx context.Context) (*appsv1.DeploymentList, error) {
return h.oc.AdminKubeClient().AppsV1().Deployments(h.getHostedComponentNamespace()).List(ctx, metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("metadata.name", kasDeployName).String(),
})
}, exutil.IsDeploymentReady, LongTimeout, LongTimeout/20, true)
} | hypershift | |||
function | openshift/openshift-tests-private | 56005669-268e-4a41-8b19-0bfb7729f572 | patchAzureKMS | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) patchAzureKMS(activeKey, backupKey *azureKMSKey) {
patch, err := getHCPatchForAzureKMS(activeKey, backupKey)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get HC patch Azure KMS")
doOcpReq(h.oc, OcpPatch, true, "hc", "-n", h.namespace, h.name, "--type=merge", "-p", patch)
} | hypershift | ||||
function | openshift/openshift-tests-private | e59cd90c-1fe0-4682-bb1e-f03f0cb6c4a5 | removeAzureKMSBackupKey | ['"encoding/json"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) removeAzureKMSBackupKey() {
doOcpReq(h.oc, OcpPatch, true, "hc", h.name, "-n", h.namespace, "--type=json",
"-p", `[{"op": "remove", "path": "/spec/secretEncryption/kms/azure/backupKey"}]`)
} | hypershift | |||
function | openshift/openshift-tests-private | bbd62d00-7dde-4971-a42e-1250f2265f1a | encodeSecretsNs | ['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"k8s.io/client-go/util/retry"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) encodeSecretsNs(ctx context.Context, ns string) {
guestKubeClient := h.oc.GuestKubeClient()
secrets, err := guestKubeClient.CoreV1().Secrets(ns).List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list secrets")
backoff := wait.Backoff{Steps: 10, Duration: 1 * time.Second}
for _, secret := range secrets.Items {
err = retry.RetryOnConflict(backoff, func() error {
// Fetch the latest version of the secret
latestSecret, getErr := guestKubeClient.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{})
if getErr != nil {
return getErr
}
// Re-write the secret unchanged; the update forces the API server to re-encrypt it with the current encryption configuration
_, updateErr := guestKubeClient.CoreV1().Secrets(ns).Update(ctx, latestSecret, metav1.UpdateOptions{})
return updateErr
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to update secret with retry")
}
} | hypershift | |||
function | openshift/openshift-tests-private | b7ffc0a2-9565-4c1d-babf-3467d55439b1 | encodeSecrets | ['"context"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) encodeSecrets(ctx context.Context) {
namespaces, err := h.oc.GuestKubeClient().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list namespaces")
for _, ns := range namespaces.Items {
h.encodeSecretsNs(ctx, ns.Name)
}
} | hypershift | |||
function | openshift/openshift-tests-private | 366df248-3d47-40de-b45d-9ac0fad69dfb | encodeConfigmapsNs | ['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"k8s.io/client-go/util/retry"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) encodeConfigmapsNs(ctx context.Context, ns string) {
guestKubeClient := h.oc.GuestKubeClient()
configmaps, err := guestKubeClient.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list configmaps")
backoff := wait.Backoff{Steps: 10, Duration: 1 * time.Second}
for _, configmap := range configmaps.Items {
err = retry.RetryOnConflict(backoff, func() error {
// Fetch the latest version of the configmap
latestConfigmap, getErr := guestKubeClient.CoreV1().ConfigMaps(ns).Get(ctx, configmap.Name, metav1.GetOptions{})
if getErr != nil {
return getErr
}
// Re-write the configmap unchanged; the update forces the API server to re-encrypt it with the current encryption configuration
_, updateErr := guestKubeClient.CoreV1().ConfigMaps(ns).Update(ctx, latestConfigmap, metav1.UpdateOptions{})
return updateErr
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to update configmap with retry")
}
} | hypershift | |||
function | openshift/openshift-tests-private | bac6b2b9-89af-4b8e-8fcf-2205ef49e680 | encodeConfigmaps | ['"context"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) encodeConfigmaps(ctx context.Context) {
namespaces, err := h.oc.GuestKubeClient().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list namespaces")
for _, ns := range namespaces.Items {
h.encodeConfigmapsNs(ctx, ns.Name)
}
} | hypershift | |||
function | openshift/openshift-tests-private | af930cb7-15f8-4e3c-be8a-aa6e4dfcbecb | pollUntilReady | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollUntilReady() {
o.Eventually(h.pollHostedClustersReady(), ClusterInstallTimeout, ClusterInstallTimeout/20).Should(o.BeTrue())
} | hypershift | ||||
function | openshift/openshift-tests-private | a1b66466-1499-4a29-8342-1307518b5559 | getKASResourceVersion | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getKASResourceVersion() string {
return doOcpReq(h.oc, OcpGet, true, "deploy/kube-apiserver", "-n", h.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
} | hypershift | ||||
function | openshift/openshift-tests-private | 0cb4458d-592e-48b7-96ae-83505682193f | getOLMCatalogPlacement | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getOLMCatalogPlacement() string {
return doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace, "-o=jsonpath={.spec.olmCatalogPlacement}")
} | hypershift | ||||
test | openshift/openshift-tests-private | a296b958-2015-4527-8d68-f371419ef256 | hypershift | import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/blang/semver/v4"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
awsiam "github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sts"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/utils/format"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | package hypershift
import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/blang/semver/v4"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
awsiam "github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sts"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/utils/format"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
)
var _ = g.Describe("[sig-hypershift] Hypershift", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIForKubeOpenShift("hypershift")
iaasPlatform, hypershiftTeamBaseDir, hcInfraID string
hostedcluster *hostedCluster
hostedclusterPlatform PlatformType
)
g.BeforeEach(func(ctx context.Context) {
hostedClusterName, hostedclusterKubeconfig, hostedClusterNs := exutil.ValidHypershiftAndGetGuestKubeConf(oc)
oc.SetGuestKubeconf(hostedclusterKubeconfig)
hostedcluster = newHostedCluster(oc, hostedClusterNs, hostedClusterName)
hostedcluster.setHostedClusterKubeconfigFile(hostedclusterKubeconfig)
operator := doOcpReq(oc, OcpGet, false, "pods", "-n", "hypershift", "-ojsonpath={.items[*].metadata.name}")
if len(operator) <= 0 {
g.Skip("hypershift operator not found, skip test run")
}
// get IaaS platform
iaasPlatform = exutil.ExtendedCheckPlatform(ctx, oc)
hypershiftTeamBaseDir = exutil.FixturePath("testdata", "hypershift")
// hosted cluster infra ID
hcInfraID = doOcpReq(oc, OcpGet, true, "hc", hostedClusterName, "-n", hostedClusterNs, `-ojsonpath={.spec.infraID}`)
hostedclusterPlatform = doOcpReq(oc, OcpGet, true, "hostedcluster", "-n", hostedcluster.namespace, hostedcluster.name, "-ojsonpath={.spec.platform.type}")
e2e.Logf("HostedCluster platform is: %s", hostedclusterPlatform)
if exutil.IsROSA() {
exutil.ROSALogin()
}
})
// author: [email protected]
g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-42855-Check Status Conditions for HostedControlPlane", func() {
rc := hostedcluster.checkHCConditions()
o.Expect(rc).Should(o.BeTrue())
// add more test here to check hypershift util
operatorNS := exutil.GetHyperShiftOperatorNameSpace(oc)
e2e.Logf("hosted cluster operator namespace %s", operatorNS)
o.Expect(operatorNS).NotTo(o.BeEmpty())
hostedclusterNS := exutil.GetHyperShiftHostedClusterNameSpace(oc)
e2e.Logf("hosted cluster namespace %s", hostedclusterNS)
o.Expect(hostedclusterNS).NotTo(o.BeEmpty())
guestClusterName, guestClusterKube, _ := exutil.ValidHypershiftAndGetGuestKubeConf(oc)
e2e.Logf("hostedclustercluster name %s", guestClusterName)
cv, err := oc.AsAdmin().SetGuestKubeconf(guestClusterKube).AsGuestKubeconf().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("hosted cluster clusterversion name %s", cv)
guestClusterName, guestClusterKube, _ = exutil.ValidHypershiftAndGetGuestKubeConfWithNoSkip(oc)
o.Expect(guestClusterName).NotTo(o.BeEmpty())
o.Expect(guestClusterKube).NotTo(o.BeEmpty())
cv, err = oc.AsAdmin().SetGuestKubeconf(guestClusterKube).AsGuestKubeconf().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("hosted cluster clusterversion with noskip api name %s", cv)
})
// author: [email protected]
// deprecated: it is covered by console team https://github.com/openshift/openshift-tests-private/blob/master/frontend/console-test-frontend-hypershift.sh#L32-L35
g.It("Author:heli-ROSA-OSD_CCS-DEPRECATED-HyperShiftMGMT-Critical-43555-Allow direct ingress on guest clusters on AWS", func() {
var bashClient = NewCmdClient()
console, psw := hostedcluster.getHostedclusterConsoleInfo()
parms := fmt.Sprintf("curl -u admin:%s %s -k -LIs -o /dev/null -w %s ", psw, console, "%{http_code}")
res, err := bashClient.Run(parms).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
checkSubstring(res, []string{"200"})
})
// author: [email protected] [email protected]
// test run duration: ~25min
g.It("Author:heli-HyperShiftMGMT-Longduration-NonPreRelease-Critical-43272-Critical-43829-Test cluster autoscaler via hostedCluster autoScaling settings [Serial]", func() {
if iaasPlatform != "aws" && iaasPlatform != "azure" {
g.Skip("Skip due to incompatible platform")
}
var (
npCount = 1
npName = "jz-43272-test-01"
autoScalingMax = "3"
autoScalingMin = "1"
workloadTemplate = filepath.Join(hypershiftTeamBaseDir, "workload.yaml")
parsedWorkloadFile = "ocp-43272-workload-template.config"
)
exutil.By("create a nodepool")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
switch iaasPlatform {
case "aws":
hostedcluster.createAwsNodePool(npName, npCount)
case "azure":
hostedcluster.createAdditionalAzureNodePool(npName, npCount)
}
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
o.Expect(hostedcluster.isNodepoolAutosaclingEnabled(npName)).Should(o.BeFalse())
exutil.By("enable the nodepool to be autoscaling")
hostedcluster.setNodepoolAutoScale(npName, autoScalingMax, autoScalingMin)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready after setting autoscaling error")
o.Expect(hostedcluster.isNodepoolAutosaclingEnabled(npName)).Should(o.BeTrue())
exutil.By("create a job as workload in the hosted cluster")
wl := workload{
name: "workload",
namespace: "default",
template: workloadTemplate,
}
defer wl.delete(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile)
wl.create(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile, "--local")
exutil.By("check nodepool is auto-scaled to max")
o.Eventually(hostedcluster.pollCheckNodepoolCurrentNodes(npName, autoScalingMax), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool autoscaling max error")
})
// author: [email protected]
g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-43554-Check FIPS support in the Hosted Cluster", func() {
if !hostedcluster.isFIPEnabled() {
g.Skip("only for the fip enabled hostedcluster, skip test run")
}
o.Expect(hostedcluster.checkFIPInHostedCluster()).Should(o.BeTrue())
})
// author: [email protected]
g.It("HyperShiftMGMT-ROSA-Author:heli-Critical-45770-Test basic fault resilient HA-capable etcd[Serial][Disruptive]", func() {
if !hostedcluster.isCPHighlyAvailable() {
g.Skip("this is for hosted cluster HA mode , skip test run")
}
//check etcd
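// in HA mode the etcd StatefulSet should spread its pods across zones via required pod anti-affinity on topology.kubernetes.io/zone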
antiAffinityJSONPath := ".spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution"
topologyKeyJSONPath := antiAffinityJSONPath + "[*].topologyKey"
desiredTopogyKey := "topology.kubernetes.io/zone"
etcdSts := "etcd"
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "statefulset", etcdSts, "-ojsonpath={"+antiAffinityJSONPath+"}")
res := doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "statefulset", etcdSts, "-ojsonpath={"+topologyKeyJSONPath+"}")
o.Expect(res).To(o.ContainSubstring(desiredTopogyKey))
//check etcd healthy
etcdCmd := "ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt " +
"--cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379"
etcdHealthCmd := etcdCmd + " endpoint health"
etcdStatusCmd := etcdCmd + " endpoint status"
for i := 0; i < 3; i++ {
res = doOcpReq(oc, OcpExec, true, "-n", controlplaneNS, "etcd-"+strconv.Itoa(i), "--", "sh", "-c", etcdHealthCmd)
o.Expect(res).To(o.ContainSubstring("localhost:2379 is healthy"))
}
for i := 0; i < 3; i++ {
etcdPodName := "etcd-" + strconv.Itoa(i)
res = doOcpReq(oc, OcpExec, true, "-n", controlplaneNS, etcdPodName, "--", "sh", "-c", etcdStatusCmd)
if strings.Contains(res, "false, false") {
e2e.Logf("find etcd follower etcd-%d, begin to delete this pod", i)
//delete the first follower
doOcpReq(oc, OcpDelete, true, "-n", controlplaneNS, "pod", etcdPodName)
//check the follower can be restarted and keep health
err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
status := doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "pod", etcdPodName, "-ojsonpath={.status.phase}")
if status == "Running" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "etcd cluster health check error")
//check the follower pod running
status := doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "pod", etcdPodName, "-ojsonpath={.status.phase}")
o.Expect(status).To(o.ContainSubstring("Running"))
//check the follower health
execEtcdHealthCmd := append([]string{"-n", controlplaneNS, etcdPodName, "--", "sh", "-c"}, etcdHealthCmd)
res = doOcpReq(oc, OcpExec, true, execEtcdHealthCmd...)
o.Expect(res).To(o.ContainSubstring("localhost:2379 is healthy"))
break
}
}
})
// author: [email protected]
g.It("Author:heli-HyperShiftMGMT-ROSA-Critical-45801-Critical-45821-Test fault resilient HA-capable etcd under network partition[Disruptive]", func(ctx context.Context) {
if !hostedcluster.isCPHighlyAvailable() {
g.Skip("this is for hosted cluster HA mode , skip test run")
}
exutil.SkipOnAKSNess(ctx, oc, false)
g.By("find leader and get mapping between etcd pod name and node name")
etcdNodeMap := hostedcluster.getEtcdNodeMapping()
leader, followers, err := hostedcluster.getCPEtcdLeaderAndFollowers()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(len(followers) > 1).Should(o.BeTrue())
defer func() {
o.Eventually(func() bool {
return hostedcluster.isCPEtcdPodHealthy(followers[0])
}, ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("error: follower %s did not recover", followers[0]))
o.Expect(hostedcluster.isCPEtcdPodHealthy(leader)).Should(o.BeTrue())
for i := 1; i < len(followers); i++ {
o.Expect(hostedcluster.isCPEtcdPodHealthy(followers[i])).Should(o.BeTrue())
}
}()
g.By("drop traffic from leader to follower")
defer func() {
debugNodeStdout, err := exutil.DebugNodeWithChroot(oc, etcdNodeMap[followers[0]], "iptables", "-t", "filter", "-D", "INPUT", "-s", etcdNodeMap[leader], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("recover traffic from leader %s to follower %s, debug output: %s", etcdNodeMap[leader], etcdNodeMap[followers[0]], debugNodeStdout)
}()
debugNodeStdout, err := exutil.DebugNodeWithChroot(oc, etcdNodeMap[followers[0]], "iptables", "-t", "filter", "-A", "INPUT", "-s", etcdNodeMap[leader], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("drop traffic debug output 1: %s", debugNodeStdout)
g.By("drop traffic from follower to leader")
defer func() {
debugNodeStdout, err := exutil.DebugNodeWithChroot(oc, etcdNodeMap[leader], "iptables", "-t", "filter", "-D", "INPUT", "-s", etcdNodeMap[followers[0]], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("recover traffic from follower %s to leader %s, debug output: %s", etcdNodeMap[followers[0]], etcdNodeMap[leader], debugNodeStdout)
}()
debugNodeStdout, err = exutil.DebugNodeWithChroot(oc, etcdNodeMap[leader], "iptables", "-t", "filter", "-A", "INPUT", "-s", etcdNodeMap[followers[0]], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("drop traffic debug output 2: %s", debugNodeStdout)
g.By("follower 0 should not be health again")
o.Eventually(func() bool {
return hostedcluster.isCPEtcdPodHealthy(followers[0])
}, ShortTimeout, ShortTimeout/10).Should(o.BeFalse(), fmt.Sprintf("error: follower %s should be unhealthy now", followers[0]))
g.By("leader should be running status and the rest of follower are still in the running status too")
o.Expect(hostedcluster.isCPEtcdPodHealthy(leader)).Should(o.BeTrue())
for i := 1; i < len(followers); i++ {
o.Expect(hostedcluster.isCPEtcdPodHealthy(followers[i])).Should(o.BeTrue())
}
g.By("check hosted cluster is still working")
o.Eventually(func() error {
_, err = hostedcluster.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args("node").Output()
return err
}, ShortTimeout, ShortTimeout/10).ShouldNot(o.HaveOccurred(), "error: hosted cluster is no longer serving API requests")
g.By("ocp-45801 test passed")
})
// author: [email protected]
g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-46711-Test HCP components to use service account tokens", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 46711 is for AWS - skipping test ...")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
secretsWithCreds := []string{
"cloud-controller-creds",
"cloud-network-config-controller-creds",
"control-plane-operator-creds",
"ebs-cloud-credentials",
"node-management-creds",
}
for _, sec := range secretsWithCreds {
cre := doOcpReq(oc, OcpGet, true, "secret", sec, "-n", controlplaneNS, "-ojsonpath={.data.credentials}")
roleInfo, err := base64.StdEncoding.DecodeString(cre)
o.Expect(err).ShouldNot(o.HaveOccurred())
checkSubstring(string(roleInfo), []string{"role_arn", "web_identity_token_file"})
}
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:heli-Critical-44824-Resource requests/limit configuration for critical control plane workloads[Serial][Disruptive]", func() {
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
cpuRequest := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.cpu}`)
memoryRequest := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.memory}`)
e2e.Logf("cpu request: %s, memory request: %s\n", cpuRequest, memoryRequest)
defer func() {
//change back to original cpu, memory value
patchOptions := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"kube-apiserver","resources":{"requests":{"cpu":"%s", "memory": "%s"}}}]}}}}`, cpuRequest, memoryRequest)
doOcpReq(oc, OcpPatch, true, "deploy", "kube-apiserver", "-n", controlplaneNS, "-p", patchOptions)
//check new value of cpu, memory resource
err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
cpuRes := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.cpu}`)
if cpuRes != cpuRequest {
return false, nil
}
memoryRes := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.memory}`)
if memoryRes != memoryRequest {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "kube-apiserver cpu & memory resource change back error")
}()
//change cpu, memory resources
desiredCPURequest := "200m"
desiredMemoryReqeust := "1700Mi"
patchOptions := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"kube-apiserver","resources":{"requests":{"cpu":"%s", "memory": "%s"}}}]}}}}`, desiredCPURequest, desiredMemoryReqeust)
doOcpReq(oc, OcpPatch, true, "deploy", "kube-apiserver", "-n", controlplaneNS, "-p", patchOptions)
//check new value of cpu, memory resource
err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
cpuRes := doOcpReq(oc, OcpGet, false, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.cpu}`)
if cpuRes != desiredCPURequest {
return false, nil
}
memoryRes := doOcpReq(oc, OcpGet, false, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.memory}`)
if memoryRes != desiredMemoryReqeust {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "kube-apiserver cpu & memory resource update error")
})
// author: [email protected]
g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-44926-Test priority classes for Hypershift control plane workloads", func() {
if iaasPlatform != "aws" && iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44926 is for AWS or Azure - skipping test ...")
}
//deployment
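// expected priorityClassName for each control-plane deployment, keyed by priority class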
priorityClasses := map[string][]string{
"hypershift-api-critical": {
"kube-apiserver",
"oauth-openshift",
"openshift-oauth-apiserver",
"openshift-apiserver",
"packageserver",
"ovnkube-control-plane",
},
//oc get deploy -n clusters-demo-02 -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.template.
//spec.priorityClassName}{"\n"}{end}' | grep hypershift-control-plane | awk '{print "\""$1"\""","}'
"hypershift-control-plane": {
"capi-provider",
"catalog-operator",
"cluster-api",
"cluster-autoscaler",
"cluster-image-registry-operator",
"cluster-network-operator",
"cluster-node-tuning-operator",
"cluster-policy-controller",
"cluster-storage-operator",
"cluster-version-operator",
"control-plane-operator",
"csi-snapshot-controller",
"csi-snapshot-controller-operator",
"csi-snapshot-webhook",
"dns-operator",
"hosted-cluster-config-operator",
"ignition-server",
"ingress-operator",
"konnectivity-agent",
"kube-controller-manager",
"kube-scheduler",
"machine-approver",
"multus-admission-controller",
"olm-operator",
"openshift-controller-manager",
"openshift-route-controller-manager",
"cloud-network-config-controller",
},
}
if hostedcluster.getOLMCatalogPlacement() == olmCatalogPlacementManagement {
priorityClasses["hypershift-control-plane"] = append(priorityClasses["hypershift-control-plane"], "certified-operators-catalog", "community-operators-catalog", "redhat-marketplace-catalog", "redhat-operators-catalog")
}
switch iaasPlatform {
case "aws":
priorityClasses["hypershift-control-plane"] = append(priorityClasses["hypershift-control-plane"], "aws-ebs-csi-driver-operator", "aws-ebs-csi-driver-controller")
case "azure":
priorityClasses["hypershift-control-plane"] = append(priorityClasses["hypershift-control-plane"], "azure-disk-csi-driver-controller", "azure-disk-csi-driver-operator", "azure-file-csi-driver-controller", "azure-file-csi-driver-operator", "azure-cloud-controller-manager")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
for priority, components := range priorityClasses {
e2e.Logf("priorityClass: %s %v\n", priority, components)
for _, c := range components {
res := doOcpReq(oc, OcpGet, true, "deploy", c, "-n", controlplaneNS, "-ojsonpath={.spec.template.spec.priorityClassName}")
o.Expect(res).To(o.Equal(priority))
}
}
//check statefulset for etcd
etcdSts := "etcd"
etcdPriorityClass := "hypershift-etcd"
res := doOcpReq(oc, OcpGet, true, "statefulset", etcdSts, "-n", controlplaneNS, "-ojsonpath={.spec.template.spec.priorityClassName}")
o.Expect(res).To(o.Equal(etcdPriorityClass))
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:heli-NonPreRelease-Longduration-Critical-44942-Enable control plane deployment restart on demand[Serial]", func() {
res := doOcpReq(oc, OcpGet, false, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.metadata.annotations}")
e2e.Logf("get hostedcluster %s annotation: %s ", hostedcluster.name, res)
var cmdClient = NewCmdClient()
var restartDate string
var err error
systype := runtime.GOOS
if systype == "darwin" {
restartDate, err = cmdClient.Run("gdate --rfc-3339=date").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} else if systype == "linux" {
restartDate, err = cmdClient.Run("date --rfc-3339=date").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} else {
g.Skip("only available on linux or mac system")
}
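// the hypershift.openshift.io/restart-date annotation on the HostedCluster is propagated to the control plane deployments' pod template annotations, which triggers a rolling restart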
annotationKey := "hypershift.openshift.io/restart-date"
//value to be annotated
restartAnnotation := fmt.Sprintf("%s=%s", annotationKey, restartDate)
//annotations to be verified
desiredAnnotation := fmt.Sprintf(`"%s":"%s"`, annotationKey, restartDate)
//delete if already has this annotation
existingAnno := doOcpReq(oc, OcpGet, false, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.metadata.annotations}")
e2e.Logf("get hostedcluster %s annotation: %s ", hostedcluster.name, existingAnno)
if strings.Contains(existingAnno, desiredAnnotation) {
removeAnno := annotationKey + "-"
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, removeAnno)
}
//add annotation
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, restartAnnotation)
e2e.Logf("set hostedcluster %s annotation %s done ", hostedcluster.name, restartAnnotation)
res = doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.metadata.annotations}")
e2e.Logf("get hostedcluster %s annotation: %s ", hostedcluster.name, res)
o.Expect(res).To(o.ContainSubstring(desiredAnnotation))
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
res = doOcpReq(oc, OcpGet, true, "deploy", "kube-apiserver", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.spec.template.metadata.annotations}")
if strings.Contains(res, desiredAnnotation) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "ocp-44942 hostedcluster restart annotation not found error")
})
// author: [email protected]
g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-44988-Colocate control plane components by default", func() {
//deployment
controlplaneComponents := []string{
"kube-apiserver",
"oauth-openshift",
"openshift-oauth-apiserver",
"openshift-apiserver",
"packageserver",
"capi-provider",
"catalog-operator",
"cluster-api",
// ignore it due to the Azure failure when checking the label hypershift.openshift.io/hosted-control-plane=clusters-{cluster-name}
//"cluster-autoscaler",
"cluster-image-registry-operator",
"cluster-network-operator",
"cluster-node-tuning-operator",
"cluster-policy-controller",
"cluster-storage-operator",
"cluster-version-operator",
"control-plane-operator",
"csi-snapshot-controller-operator",
"dns-operator",
"hosted-cluster-config-operator",
"ignition-server",
"ingress-operator",
"konnectivity-agent",
"kube-controller-manager",
"kube-scheduler",
"machine-approver",
"olm-operator",
"openshift-controller-manager",
"openshift-route-controller-manager",
//"cloud-network-config-controller",
"csi-snapshot-controller",
"csi-snapshot-webhook",
//"multus-admission-controller",
//"ovnkube-control-plane",
}
if hostedclusterPlatform == AWSPlatform {
controlplaneComponents = append(controlplaneComponents, []string{"aws-ebs-csi-driver-controller" /*"aws-ebs-csi-driver-operator"*/}...)
}
if hostedcluster.getOLMCatalogPlacement() == olmCatalogPlacementManagement {
controlplaneComponents = append(controlplaneComponents, "certified-operators-catalog", "community-operators-catalog", "redhat-marketplace-catalog", "redhat-operators-catalog")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
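// every control plane workload should declare a preferred podAffinity on the hypershift.openshift.io/hosted-control-plane=<control plane namespace> label so components are co-located on the same nodes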
controlplaneAffinityLabelKey := "hypershift.openshift.io/hosted-control-plane"
controlplaneAffinityLabelValue := controlplaneNS
ocJsonpath := "-ojsonpath={.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchLabels}"
for _, component := range controlplaneComponents {
res := doOcpReq(oc, OcpGet, true, "deploy", component, "-n", controlplaneNS, ocJsonpath)
o.Expect(res).To(o.ContainSubstring(controlplaneAffinityLabelKey))
o.Expect(res).To(o.ContainSubstring(controlplaneAffinityLabelValue))
}
res := doOcpReq(oc, OcpGet, true, "sts", "etcd", "-n", controlplaneNS, ocJsonpath)
o.Expect(res).To(o.ContainSubstring(controlplaneAffinityLabelKey))
o.Expect(res).To(o.ContainSubstring(controlplaneAffinityLabelValue))
res = doOcpReq(oc, OcpGet, true, "pod", "-n", controlplaneNS, "-l", controlplaneAffinityLabelKey+"="+controlplaneAffinityLabelValue)
checkSubstring(res, controlplaneComponents)
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:heli-Critical-48025-Test EBS allocation for nodepool[Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48025 is for AWS - skipping test ...")
}
g.By("create aws nodepools with specified root-volume-type, root-volume size and root-volume-iops")
var dftNodeCount = 1
volumeSizes := []int64{
64, 250, 512,
}
volumeIops := []int64{
4000, 6000,
}
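// table-driven nodepool configs covering gp2, io1 and io2 root volumes; rootVolumeIOPS is only set for the io1/io2 cases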
awsConfigs := []struct {
nodepoolName string
rootVolumeSize *int64
rootVolumeType string
rootVolumeIOPS *int64
}{
{
nodepoolName: "jz-48025-01",
rootVolumeSize: &volumeSizes[0],
rootVolumeType: "gp2",
},
{
nodepoolName: "jz-48025-02",
rootVolumeSize: &volumeSizes[1],
rootVolumeType: "io1",
rootVolumeIOPS: &volumeIops[0],
},
{
nodepoolName: "jz-48025-03",
rootVolumeSize: &volumeSizes[2],
rootVolumeType: "io2",
rootVolumeIOPS: &volumeIops[1],
},
}
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
//delete nodepools simultaneously to save time
for _, cf := range awsConfigs {
hostedcluster.deleteNodePool(cf.nodepoolName)
}
for _, cf := range awsConfigs {
o.Eventually(hostedcluster.pollCheckDeletedNodePool(cf.nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}
}()
for _, cf := range awsConfigs {
NewAWSNodePool(cf.nodepoolName, hostedcluster.name, hostedcluster.namespace).
WithRootVolumeType(cf.rootVolumeType).
WithNodeCount(&dftNodeCount).
WithReleaseImage(releaseImage).
WithRootVolumeSize(cf.rootVolumeSize).
WithRootVolumeIOPS(cf.rootVolumeIOPS).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(cf.nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(),
fmt.Sprintf("nodepool %s ready error", cf.nodepoolName))
o.Expect(hostedcluster.checkAWSNodepoolRootVolumeType(cf.nodepoolName, cf.rootVolumeType)).To(o.BeTrue())
if cf.rootVolumeSize != nil {
o.Expect(hostedcluster.checkAWSNodepoolRootVolumeSize(cf.nodepoolName, *cf.rootVolumeSize)).To(o.BeTrue())
}
if cf.rootVolumeIOPS != nil {
o.Expect(hostedcluster.checkAWSNodepoolRootVolumeIOPS(cf.nodepoolName, *cf.rootVolumeIOPS)).To(o.BeTrue())
}
}
})
// author: [email protected]
g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-43553-Test MHC through nodePools[Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 43553 is for AWS - skipping test ...")
}
g.By("create aws nodepool with replica 2")
npName := "43553np-" + strings.ToLower(exutil.RandStrDefault())
replica := 2
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&replica).
WithReleaseImage(releaseImage).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("enable autoRepair for the nodepool")
hostedcluster.setNodepoolAutoRepair(npName, "true")
o.Eventually(hostedcluster.pollCheckNodepoolAutoRepairEnabled(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s autoRepair enable error", npName))
g.By("find a hosted cluster node based on the nodepool")
labelFilter := "hypershift.openshift.io/nodePool=" + npName
nodes := hostedcluster.getHostedClusterNodeNameByLabelFilter(labelFilter)
o.Expect(nodes).ShouldNot(o.BeEmpty())
nodeName := strings.Split(nodes, " ")[0]
g.By("create a pod to kill kubelet in the corresponding node of the nodepool")
nsName := "guest-43553" + strings.ToLower(exutil.RandStrDefault())
defer doOcpReq(oc, "delete", true, "ns", nsName, "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
doOcpReq(oc, "create", true, "ns", nsName, "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
doOcpReq(oc, "label", true, "ns/"+nsName, "security.openshift.io/scc.podSecurityLabelSync=false", "pod-security.kubernetes.io/enforce=privileged", "pod-security.kubernetes.io/audit=privileged", "pod-security.kubernetes.io/warn=privileged", "--overwrite", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
kubeletKillerTemplate := filepath.Join(hypershiftTeamBaseDir, "kubelet-killer.yaml")
kk := kubeletKiller{
Name: "kubelet-killer-43553",
Namespace: nsName,
NodeName: nodeName,
Template: kubeletKillerTemplate,
}
//create kubelet-killer pod to kill kubelet
parsedWorkloadFile := "ocp-43553-kubelet-killer-template.config"
defer kk.delete(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile)
kk.create(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile)
o.Eventually(hostedcluster.pollCheckNodeHealthByMHC(npName), ShortTimeout, ShortTimeout/10).ShouldNot(o.BeTrue(), fmt.Sprintf("mhc %s check failed", npName))
status := hostedcluster.getHostedClusterNodeReadyStatus(nodeName)
o.Expect(status).ShouldNot(o.BeEmpty())
//initially the node Ready status will be Unknown
o.Expect(status).ShouldNot(o.ContainSubstring("True"))
g.By("check if a new node is provisioned eventually")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(npName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(replica), fmt.Sprintf("node pool %s: not expected ready node number error", npName))
g.By("disable autoRepair")
hostedcluster.setNodepoolAutoRepair(npName, "false")
o.Eventually(hostedcluster.pollCheckNodepoolAutoRepairDisabled(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s autoRepair disable error", npName))
})
// author: [email protected]
g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-48392-NodePool controller updates existing awsmachinetemplate when MachineDeployment rolled out[Serial][Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48392 is for AWS - skipping test ...")
}
g.By("create aws nodepool with replica 2")
npName := "jz-48392-01"
replica := 2
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&replica).
WithReleaseImage(releaseImage).
WithInstanceType("m5.large").
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("update nodepool instance type and check the change")
expectedInstanceType := "m5.xlarge"
hostedcluster.setAWSNodepoolInstanceType(npName, expectedInstanceType)
o.Eventually(hostedcluster.pollCheckAWSNodepoolInstanceType(npName, expectedInstanceType), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s check instance type error", npName))
// check default rolling upgrade of instanceType
upgradeType := hostedcluster.getNodepoolUpgradeType(npName)
o.Expect(upgradeType).Should(o.ContainSubstring("Replace"))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeIntermediateStatus(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s check replace upgrade intermediate state error", npName))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeComplete(npName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s check replace upgrade complete state error", npName))
o.Expect(hostedcluster.checkNodepoolHostedClusterNodeInstanceType(npName)).Should(o.BeTrue())
})
// author: [email protected]
g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:mihuang-Critical-48673-Unblock node deletion-draining timeout[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48673 is for AWS - skipping test ...")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
g.By("create aws nodepool with replica 1")
npName := "48673np-" + strings.ToLower(exutil.RandStrDefault())
replica := 1
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&replica).
WithReleaseImage(releaseImage).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("Get the awsmachines name")
awsMachines := doOcpReq(oc, OcpGet, true, "awsmachines", "-n", hostedcluster.namespace+"-"+hostedcluster.name, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].metadata.name}'`, hostedcluster.namespace, npName))
e2e.Logf("awsMachines: %s", awsMachines)
g.By("Set nodeDrainTimeout to 1m")
drainTime := "1m"
doOcpReq(oc, OcpPatch, true, "nodepools", npName, "-n", hostedcluster.namespace, "-p", fmt.Sprintf(`{"spec":{"nodeDrainTimeout":"%s"}}`, drainTime), "--type=merge")
o.Expect("True").To(o.Equal(doOcpReq(oc, OcpGet, true, "nodepool", npName, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="Ready")].status}`)))
g.By("check machinedeployment and machines")
mdDrainTimeRes := doOcpReq(oc, OcpGet, true, "machinedeployment", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mdDrainTimeRes).To(o.ContainSubstring(drainTime))
g.By("check machines.cluster.x-k8s.io")
mDrainTimeRes := doOcpReq(oc, OcpGet, true, "machines.cluster.x-k8s.io", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mDrainTimeRes).To(o.ContainSubstring(drainTime))
g.By("Check the guestcluster podDisruptionBudget are not be deleted")
pdbNameSpaces := []string{"openshift-console", "openshift-image-registry", "openshift-ingress", "openshift-monitoring", "openshift-operator-lifecycle-manager"}
for _, pdbNameSpace := range pdbNameSpaces {
o.Expect(doOcpReq(oc, OcpGet, true, "podDisruptionBudget", "-n", pdbNameSpace, "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)).ShouldNot(o.BeEmpty())
}
g.By("Scale the nodepool to 0")
doOcpReq(oc, OcpScale, true, "nodepool", npName, "-n", hostedcluster.namespace, "--replicas=0")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(npName), LongTimeout, LongTimeout/10).Should(o.Equal(0), fmt.Sprintf("nodepool is not scaled down to 0 in hostedcluster %s", hostedcluster.name))
g.By("Scale the nodepool to 1")
doOcpReq(oc, OcpScale, true, "nodepool", npName, "-n", hostedcluster.namespace, "--replicas=1")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(npName), LongTimeout, LongTimeout/10).Should(o.Equal(1), fmt.Sprintf("nodepool is not scaled back up to 1 in hostedcluster %s", hostedcluster.name))
g.By("check machinedeployment and machines")
mdDrainTimeRes = doOcpReq(oc, OcpGet, true, "machinedeployment", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mdDrainTimeRes).To(o.ContainSubstring(drainTime))
g.By("check machines.cluster.x-k8s.io")
mDrainTimeRes = doOcpReq(oc, OcpGet, true, "machines.cluster.x-k8s.io", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mDrainTimeRes).To(o.ContainSubstring(drainTime))
})
// author: [email protected]
g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:mihuang-Critical-48936-Test HyperShift cluster Infrastructure TopologyMode", func() {
controllerAvailabilityPolicy := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.controllerAvailabilityPolicy}")
e2e.Logf("controllerAvailabilityPolicy is: %s", controllerAvailabilityPolicy)
if iaasPlatform == "aws" {
o.Expect(doOcpReq(oc, OcpGet, true, "infrastructure", "-ojsonpath={.items[*].status.controlPlaneTopology}")).Should(o.Equal(controllerAvailabilityPolicy))
}
o.Expect(doOcpReq(oc, OcpGet, true, "infrastructure", "-ojsonpath={.items[*].status.controlPlaneTopology}", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)).Should(o.Equal("External"))
})
// author: [email protected]
g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:mihuang-Critical-49436-Test Nodepool conditions[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49436 is for AWS - skipping test ...")
}
g.By("Create nodepool and check nodepool conditions in progress util ready")
caseID := "49436"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
replica := 1
npNameInPlace := "49436np-inplace-" + strings.ToLower(exutil.RandStrDefault())
npNameReplace := "49436np-replace-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(npNameInPlace)
defer hostedcluster.deleteNodePool(npNameReplace)
hostedcluster.createAwsNodePool(npNameReplace, replica)
hostedcluster.createAwsInPlaceNodePool(npNameInPlace, replica, dir)
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameInPlace, []nodePoolCondition{{"Ready", "reason", "ScalingUp"}}), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "in place nodepool ready error")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameReplace, []nodePoolCondition{{"Ready", "reason", "WaitingForAvailableMachines"}, {"UpdatingConfig", "status", "True"}, {"UpdatingVersion", "status", "True"}}), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "replace nodepool ready error")
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npNameInPlace), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npNameReplace), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
hostedcluster.checkNodepoolAllConditions(npNameInPlace)
hostedcluster.checkNodepoolAllConditions(npNameReplace)
g.By("Set nodepool autoscaling, autorepair, and invaild payload image verify nodepool conditions should correctly generate")
hostedcluster.setNodepoolAutoScale(npNameReplace, "3", "1")
hostedcluster.setNodepoolAutoRepair(npNameReplace, "true")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameReplace, []nodePoolCondition{{"AutoscalingEnabled", "message", "Maximum nodes: 3, Minimum nodes: 1"}, {"AutorepairEnabled", "status", "True"}, {"ValidReleaseImage", "status", "True"}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
doOcpReq(oc, OcpPatch, true, "nodepools", npNameReplace, "-n", hostedcluster.namespace, "--type=merge", fmt.Sprintf(`--patch={"spec": {"replicas": 2}}`))
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameReplace, []nodePoolCondition{{"AutoscalingEnabled", "message", "only one of nodePool.Spec.Replicas or nodePool.Spec.AutoScaling can be set"}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
g.By("upgrade nodepool payload InPlace, enable autoscaling and autorepair verify nodepool conditions should correctly generate")
image := hostedcluster.getCPReleaseImage()
hostedcluster.checkNodepoolAllConditions(npNameInPlace)
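// intentionally malformed release image (duplicated registry prefix) so the ValidReleaseImage condition reports "invalid reference format"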
hostedcluster.upgradeNodepoolPayloadInPlace(npNameInPlace, "quay.io/openshift-release-dev/ocp-release:quay.io/openshift-release-dev/ocp-release:4.13.0-ec.1-x86_64")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameInPlace, []nodePoolCondition{{"ValidReleaseImage", "message", "invalid reference format"}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
hostedcluster.upgradeNodepoolPayloadInPlace(npNameInPlace, image)
hostedcluster.setNodepoolAutoScale(npNameInPlace, "6", "3")
hostedcluster.setNodepoolAutoRepair(npNameInPlace, "true")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameInPlace, []nodePoolCondition{{"Ready", "reason", "ScalingUp"}, {"AutoscalingEnabled", "message", "Maximum nodes: 6, Minimum nodes: 3"}, {"AutorepairEnabled", "status", "True"}}), LongTimeout, LongTimeout/30).Should(o.BeTrue(), "nodepool in progress error")
g.By("create nodepool with minversion and verify nodepool condition")
npNameMinVersion := "49436np-minversion-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(npNameMinVersion)
NewAWSNodePool(npNameMinVersion, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&replica).WithReleaseImage("quay.io/openshift-release-dev/ocp-release:4.10.45-x86_64").CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameMinVersion, []nodePoolCondition{{"ValidReleaseImage", "message", getMinSupportedOCPVersion()}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:liangli-Critical-54284-Hypershift creates extra EC2 instances", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 54284 is for AWS - skipping test ...")
}
autoCreatedForInfra := doOcpReq(oc, OcpGet, true, "nodepool", "-n", hostedcluster.namespace, fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].metadata.labels.hypershift\.openshift\.io/auto-created-for-infra}`, hostedcluster.name))
e2e.Logf("autoCreatedForInfra:" + autoCreatedForInfra)
nodepoolName := doOcpReq(oc, OcpGet, true, "nodepool", "-n", hostedcluster.namespace, fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].metadata.name}`, hostedcluster.name))
e2e.Logf("nodepoolName:" + nodepoolName)
additionalTags := doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, nodepoolName, fmt.Sprintf(`-ojsonpath={.spec.template.spec.additionalTags.kubernetes\.io/cluster/%s}`, autoCreatedForInfra))
o.Expect(additionalTags).Should(o.ContainSubstring("owned"))
generation := doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, nodepoolName, `-ojsonpath={.metadata.generation}`)
o.Expect(generation).Should(o.Equal("1"))
})
// author: [email protected]
g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:liangli-Critical-54551-Reconcile NodePool label against Nodes[Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 54551 is for AWS - skipping test ...")
}
replica := 1
nodepoolName := "54551np-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(nodepoolName)
hostedcluster.createAwsNodePool(nodepoolName, replica)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(nodepoolName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
g.By("Check if the nodepool name is propagated from the nodepool to the machine annotation")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "awsmachines", "-n", hostedcluster.namespace+"-"+hostedcluster.name, `-ojsonpath={.items[*].metadata.annotations.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replica))
g.By("Check if the nodepool name is propagated from machine to node label")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "node", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile, `-ojsonpath={.items[*].metadata.labels.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replica))
g.By("Scale up the nodepool")
replicasIntNew := replica + 1
defer func() {
doOcpReq(oc, OcpScale, true, "nodepool", "-n", hostedcluster.namespace, nodepoolName, fmt.Sprintf("--replicas=%d", replica))
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(nodepoolName), LongTimeout, LongTimeout/10).Should(o.Equal(replica), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedcluster.name))
}()
doOcpReq(oc, OcpScale, true, "nodepool", "-n", hostedcluster.namespace, nodepoolName, fmt.Sprintf("--replicas=%d", replicasIntNew))
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(nodepoolName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(replicasIntNew), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedcluster.name))
g.By("Check if the nodepool name is propagated from the nodepool to the machine annotation")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "awsmachines", "-n", hostedcluster.namespace+"-"+hostedcluster.name, `-ojsonpath={.items[*].metadata.annotations.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replicasIntNew))
g.By("Check if the nodepool name is propagated from machine to node label")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "node", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile, `-ojsonpath={.items[*].metadata.labels.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replicasIntNew))
})
// author: [email protected]
g.It("Author:mihuang-ROSA-OSD_CCS-HyperShiftMGMT-Longduration-NonPreRelease-Critical-49108-Critical-49499-Critical-59546-Critical-60490-Critical-61970-Separate client certificate trust from the global hypershift CA", func(ctx context.Context) {
if iaasPlatform != "aws" && iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49108 is for AWS or Azure - For other platforms, please set the corresponding expectedMetric to make this case effective. Skipping test ...")
}
exutil.SkipOnAKSNess(ctx, oc, false)
g.By("OCP-61970: OCPBUGS-10792-Changing the api group of the hypershift namespace servicemonitor back to coreos.com")
o.Expect(doOcpReq(oc, OcpGet, true, "servicemonitor", "-n", "hypershift", "-ojsonpath={.items[*].apiVersion}")).Should(o.ContainSubstring("coreos.com"))
g.By("Add label to namespace enable monitoring for hosted control plane component.")
defer doOcpReq(oc, "label", true, "namespace", hostedcluster.namespace+"-"+hostedcluster.name, "openshift.io/cluster-monitoring-")
doOcpReq(oc, "label", true, "namespace", hostedcluster.namespace+"-"+hostedcluster.name, "openshift.io/cluster-monitoring=true", "--overwrite=true")
g.By("OCP-49499 && 49108 Check metric works well for the hosted control plane component.")
o.Expect(doOcpReq(oc, OcpGet, true, "ns", hostedcluster.namespace+"-"+hostedcluster.name, "--show-labels")).Should(o.ContainSubstring("openshift.io/cluster-monitoring=true"))
serviceMonitors := strings.Split(doOcpReq(oc, OcpGet, true, "servicemonitors", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.items[*].metadata.name}"), " ")
o.Expect(serviceMonitors).ShouldNot(o.BeEmpty())
podMonitors := strings.Split(doOcpReq(oc, OcpGet, true, "podmonitors", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.items[*].metadata.name}"), " ")
o.Expect(podMonitors).ShouldNot(o.BeEmpty())
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
return strings.Contains(doOcpReq(oc, OcpExec, true, "-n", "openshift-monitoring", "prometheus-k8s-0", "-c", "prometheus", "--", "sh", "-c", fmt.Sprintf(" curl -k -g -H \"Authorization: Bearer %s\" https://thanos-querier.openshift-monitoring.svc:9091/api/v1/alerts", token)), `"status":"success"`)
}, 5*LongTimeout, LongTimeout/5).Should(o.BeTrue(), fmt.Sprintf("not all metrics in hostedcluster %s are ready", hostedcluster.name))
o.Eventually(func() bool {
metricsOutput, err := oc.AsAdmin().Run("exec").Args("-n", "openshift-monitoring", "prometheus-k8s-0", "-c", "prometheus", "--", "sh", "-c", fmt.Sprintf("curl -sS --cacert /etc/prometheus/certs/configmap_%s_root-ca_ca.crt --key /etc/prometheus/certs/secret_%s_metrics-client_tls.key --cert /etc/prometheus/certs/secret_%s_metrics-client_tls.crt https://openshift-apiserver.%s.svc/metrics", hostedcluster.namespace+"-"+hostedcluster.name, hostedcluster.namespace+"-"+hostedcluster.name, hostedcluster.namespace+"-"+hostedcluster.name, hostedcluster.namespace+"-"+hostedcluster.name)).Output()
if err != nil {
return false
}
var expectedMetric string
switch iaasPlatform {
case "aws":
expectedMetric = "# HELP aggregator_openapi_v2_regeneration_count [ALPHA] Counter of OpenAPI v2 spec regeneration count broken down by causing APIService name and reason."
case "azure":
expectedMetric = "# HELP aggregator_discovery_aggregation_count_total [ALPHA] Counter of number of times discovery was aggregated"
}
return strings.Contains(metricsOutput, expectedMetric)
}, 5*LongTimeout, LongTimeout/5).Should(o.BeTrue(), fmt.Sprintf("not all metrics in hostedcluster %s are ready", hostedcluster.name))
g.By("OCP-49499 Check the clusterID is exist")
o.Expect(doOcpReq(oc, OcpGet, true, "hostedclusters", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.clusterID}")).ShouldNot(o.BeEmpty())
g.By("OCP-49499 Check the clusterID label in serviceMonitors/podMonitors and target is up")
o.Expect(doOcpReq(oc, OcpExec, true, "-n", "openshift-monitoring", "prometheus-k8s-0", "-c", "prometheus", "--", "sh", "-c", `curl -k -H "Authorization: Bearer `+token+`" https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`)).Should(o.ContainSubstring("up"))
for _, serviceMonitor := range serviceMonitors {
o.Expect(doOcpReq(oc, OcpGet, true, "servicemonitors", serviceMonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.spec.endpoints[?(@.relabelings)]}")).Should(o.ContainSubstring(`"targetLabel":"_id"`))
o.Expect(doOcpReq(oc, OcpGet, true, "servicemonitors", serviceMonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.apiVersion}")).Should(o.ContainSubstring("coreos.com"))
}
for _, podmonitor := range podMonitors {
o.Expect(doOcpReq(oc, OcpGet, true, "podmonitors", podmonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.spec.podMetricsEndpoints[?(@.relabelings)]}")).Should(o.ContainSubstring(`"targetLabel":"_id"`))
o.Expect(doOcpReq(oc, OcpGet, true, "podmonitors", podmonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.apiVersion}")).Should(o.ContainSubstring("coreos.com"))
}
g.By("OCP-59546 Export HostedCluster metrics")
hostedClusterMetricsName := []string{"hypershift_cluster_available_duration_seconds", "hypershift_cluster_deletion_duration_seconds", "hypershift_cluster_guest_cloud_resources_deletion_duration_seconds", "hypershift_cluster_identity_providers", "hypershift_cluster_initial_rollout_duration_seconds", "hypershift_cluster_limited_support_enabled", "hypershift_cluster_proxy", "hypershift_hostedclusters", "hypershift_hostedclusters_failure_conditions", "hypershift_hostedcluster_nodepools", "hypershift_nodepools", "hypershift_nodepools_failure_conditions", "hypershift_nodepools_size"}
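// the hypershift operator (app=operator pods in the hypershift namespace) exposes these HostedCluster/NodePool metrics on its metrics endpoint at :9000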
hypershiftOperatorPodName := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", "hypershift", "-l", "app=operator", `-ojsonpath={.items[*].metadata.name}`), " ")
var metrics []string
for _, podName := range hypershiftOperatorPodName {
for _, name := range hostedClusterMetricsName {
if strings.Contains(doOcpReq(oc, OcpExec, true, "-n", "hypershift", podName, "--", "curl", "0.0.0.0:9000/metrics"), name) {
metrics = append(metrics, name)
}
}
}
e2e.Logf("metrics: %v is exported by hypershift operator", metrics)
g.By("OCP-60490 Verify that cert files not been modified")
dirname := "/tmp/kube-root-60490"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
doOcpReq(oc, "cp", true, "-n", "openshift-console", doOcpReq(oc, OcpGet, true, "pod", "-n", "openshift-console", "-o", "jsonpath={.items[0].metadata.name}")+":"+fmt.Sprintf("/var/run/secrets/kubernetes.io/serviceaccount/..data/ca.crt"), dirname+"/serviceaccount_ca.crt")
doOcpReq(oc, "extract", true, "cm/kube-root-ca.crt", "-n", "openshift-console", "--to="+dirname, "--confirm")
var bashClient = NewCmdClient().WithShowInfo(true)
md5Value1, err := bashClient.Run(fmt.Sprintf("md5sum %s | awk '{print $1}'", dirname+"/serviceaccount_ca.crt")).Output()
o.Expect(err).NotTo(o.HaveOccurred())
md5Value2, err := bashClient.Run(fmt.Sprintf("md5sum %s | awk '{print $1}'", dirname+"/ca.crt")).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(md5Value1).Should(o.Equal(md5Value2))
g.By("Verify that client certificate trust is separated from the global Hypershift CA")
o.Expect(bashClient.Run(fmt.Sprintf("grep client-certificate-data %s | grep -Eo \"[^ ]+$\" | base64 -d > %s", os.Getenv("KUBECONFIG"), dirname+"/system-admin_client.crt")).Output()).Should(o.BeEmpty())
res1, err := bashClient.Run(fmt.Sprintf("openssl verify -CAfile %s %s", dirname+"/serviceaccount_ca.crt", dirname+"/system-admin_client.crt")).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(res1).Should(o.ContainSubstring(fmt.Sprintf("error %s: verification failed", dirname+"/system-admin_client.crt")))
res2, err := bashClient.Run(fmt.Sprintf("openssl verify -CAfile %s %s", dirname+"/ca.crt", dirname+"/system-admin_client.crt")).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(res2).Should(o.ContainSubstring(fmt.Sprintf("error %s: verification failed", dirname+"/system-admin_client.crt")))
})
// TODO: fix it so it could run as a part of the aws-ipi-ovn-hypershift-private-mgmt-f7 job.
// author: [email protected]
g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:mihuang-Critical-60744-Better signal for NodePool inability to talk to management side [Disruptive] [Flaky]", func() {
g.By("Create a nodepool to verify that NodePool inability to talk to management side")
if hostedclusterPlatform != "aws" {
g.Skip("HostedCluster platform is " + hostedclusterPlatform + " which is not supported in this test.")
}
replica := 1
nodepoolName := "60744np-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(nodepoolName)
hostedcluster.createAwsNodePool(nodepoolName, replica)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(nodepoolName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
o.Expect(hostedcluster.checkNodePoolConditions(nodepoolName, []nodePoolCondition{{"ReachedIgnitionEndpoint", "status", "True"}})).Should(o.BeTrue(), "nodepool ready error")
g.By("Check if metric 'ign_server_get_request' is exposed for nodepool by ignition server")
o.Expect(strings.Contains(doOcpReq(oc, "logs", true, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-l", "app=ignition-server"), "ignition")).Should(o.BeTrue())
ignitionPodNameList := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-o", `jsonpath={.items[?(@.metadata.labels.app=="ignition-server")].metadata.name}`), " ")
foundMetric := false
for _, ignitionPodName := range ignitionPodNameList {
if strings.Contains(doOcpReq(oc, OcpExec, true, "-n", hostedcluster.namespace+"-"+hostedcluster.name, ignitionPodName, "--", "curl", "0.0.0.0:8080/metrics"), fmt.Sprintf(`ign_server_get_request{nodePool="clusters/%s"}`, nodepoolName)) {
foundMetric = true
break
}
}
o.Expect(foundMetric).Should(o.BeTrue(), "ignition server get request metric not found")
g.By("Modify ACL on VPC, deny all inbound and outbound traffic")
vpc := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", "jsonpath={.spec.platform.aws.cloudProviderConfig.vpc}")
region := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", "jsonpath={.spec.platform.aws.region}")
var bashClient = NewCmdClient().WithShowInfo(true)
acl, err := bashClient.Run(fmt.Sprintf(`aws ec2 describe-network-acls --filters Name=vpc-id,Values=%s --query 'NetworkAcls[].NetworkAclId' --region %s --output text`, vpc, region)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(acl).Should(o.ContainSubstring("acl-"))
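// replace network ACL entry 100 with deny-all rules for both ingress and egress (the deferred calls restore allow-all), so nodes in the VPC can no longer reach the ignition endpoint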
defer func() {
bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --ingress --rule-number 100 --protocol -1 --rule-action allow --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
}()
cmdOutDeny1, err := bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --ingress --rule-number 100 --protocol -1 --rule-action deny --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmdOutDeny1).Should(o.BeEmpty())
defer func() {
bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --egress --rule-number 100 --protocol -1 --rule-action allow --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
}()
cmdOutDeny2, err := bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --egress --rule-number 100 --protocol -1 --rule-action deny --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmdOutDeny2).Should(o.BeEmpty())
g.By("Check metric 'ign_server_get_request' is not exposed for nodepool by ignition server after ACL modification")
nodepoolName1 := "60744np-1-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(nodepoolName1)
hostedcluster.createAwsNodePool(nodepoolName1, replica)
hostedcluster.setNodepoolAutoRepair(nodepoolName1, "true")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(nodepoolName1, []nodePoolCondition{{"ReachedIgnitionEndpoint", "status", "False"}, {"AutorepairEnabled", "status", "False"}}), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
for _, ignitionPodName := range ignitionPodNameList {
o.Expect(doOcpReq(oc, OcpExec, true, "-n", hostedcluster.namespace+"-"+hostedcluster.name, ignitionPodName, "--", "curl", "0.0.0.0:8080/metrics")).ShouldNot(o.ContainSubstring(fmt.Sprintf(`ign_server_get_request{nodePool="clusters/%s"}`, nodepoolName1)))
}
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:mihuang-Critical-60903-Test must-gather on the hostedcluster", func() {
mustgatherDir := "/tmp/must-gather-60903"
defer os.RemoveAll(mustgatherDir)
err := os.MkdirAll(mustgatherDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check must-gather works well on the hostedcluster.")
err = oc.AsGuestKubeconf().Run(OcpAdm).Args("must-gather", "--dest-dir="+mustgatherDir, "--", "/usr/bin/gather_audit_logs").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred(), "error running must-gather against the HC")
var bashClient = NewCmdClient().WithShowInfo(true)
cmdOut, err := bashClient.Run(fmt.Sprintf(`du -h %v`, mustgatherDir)).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(cmdOut).ShouldNot(o.Equal("0B"))
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:mihuang-Critical-61604-Validate network input and signal in hyperv1.ValidHostedClusterConfiguration[Disruptive]", func() {
g.By("Patch hostedcluster to set network to invalid value and check the ValidConfiguration conditions of hostedcluster CR")
clusterNetworkCidr := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", `jsonpath={.spec.networking.clusterNetwork[0].cidr}`)
defer doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"networking":{"clusterNetwork":[{"cidr": "`+clusterNetworkCidr+`"}]}}}`)
doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"networking":{"clusterNetwork":[{"cidr": "172.31.0.0/16"}]}}}`)
o.Eventually(func() bool {
if strings.Contains(doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", `jsonpath={.status.conditions[?(@.type=="ValidConfiguration")].reason}`), "InvalidConfiguration") {
return true
}
return false
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "conditions are not changed")
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:mihuang-Critical-62195-Add validation for taint.value in nodePool[Serial][Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 62195 is for AWS - skipping test ...")
}
g.By("Create a nodepool with invalid taint value and check the ValidConfiguration conditions of hostedcluster CR")
nodepoolName := "62195np" + strings.ToLower(exutil.RandStrDefault())
defer func() {
hostedcluster.deleteNodePool(nodepoolName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
hostedcluster.createAwsNodePool(nodepoolName, 1)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
nodeName := doOcpReq(oc, OcpGet, true, "node", "-l", "hypershift.openshift.io/nodePool="+nodepoolName, "-ojsonpath={.items[*].metadata.name}", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "nodes", nodeName, "node-role.kubernetes.io/infra=//:NoSchedule", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile).Output()
o.Expect(err).Should(o.HaveOccurred())
defer doOcpReq(oc, OcpAdm, true, "taint", "nodes", nodeName, "node-role.kubernetes.io/infra=foo:NoSchedule-", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "nodes", nodeName, "node-role.kubernetes.io/infra=foo:NoSchedule", "--overwrite", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(doOcpReq(oc, OcpGet, true, "node", nodeName, "-o", "jsonpath={.spec.taints[0].value}", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)).Should(o.Equal("foo"))
})
// author: [email protected]
g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-60140-[AWS]-create default security group when no security group is specified in a nodepool[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while ocp-60140 is for AWS - skipping test ...")
}
caseID := "60140"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check hosted cluster's default worker securitygroup ID")
defaultSG := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}`)
e2e.Logf("defaultWorkerSecurityGroupID in hostedcluster is %s", defaultSG)
g.By("check nodepool and awsmachinetemplate's securitygroup ID")
nodepoolName := doOcpReq(oc, OcpGet, true, "nodepool", "-n", hostedcluster.namespace, "--ignore-not-found", fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].metadata.name}`, hostedcluster.name))
o.Expect(nodepoolName).ShouldNot(o.BeEmpty())
if arr := strings.Split(nodepoolName, " "); len(arr) > 1 {
nodepoolName = arr[0]
}
// OCPBUGS-29723,HOSTEDCP-1419 make sure there is no sg spec in nodepool
o.Expect(doOcpReq(oc, OcpGet, false, "nodepool", "-n", hostedcluster.namespace, nodepoolName, "--ignore-not-found", `-ojsonpath={.spec.platform.aws.securityGroups}`)).Should(o.BeEmpty())
queryJson := fmt.Sprintf(`-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.additionalSecurityGroups[0].id}`, hostedcluster.namespace, nodepoolName)
o.Expect(doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "--ignore-not-found", "-n", hostedcluster.namespace+"-"+hostedcluster.name, queryJson)).Should(o.ContainSubstring(defaultSG))
g.By("create nodepool without default securitygroup")
npCount := 1
npWithoutSG := "np-60140-default-sg"
defer func() {
hostedcluster.deleteNodePool(npWithoutSG)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npWithoutSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
// OCPBUGS-29723,HOSTEDCP-1419 there is no sg spec in np now. Just use NewAWSNodePool() to create a np without sg settings
NewAWSNodePool(npWithoutSG, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&npCount).CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npWithoutSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), " check np ready error")
g.By("check the new nodepool should use the default sg in the hosted cluster")
queryJson = fmt.Sprintf(`-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.additionalSecurityGroups[0].id}`, hostedcluster.namespace, npWithoutSG)
o.Expect(doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, queryJson)).Should(o.ContainSubstring(defaultSG))
g.By("create sg by aws client and use it to create a nodepool")
vpcID := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.spec.platform.aws.cloudProviderConfig.vpc}`)
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
groupID, err := awsClient.CreateSecurityGroup(fmt.Sprintf("ocp-60140-sg-%s", strings.ToLower(exutil.RandStrDefault())), vpcID, "hypershift ocp-60140")
o.Expect(err).ShouldNot(o.HaveOccurred())
defer awsClient.DeleteSecurityGroup(groupID)
npWithExistingSG := "np-60140-existing-sg"
defer func() {
hostedcluster.deleteNodePool(npWithExistingSG)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npWithExistingSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npWithExistingSG, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&npCount).WithSecurityGroupID(groupID).CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npWithExistingSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npWithExistingSG))
queryJson = fmt.Sprintf(`-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.additionalSecurityGroups}`, hostedcluster.namespace, npWithExistingSG)
sgInfo := doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, queryJson)
o.Expect(sgInfo).Should(o.ContainSubstring(groupID))
// HOSTEDCP-1419 the default sg should be included all the time
o.Expect(sgInfo).Should(o.ContainSubstring(defaultSG))
g.By("nodepool security group test passed")
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:heli-Critical-63867-[AWS]-awsendpointservice uses the default security group for the VPC Endpoint", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while ocp-63867 is for AWS - skipping test ...")
}
endpointAccessType := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.spec.platform.aws.endpointAccess}`)
if endpointAccessType != PublicAndPrivate && endpointAccessType != Private {
g.Skip(fmt.Sprintf("ocp-63867 is for PublicAndPrivate or Private hosted clusters on AWS, skip it for the endpointAccessType is %s", endpointAccessType))
}
g.By("check status of cluster again by condition type Available")
status := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="Available")].status}`)
o.Expect(status).Should(o.Equal("True"))
g.By("get default sg of vpc")
vpcID := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.spec.platform.aws.cloudProviderConfig.vpc}`)
e2e.Logf("hc vpc is %s", vpcID)
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
defaultVPCSG, err := awsClient.GetDefaultSecurityGroupByVpcID(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("in PublicAndPrivate or Private clusters, default sg of vpc should not has hypershift tags kubernetes.io/cluster/{infra-id}:owned")
hcTagKey := HyperShiftResourceTagKeyPrefix + hcInfraID
for _, tag := range defaultVPCSG.Tags {
if tag.Key != nil && *tag.Key == hcTagKey {
o.Expect(*tag.Value).ShouldNot(o.Equal(HyperShiftResourceTagKeyValue))
}
}
g.By("check hosted cluster's default worker security group ID")
defaultWorkerSG := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}`)
e2e.Logf("defaultWorkerSecurityGroupID in hostedcluster is %s", defaultWorkerSG)
o.Expect(defaultWorkerSG).NotTo(o.Equal(defaultVPCSG))
g.By("check endpointID by vpc")
endpointIDs := doOcpReq(oc, OcpGet, true, "awsendpointservice", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "--ignore-not-found", `-ojsonpath={.items[*].status.endpointID}`)
endpointIDArr := strings.Split(endpointIDs, " ")
o.Expect(endpointIDArr).ShouldNot(o.BeEmpty())
for _, epID := range endpointIDArr {
sgs, err := awsClient.GetSecurityGroupsByVpcEndpointID(epID)
o.Expect(err).NotTo(o.HaveOccurred())
for _, sg := range sgs {
e2e.Logf("endpoint %s security group %s, %s, ", epID, *sg.GroupId, *sg.GroupName)
o.Expect(*sg.GroupId).Should(o.Equal(defaultWorkerSG))
o.Expect(*sg.GroupName).Should(o.Equal(hcInfraID + "-default-sg"))
}
}
g.By("ocp-63867 the default security group of endpointservice test passed")
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:liangli-Critical-48510-Test project configuration resources on the guest cluster[Disruptive]", func() {
caseID := "48510"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
g.By("Generate the default project template")
_, err = bashClient.Run(fmt.Sprintf("oc adm create-bootstrap-project-template -oyaml --kubeconfig=%s > %s", hostedcluster.hostedClustersKubeconfigFile, dir+"/template.yaml")).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Add ResourceQuota and LimitRange in the template")
patchYaml := `- apiVersion: v1
kind: "LimitRange"
metadata:
name: ${PROJECT_NAME}-limits
spec:
limits:
- type: "Container"
default:
cpu: "1"
memory: "1Gi"
defaultRequest:
cpu: "500m"
memory: "500Mi"
- apiVersion: v1
kind: ResourceQuota
metadata:
name: ${PROJECT_NAME}-quota
spec:
hard:
pods: "10"
requests.cpu: "4"
requests.memory: 8Gi
limits.cpu: "6"
limits.memory: 16Gi
requests.storage: 20G
`
tempFilePath := filepath.Join(dir, "temp.yaml")
err = ioutil.WriteFile(tempFilePath, []byte(patchYaml), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = bashClient.Run(fmt.Sprintf(`sed -i '/^parameters:/e cat %s' %s`, dir+"/temp.yaml", dir+"/template.yaml")).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
defer hostedcluster.oc.AsGuestKubeconf().Run("delete").Args("-f", dir+"/template.yaml", "-n", "openshift-config").Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("apply").Args("-f", dir+"/template.yaml", "-n", "openshift-config").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Edit the project config resource to include projectRequestTemplate in the spec")
defer hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestTemplate": null}}`).Execute()
}, "openshift-apiserver")
hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestTemplate":{"name":"project-request"}}}`).Execute()
}, "openshift-apiserver")
g.By("Create a new project 'test-48510'")
origContxt, contxtErr := oc.SetGuestKubeconf(hostedcluster.hostedClustersKubeconfigFile).AsGuestKubeconf().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
err = oc.SetGuestKubeconf(hostedcluster.hostedClustersKubeconfigFile).AsGuestKubeconf().Run("config").Args("use-context", origContxt).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = hostedcluster.oc.AsGuestKubeconf().Run("delete").Args("project", "test-48510").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = hostedcluster.oc.AsGuestKubeconf().Run("new-project").Args("test-48510").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Check if new project config resource includes ResourceQuota and LimitRange")
testProjectDes, err := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("resourcequota", "-n", "test-48510", "-oyaml").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
checkSubstring(testProjectDes, []string{`pods: "10"`, `requests.cpu: "4"`, `requests.memory: 8Gi`, `limits.cpu: "6"`, `limits.memory: 16Gi`, `requests.storage: 20G`})
g.By("Disable project self-provisioning, remove the self-provisioner cluster role from the group")
defer hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"subjects": [{"apiGroup":"rbac.authorization.k8s.io","kind":"Group","name":"system:authenticated:oauth"}]}`).Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"subjects": null}`).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
selfProvisionersDes, err := hostedcluster.oc.AsGuestKubeconf().Run("describe").Args("clusterrolebinding.rbac", "self-provisioners").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(selfProvisionersDes).ShouldNot(o.ContainSubstring("system:authenticated:oauth"))
g.By("Edit the self-provisioners cluster role binding to prevent automatic updates to the role")
defer hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"metadata":{"annotations":{"rbac.authorization.kubernetes.io/autoupdate":"true"}}}`).Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"metadata":{"annotations":{"rbac.authorization.kubernetes.io/autoupdate":"false"}}}`).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
selfProvisionersDes, err = hostedcluster.oc.AsGuestKubeconf().Run("describe").Args("clusterrolebinding.rbac", "self-provisioners").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(selfProvisionersDes).ShouldNot(o.ContainSubstring(`rbac.authorization.kubernetes.io/autoupdate: "false"`))
g.By("Edit project config resource to include the project request message")
defer hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestMessage": null}}`).Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestMessage":"To request a project, contact your system administrator at [email protected] :-)"}}`).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Create a new project as a non-admin user")
var testRequestMess string
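// With self-provisioning disabled, project creation as a regular user is rejected and the API returns the configured projectRequestMessage instead.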
o.Eventually(func() string {
testRequestMess, _ = bashClient.Run(fmt.Sprintf("oc new-project test-request-message --as=liangli --as-group=system:authenticated --as-group=system:authenticated:oauth --kubeconfig=%s || true", hostedcluster.hostedClustersKubeconfigFile)).Output()
return testRequestMess
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("To request a project, contact your system administrator at [email protected] :-)"), "check projectRequestMessage error")
})
// author: [email protected]
g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:heli-Critical-52318-[AWS]-Enforce machineconfiguration.openshift.io/role worker in machine config[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Skipping incompatible platform %s", iaasPlatform))
}
g.By("create a configmap for MachineConfig")
fakePubKey := "AAAAB3NzaC1yc2EAAAADAQABAAABgQC0IRdwFtIIy0aURM64dDy0ogqJlV0aqDqw1Pw9VFc8bFSI7zxQ2c3Tt6GrC+Eg7y6mXQbw59laiGlyA+Qmyg0Dgd7BUVg1r8j" +
"RR6Xhf5XbI+tQBhoTQ6BBJKejE60LvyVUiBstGAm7jy6BkfN/5Ulvd8r3OVDYcKczVECWuOQeuPRyTHomR4twQj79+shZkN6tjptQOTTSDJJYIZOmaj9TsDN4bLIxqDYWZC0F6+" +
"TvBoRV7xxOBU8DHxZ9wbCZN4IyEs6U77G8bQBP2Pjbp5NrG93nvdnLcv" +
`CDsnSOFuiay1KNqjOclIlsrb84qN9TFL3PgLoGohz2vInlaTnopCh4m7+xDgu5bdh1B/hNjDHDTHFpHPP8z7vkWM0I4I8q853E4prGRBpyVztcObeDr/0M/Vnwawyb9Lia16J5hSBi0o3UjxE= jiezhao@cube`
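// The ConfigMap rendered from the configmap-machineconfig.yaml template is expected to wrap a MachineConfig carrying the fake SSH key; it is referenced from the NodePool via spec.config below.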
configmapMachineConfTemplate := filepath.Join(hypershiftTeamBaseDir, "configmap-machineconfig.yaml")
configmapName := "custom-ssh-config-52318"
cm := configmapMachineConf{
Name: configmapName,
Namespace: hostedcluster.namespace,
SSHAuthorizedKeys: fakePubKey,
Template: configmapMachineConfTemplate,
}
parsedCMFile := "ocp-52318-configmap-machineconfig-template.config"
defer cm.delete(oc, "", parsedCMFile)
cm.create(oc, "", parsedCMFile)
doOcpReq(oc, OcpGet, true, "configmap", configmapName, "-n", hostedcluster.namespace)
g.By("create a nodepool")
npName := "np-52318"
npCount := 1
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&npCount).CreateAWSNodePool()
patchOptions := fmt.Sprintf(`{"spec":{"config":[{"name":"%s"}]}}`, configmapName)
doOcpReq(oc, OcpPatch, true, "nodepool", npName, "-n", hostedcluster.namespace, "--type", "merge", "-p", patchOptions)
g.By("condition UpdatingConfig should be here to reflect nodepool config rolling upgrade")
o.Eventually(func() bool {
return "True" == doOcpReq(oc, OcpGet, false, "nodepool", npName, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="UpdatingConfig")].status}`)
}, ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "nodepool condition UpdatingConfig not found error")
g.By("condition UpdatingConfig should be removed when upgrade completed")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, false, "nodepool", npName, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="UpdatingConfig")].status}`)
}, LongTimeout, LongTimeout/10).Should(o.BeEmpty(), "nodepool condition UpdatingConfig should be removed")
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("check ssh key in worker nodes")
o.Eventually(func() bool {
workerNodes := hostedcluster.getNodeNameByNodepool(npName)
o.Expect(workerNodes).ShouldNot(o.BeEmpty())
for _, node := range workerNodes {
res, err := hostedcluster.DebugHostedClusterNodeWithChroot("52318", node, "cat", "/home/core/.ssh/authorized_keys")
if err != nil {
e2e.Logf("debug node error node %s: error: %s", node, err.Error())
return false
}
if !strings.Contains(res, fakePubKey) {
e2e.Logf("could not find expected key in node %s: debug ouput: %s", node, res)
return false
}
}
return true
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "key not found error in nodes")
g.By("ocp-52318 Enforce machineconfiguration.openshift.io/role worker in machine config test passed")
})
// author: [email protected]
g.It("HyperShiftMGMT-Author:liangli-Critical-48511-Test project configuration resources on the guest cluster[Serial]", func() {
caseID := "48511"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new project 'test-48511'")
origContxt, contxtErr := hostedcluster.oc.AsGuestKubeconf().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
err = hostedcluster.oc.AsGuestKubeconf().Run("config").Args("use-context", origContxt).Execute()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
err = hostedcluster.oc.AsGuestKubeconf().Run("delete").Args("project", "test-48511").Execute()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
}()
err = hostedcluster.oc.AsGuestKubeconf().Run("new-project").Args("test-48511").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Create a build")
helloWorldSource := "quay.io/openshifttest/ruby-27:1.2.0~https://github.com/openshift/ruby-hello-world"
err = hostedcluster.oc.AsGuestKubeconf().Run("new-build").Args(helloWorldSource, "--name=build-48511", "-n", "test-48511").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Check build")
var buildPhase string
o.Eventually(func() string {
buildPhase, _ = hostedcluster.oc.AsGuestKubeconf().Run("get").Args("builds", "-n", "test-48511", "build-48511-1", `-ojsonpath={.status.phase}`).Output()
return buildPhase
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("Complete"), "wait for the rebuild job complete timeout")
g.By("Add a label on a node")
nodeName, err := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", `-ojsonpath={.items[0].metadata.name}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
defer hostedcluster.oc.AsGuestKubeconf().Run("label").Args("node", nodeName, "test-").Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("label").Args("node", nodeName, "test=test1", "--overwrite").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Update nodeSelector in build.config.openshift.io/cluster")
defer hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("build.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"buildOverrides": null}}`).Execute()
}, "openshift-controller-manager")
hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("build.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"buildOverrides":{"nodeSelector":{"test":"test1"}}}}`).Execute()
}, "openshift-controller-manager")
g.By("Re-run a build")
err = hostedcluster.oc.AsGuestKubeconf().Run("start-build").Args("--from-build=build-48511-1", "-n", "test-48511", "build-48511-1").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Check a new build")
o.Eventually(func() string {
buildPhase, _ = hostedcluster.oc.AsGuestKubeconf().Run("get").Args("builds", "-n", "test-48511", "build-48511-2", `-ojsonpath={.status.phase}`).Output()
return buildPhase
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("Complete"), "wait for the rebuild job complete timeout")
g.By("Check if a new build pod runs on correct node")
podNodeName, _ := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("pod", "-n", "test-48511", "build-48511-2-build", `-ojsonpath={.spec.nodeName}`).Output()
o.Expect(podNodeName).Should(o.Equal(nodeName))
})
// author: [email protected]
// Note: for the OAuth IDPs GitLab and GitHub we cannot use <oc login> to verify them directly. Here we just configure them in
// the hostedcluster and check that the related oauth pod/configmap resources are as expected.
// The whole oauth e2e is covered by oauth https://issues.redhat.com/browse/OCPQE-13439
// LDAP: OCP-23334, GitHub: OCP-22274, GitLab: OCP-22271. There is no plan to automate them because of some IDP providers' limitations.
g.It("HyperShiftMGMT-Author:heli-Critical-54476-Critical-62511-Ensure that OAuth server can communicate with GitLab (GitHub) [Serial]", func() {
g.By("backup current hostedcluster CR")
var bashClient = NewCmdClient()
hcBackupFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hc", hostedcluster.name, "-n", hostedcluster.namespace, "-oyaml").OutputToFile("hypershift-54476-62511")
o.Expect(err).ShouldNot(o.HaveOccurred())
// defer the cleanup only after the backup file path is known so the deferred os.Remove gets the real path
defer os.Remove(hcBackupFile)
_, err = bashClient.Run(fmt.Sprintf("sed -i '/resourceVersion:/d' %s", hcBackupFile)).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("get OAuth callback URL")
gitlabIDPName := "gitlabidp-54476"
gitlabSecretName := "gitlab-secret-54476"
fakeSecret := "fakeb577d60316d0573de82b8545c8e75c2a48156bcc"
gitlabConf := fmt.Sprintf(`{"spec":{"configuration":{"oauth":{"identityProviders":[{"gitlab":{"clientID":"fake4c397","clientSecret":{"name":"%s"},"url":"https://gitlab.com"},"mappingMethod":"claim","name":"%s","type":"GitLab"}]}}}}`, gitlabSecretName, gitlabIDPName)
githubIDPName := "githubidp-62511"
githubSecretName := "github-secret-62511"
githubConf := fmt.Sprintf(`{"spec":{"configuration":{"oauth":{"identityProviders":[{"github":{"clientID":"f90150abb","clientSecret":{"name":"%s"}},"mappingMethod":"claim","name":"%s","type":"GitHub"}]}}}}`, githubSecretName, githubIDPName)
cpNameSpace := hostedcluster.namespace + "-" + hostedcluster.name
callBackUrl := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.status.oauthCallbackURLTemplate}")
e2e.Logf("OAuth callback URL: %s", callBackUrl)
oauthRoute := doOcpReq(oc, OcpGet, true, "route", "oauth", "-n", cpNameSpace, "-ojsonpath={.spec.host}")
o.Expect(callBackUrl).Should(o.ContainSubstring(oauthRoute))
defer func() {
doOcpReq(oc, OcpApply, false, "-f", hcBackupFile)
o.Eventually(func() bool {
status := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="Available")].status}`)
if strings.TrimSpace(status) != "True" {
return false
}
replica := doOcpReq(oc, OcpGet, true, "deploy", "oauth-openshift", "-n", cpNameSpace, "-ojsonpath={.spec.replicas}")
availReplica := doOcpReq(oc, OcpGet, false, "deploy", "oauth-openshift", "-n", cpNameSpace, "-ojsonpath={.status.availableReplicas}")
if replica != availReplica {
return false
}
readyReplica := doOcpReq(oc, OcpGet, false, "deploy", "oauth-openshift", "-n", cpNameSpace, "-ojsonpath={.status.readyReplicas}")
if readyReplica != availReplica {
return false
}
return true
}, ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "recover back hosted cluster timeout")
}()
g.By("config gitlab IDP")
defer doOcpReq(oc, OcpDelete, false, "secret", gitlabSecretName, "--ignore-not-found", "-n", hostedcluster.namespace)
doOcpReq(oc, OcpCreate, true, "secret", "generic", gitlabSecretName, "-n", hostedcluster.namespace, fmt.Sprintf(`--from-literal=clientSecret="%s"`, fakeSecret))
doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type=merge", `-p=`+gitlabConf)
o.Eventually(hostedcluster.pollCheckIDPConfigReady(IdentityProviderTypeGitLab, gitlabIDPName, gitlabSecretName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "wait for the gitlab idp config ready timeout")
g.By("config github IDP")
defer doOcpReq(oc, OcpDelete, false, "secret", githubSecretName, "--ignore-not-found", "-n", hostedcluster.namespace)
doOcpReq(oc, OcpCreate, true, "secret", "generic", githubSecretName, "-n", hostedcluster.namespace, fmt.Sprintf(`--from-literal=clientSecret="%s"`, fakeSecret))
doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type=merge", `-p=`+githubConf)
o.Eventually(hostedcluster.pollCheckIDPConfigReady(IdentityProviderTypeGitHub, githubIDPName, githubSecretName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "wait for the github idp config ready timeout")
})
// author: [email protected]
g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:liangli-Critical-63535-Stop triggering rollout on labels/taint change[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Skipping incompatible platform %s", iaasPlatform))
}
caseID := "63535"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create a nodepool")
np1Count := 1
np1Name := "63535test-01"
defer func() {
hostedcluster.deleteNodePool(np1Name)
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
NewAWSNodePool(np1Name, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&np1Count).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(np1Name), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
g.By("add nodeLabels and taints in the nodepool '63535test-01'")
doOcpReq(oc, OcpPatch, true, "nodepool", np1Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"nodeLabels":{"env":"test"}}}`)
doOcpReq(oc, OcpPatch, true, "nodepool", np1Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"taints":[{"key":"env","value":"test","effect":"PreferNoSchedule"}]}}`)
o.Consistently(func() bool {
value, _ := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np1Name, "--show-labels").Output()
return strings.Contains(value, "env=test")
}, 60*time.Second, 5*time.Second).Should(o.BeFalse())
g.By("Scale the nodepool '63535test-01' to 2")
doOcpReq(oc, OcpScale, true, "nodepool", np1Name, "-n", hostedcluster.namespace, "--replicas=2")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(np1Name), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("nodepool is not scaled up to 2 in hostedcluster %s", hostedcluster.name))
g.By("Check if nodeLabels and taints are propagated into new node")
taintsValue, err := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np1Name, `-lenv=test`, `-ojsonpath={.items[*].spec.taints[?(@.key=="env")].value}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(taintsValue).Should(o.ContainSubstring("test"))
g.By("Create a nodepool 'label-taint' with nodeLabels and taints")
np2Count := 1
np2Name := "63535test-02"
defer func() {
hostedcluster.deleteNodePool(np2Name)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(np2Name), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(np2Name, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&np2Count).
WithNodeUpgradeType("InPlace").
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(np2Name), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", np2Name))
defer func() {
hostedcluster.deleteNodePool(np2Name)
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
g.By("add nodeLabels and taints in the nodepool '63535test-02(InPlace)'")
doOcpReq(oc, OcpPatch, true, "nodepool", np2Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"nodeLabels":{"env":"test2"}}}`)
doOcpReq(oc, OcpPatch, true, "nodepool", np2Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"taints":[{"key":"env","value":"test2","effect":"PreferNoSchedule"}]}}`)
o.Consistently(func() bool {
value, _ := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np2Name, "--show-labels").Output()
return strings.Contains(value, "env=test2")
}, 60*time.Second, 5*time.Second).Should(o.BeFalse())
g.By("Scale the nodepool '63535test-02(InPlace)' to 2")
doOcpReq(oc, OcpScale, true, "nodepool", np2Name, "-n", hostedcluster.namespace, "--replicas=2")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(np2Name), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("nodepool is not scaled up to 2 in hostedcluster %s", hostedcluster.name))
g.By("Check if nodepool 'label-taint' comes up and nodeLabels and taints are propagated into nodes")
taintsValue, err = hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np1Name, `-lenv=test2`, `-ojsonpath={.items[*].spec.taints[?(@.key=="env")].value}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(taintsValue).Should(o.ContainSubstring("test2"))
})
// Test run duration: 20min
g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-67786-Changes to NodePool .spec.platform should trigger a rolling upgrade [Serial]", func() {
// Variables
var (
testCaseId = "67786"
expectedPlatform = "aws"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
npName = fmt.Sprintf("%s-np", resourceNamePrefix)
npNumReplicas = 2
npInstanceType = "m5.xlarge"
npInstanceTypeNew = "m5.large"
)
if iaasPlatform != expectedPlatform {
g.Skip(fmt.Sprintf("Test case %s is for %s but current platform is %s, skipping", testCaseId, expectedPlatform, iaasPlatform))
}
// Avoid using an existing NodePool so other Hypershift test cases are unaffected by this one
exutil.By("Creating an additional NodePool")
releaseImage := hostedcluster.getCPReleaseImage()
e2e.Logf("Found release image used by the hosted cluster = %s", releaseImage)
defaultSgId := hostedcluster.getDefaultSgId()
o.Expect(defaultSgId).NotTo(o.BeEmpty())
e2e.Logf("Found default SG ID of the hosted cluster = %s", defaultSgId)
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s to be deleted", npName))
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&npNumReplicas).
WithReleaseImage(releaseImage).
WithInstanceType(npInstanceType).
WithSecurityGroupID(defaultSgId).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s to be ready", npName))
exutil.By("Checking instance type on CAPI resources")
awsMachineTemp, err := hostedcluster.getCurrentInfraMachineTemplatesByNodepool(context.Background(), npName)
o.Expect(err).ShouldNot(o.HaveOccurred())
instanceType, found, err := unstructured.NestedString(awsMachineTemp.Object, "spec", "template", "spec", "instanceType")
o.Expect(found).To(o.BeTrue())
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).To(o.Equal(npInstanceType))
exutil.By("Checking instance type label on nodes belonging to the newly created NodePool")
nodeList, err := oc.GuestKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
hypershiftNodePoolLabelKey: npName,
nodeInstanceTypeLabelKey: npInstanceType,
}).String(),
})
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(len(nodeList.Items)).To(o.Equal(npNumReplicas))
exutil.By(fmt.Sprintf("Change instance type to %s", npInstanceTypeNew))
patch := fmt.Sprintf(`{"spec":{"platform":{"aws":{"instanceType": "%s"}}}}`, npInstanceTypeNew)
doOcpReq(oc, OcpPatch, true, "np", npName, "-n", hostedcluster.namespace, "--type", "merge", "-p", patch)
exutil.By("Waiting for replace upgrade to complete")
upgradeType := hostedcluster.getNodepoolUpgradeType(npName)
o.Expect(upgradeType).Should(o.ContainSubstring("Replace"))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeIntermediateStatus(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s replace upgrade to start", npName))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeComplete(npName), DoubleLongTimeout, DefaultTimeout/5).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s replace upgrade to complete", npName))
exutil.By("Make sure the instance type is updated on CAPI resources")
awsMachineTemp, err = hostedcluster.getCurrentInfraMachineTemplatesByNodepool(context.Background(), npName)
o.Expect(err).ShouldNot(o.HaveOccurred())
instanceType, found, err = unstructured.NestedString(awsMachineTemp.Object, "spec", "template", "spec", "instanceType")
o.Expect(found).To(o.BeTrue())
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).To(o.Equal(npInstanceTypeNew))
exutil.By("Make sure the node instance types are updated as well")
nodeList, err = oc.GuestKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
hypershiftNodePoolLabelKey: npName,
nodeInstanceTypeLabelKey: npInstanceTypeNew,
}).String(),
})
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(len(nodeList.Items)).To(o.Equal(npNumReplicas))
})
/*
The DB sizes across all ETCD members are assumed to be (eventually) close to one another.
We will only consider one ETCD member for simplicity.
The creation of a large number of resources on the hosted cluster makes this test case:
- disruptive
- **suitable for running on a server** (as opposed to running locally)
Test run duration: ~30min
*/
g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-70974-Test Hosted Cluster etcd automatic defragmentation [Disruptive]", func() {
if !hostedcluster.isCPHighlyAvailable() {
g.Skip("This test case runs against a hosted cluster with highly available control plane, skipping")
}
var (
testCaseId = "70974"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tmpDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
hcpNs = hostedcluster.getHostedComponentNamespace()
cmNamePrefix = fmt.Sprintf("%s-cm", resourceNamePrefix)
cmIdx = 0
cmBatchSize = 500
cmData = strings.Repeat("a", 100_000)
cmNs = "default"
etcdDefragThreshold = 0.45
etcdDefragMargin = 0.05
etcdDbSwellingRate = 4
etcdDbContractionRate = 2
testEtcdEndpointIdx = 0
)
var (
getCM = func() string {
cmIdx++
return fmt.Sprintf(`apiVersion: v1
kind: ConfigMap
metadata:
name: %s-%03d
namespace: %s
labels:
foo: bar
data:
foo: %s
---
`, cmNamePrefix, cmIdx, cmNs, cmData)
}
)
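// Each getCM call emits a ~100KB ConfigMap manifest, so a batch of cmBatchSize manifests bulks up the etcd DB quickly.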
exutil.By("Creating temporary directory")
err := os.MkdirAll(tmpDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err = os.RemoveAll(tmpDir)
o.Expect(err).NotTo(o.HaveOccurred())
}()
exutil.By("Making sure the (hosted) control plane is highly available by checking the number of etcd Pods")
etcdPodCountStr := doOcpReq(oc, OcpGet, true, "sts", "etcd", "-n", hcpNs, "-o=jsonpath={.spec.replicas}")
o.Expect(strconv.Atoi(etcdPodCountStr)).To(o.BeNumerically(">", 1), "Expect >1 etcd Pods")
exutil.By("Getting DB size of an ETCD member")
_, dbSizeInUse, _, err := hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
targetDbSize := dbSizeInUse * int64(etcdDbSwellingRate)
e2e.Logf("Found initial ETCD member DB size in use = %d, target ETCD member DB size = %d", dbSizeInUse, targetDbSize)
exutil.By("Creating ConfigMaps on the guest cluster until the ETCD member DB size is large enough")
var dbSizeBeforeDefrag int64
defer func() {
_, err = oc.AsGuestKubeconf().Run("delete").Args("cm", "-n=default", "-l=foo=bar", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
}()
o.Eventually(func() (done bool) {
// Check ETCD endpoint for DB size
dbSizeBeforeDefrag, _, _, err = hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
if dbSizeBeforeDefrag >= targetDbSize {
return true
}
// Create temporary file
f, err := os.CreateTemp(tmpDir, "ConfigMaps")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
if err = f.Close(); err != nil {
e2e.Logf("Error closing file %s: %v", f.Name(), err)
}
if err = os.Remove(f.Name()); err != nil {
e2e.Logf("Error removing file %s: %v", f.Name(), err)
}
}()
// Write resources to file.
// For a batch size of 500, the resources will occupy a bit more than 50 MB of space.
for i := 0; i < cmBatchSize; i++ {
_, err = f.WriteString(getCM())
o.Expect(err).NotTo(o.HaveOccurred())
}
err = f.Sync()
o.Expect(err).NotTo(o.HaveOccurred())
fs, err := f.Stat()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("File size = %d", fs.Size())
// Create all the resources on the guest cluster
// Omit countless lines of "XXX created" output
_, err = oc.AsGuestKubeconf().Run("create").Args("-f", f.Name()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return false
}).WithTimeout(LongTimeout).WithPolling(LongTimeout / 10).Should(o.BeTrue())
exutil.By("Deleting all ConfigMaps")
_, err = oc.AsGuestKubeconf().Run("delete").Args("cm", "-n=default", "-l=foo=bar").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Waiting until the fragmentation ratio is above threshold+margin")
o.Eventually(func() (done bool) {
_, _, dbFragRatio, err := hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
return dbFragRatio > etcdDefragThreshold+etcdDefragMargin
}).WithTimeout(LongTimeout).WithPolling(LongTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting until defragmentation is done which causes DB size to decrease")
o.Eventually(func() (done bool) {
dbSize, _, _, err := hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
return dbSize < dbSizeBeforeDefrag/int64(etcdDbContractionRate)
}).WithTimeout(DoubleLongTimeout).WithPolling(LongTimeout / 10).Should(o.BeTrue())
_, err = exutil.WaitAndGetSpecificPodLogs(oc, hcpNs, "etcd", "etcd-0", "defrag")
o.Expect(err).NotTo(o.HaveOccurred())
})
/*
Test environment requirements:
This test case runs against an STS management cluster which is not equipped with the root cloud credentials.
Interactions with the cloud provider must rely on a set of credentials that are unavailable on Jenkins agents.
Therefore, rehearsals have to be conducted locally.
Test run duration:
The CronJob created by the HCP controller is scheduled (hard-coded) to run at the beginning of each hour.
Consequently, the test run duration can vary between ~10 minutes and ~65min.
*/
g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-72055-Automated etcd backups for Managed services", func() {
// Skip incompatible platforms
// The etcd snapshots will be backed up to S3 so this test case runs on AWS only
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Skipping incompatible platform %s", iaasPlatform))
}
// The management cluster has to be an STS cluster as the SA token will be used to assume an existing AWS role
if !exutil.IsSTSCluster(oc) {
g.Skip("This test case must run on an STS management cluster, skipping")
}
// Restrict CPO's version to >= 4.16.0
// TODO(fxie): remove this once https://github.com/openshift/hypershift/pull/3034 gets merged and is included in the payload
hcVersion := exutil.GetHostedClusterVersion(oc, hostedcluster.name, hostedcluster.namespace)
e2e.Logf("Found hosted cluster version = %q", hcVersion)
hcVersion.Pre = nil
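// Strip any pre-release identifiers so pre-release builds of 4.16 are not treated as lower than the 4.16.0 floor in the semver comparison below.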
minHcVersion := semver.MustParse("4.16.0")
if hcVersion.LT(minHcVersion) {
g.Skip(fmt.Sprintf("The hosted cluster's version (%q) is too low, skipping", hcVersion))
}
var (
testCaseId = getTestCaseIDs()[0]
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
etcdBackupBucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
etcdBackupRoleName = fmt.Sprintf("%s-role", resourceNamePrefix)
etcdBackupRolePolicyArn = "arn:aws:iam::aws:policy/AmazonS3FullAccess"
hcpNs = hostedcluster.getHostedComponentNamespace()
adminKubeClient = oc.AdminKubeClient()
ctx = context.Background()
)
// It is impossible to rely on short-lived tokens like operators on the management cluster:
// there isn't a preexisting role with enough permissions for us to assume.
exutil.By("Getting an AWS session with credentials obtained from cluster profile")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
creds := credentials.NewSharedCredentials(getAWSPrivateCredentials(), "default")
var sess *session.Session
sess, err = session.NewSession(&aws.Config{
Credentials: creds,
Region: aws.String(region),
})
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Getting AWS account ID")
stsClient := exutil.NewDelegatingStsClient(sts.New(sess))
var getCallerIdOutput *sts.GetCallerIdentityOutput
getCallerIdOutput, err = stsClient.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
o.Expect(err).NotTo(o.HaveOccurred())
awsAcctId := aws.StringValue(getCallerIdOutput.Account)
e2e.Logf("Found AWS account ID = %s", awsAcctId)
exutil.By("Getting SA issuer of the management cluster")
saIssuer := doOcpReq(oc, OcpGet, true, "authentication/cluster", "-o=jsonpath={.spec.serviceAccountIssuer}")
// An OIDC provider's URL is prefixed with https://
saIssuerStripped := strings.TrimPrefix(saIssuer, "https://")
e2e.Logf("Found SA issuer of the management cluster = %s", saIssuerStripped)
exutil.By("Creating AWS role")
iamClient := exutil.NewDelegatingIAMClient(awsiam.New(sess))
var createRoleOutput *awsiam.CreateRoleOutput
createRoleOutput, err = iamClient.CreateRoleWithContext(ctx, &awsiam.CreateRoleInput{
RoleName: aws.String(etcdBackupRoleName),
AssumeRolePolicyDocument: aws.String(iamRoleTrustPolicyForEtcdBackup(awsAcctId, saIssuerStripped, hcpNs)),
})
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_, err = iamClient.DeleteRoleWithContext(ctx, &awsiam.DeleteRoleInput{
RoleName: aws.String(etcdBackupRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred())
}()
e2e.Logf("Attaching policy %s to role %s", etcdBackupRolePolicyArn, etcdBackupRoleName)
o.Expect(iamClient.AttachRolePolicy(etcdBackupRoleName, etcdBackupRolePolicyArn)).NotTo(o.HaveOccurred())
defer func() {
// Required for role deletion
o.Expect(iamClient.DetachRolePolicy(etcdBackupRoleName, etcdBackupRolePolicyArn)).NotTo(o.HaveOccurred())
}()
roleArn := aws.StringValue(createRoleOutput.Role.Arn)
exutil.By("Creating AWS S3 bucket")
s3Client := exutil.NewDelegatingS3Client(s3.New(sess))
o.Expect(s3Client.CreateBucket(etcdBackupBucketName)).NotTo(o.HaveOccurred())
defer func() {
// Required for bucket deletion
o.Expect(s3Client.EmptyBucketWithContextAndCheck(ctx, etcdBackupBucketName)).NotTo(o.HaveOccurred())
o.Expect(s3Client.DeleteBucket(etcdBackupBucketName)).NotTo(o.HaveOccurred())
}()
exutil.By("Creating CM/etcd-backup-config")
e2e.Logf("Found management cluster region = %s", region)
etcdBackupConfigCm := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "etcd-backup-config",
},
Data: map[string]string{
"bucket-name": etcdBackupBucketName,
"region": region,
"role-arn": roleArn,
},
}
_, err = adminKubeClient.CoreV1().ConfigMaps(hcpNs).Create(ctx, &etcdBackupConfigCm, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
defer doOcpReq(oc, OcpDelete, true, "cm/etcd-backup-config", "-n", hcpNs)
e2e.Logf("CM/etcd-backup-config created:\n%s", format.Object(etcdBackupConfigCm, 0))
exutil.By("Waiting for the etcd backup CronJob to be created")
o.Eventually(func() bool {
return oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("cronjob/etcd-backup", "-n", hcpNs).Execute() == nil
}).WithTimeout(DefaultTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting for the first job execution to be successful")
o.Eventually(func() bool {
lastSuccessfulTime, _, err := oc.AsAdmin().WithoutNamespace().Run(OcpGet).
Args("cronjob/etcd-backup", "-n", hcpNs, "-o=jsonpath={.status.lastSuccessfulTime}").Outputs()
return err == nil && len(lastSuccessfulTime) > 0
}).WithTimeout(70 * time.Minute).WithPolling(5 * time.Minute).Should(o.BeTrue())
exutil.By("Waiting for the backup to be uploaded")
o.Expect(s3Client.WaitForBucketEmptinessWithContext(ctx, etcdBackupBucketName,
exutil.BucketNonEmpty, 5*time.Second /* Interval */, 1*time.Minute /* Timeout */)).NotTo(o.HaveOccurred())
})
})
| package hypershift | ||||
test case | openshift/openshift-tests-private | 0ef46f15-e53c-4d99-ac4a-85400c3b713e | ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-42855-Check Status Conditions for HostedControlPlane | ['exutil "github.com/openshift/openshift-tests-private/test/extended/util"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-42855-Check Status Conditions for HostedControlPlane", func() {
rc := hostedcluster.checkHCConditions()
o.Expect(rc).Should(o.BeTrue())
// add more test here to check hypershift util
operatorNS := exutil.GetHyperShiftOperatorNameSpace(oc)
e2e.Logf("hosted cluster operator namespace %s", operatorNS)
o.Expect(operatorNS).NotTo(o.BeEmpty())
hostedclusterNS := exutil.GetHyperShiftHostedClusterNameSpace(oc)
e2e.Logf("hosted cluster namespace %s", hostedclusterNS)
o.Expect(hostedclusterNS).NotTo(o.BeEmpty())
guestClusterName, guestClusterKube, _ := exutil.ValidHypershiftAndGetGuestKubeConf(oc)
e2e.Logf("hostedclustercluster name %s", guestClusterName)
cv, err := oc.AsAdmin().SetGuestKubeconf(guestClusterKube).AsGuestKubeconf().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("hosted cluster clusterversion name %s", cv)
guestClusterName, guestClusterKube, _ = exutil.ValidHypershiftAndGetGuestKubeConfWithNoSkip(oc)
o.Expect(guestClusterName).NotTo(o.BeEmpty())
o.Expect(guestClusterKube).NotTo(o.BeEmpty())
cv, err = oc.AsAdmin().SetGuestKubeconf(guestClusterKube).AsGuestKubeconf().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("hosted cluster clusterversion with noskip api name %s", cv)
}) | |||||
test case | openshift/openshift-tests-private | 91a8b9ad-995f-491f-9fed-421d13275212 | Author:heli-ROSA-OSD_CCS-DEPRECATED-HyperShiftMGMT-Critical-43555-Allow direct ingress on guest clusters on AWS | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("Author:heli-ROSA-OSD_CCS-DEPRECATED-HyperShiftMGMT-Critical-43555-Allow direct ingress on guest clusters on AWS", func() {
var bashClient = NewCmdClient()
console, psw := hostedcluster.getHostedclusterConsoleInfo()
parms := fmt.Sprintf("curl -u admin:%s %s -k -LIs -o /dev/null -w %s ", psw, console, "%{http_code}")
res, err := bashClient.Run(parms).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
checkSubstring(res, []string{"200"})
}) | |||||
test case | openshift/openshift-tests-private | 45a54c37-e120-4497-bb00-be9cc19db4f7 | Author:heli-HyperShiftMGMT-Longduration-NonPreRelease-Critical-43272-Critical-43829-Test cluster autoscaler via hostedCluster autoScaling settings [Serial] | ['"path/filepath"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("Author:heli-HyperShiftMGMT-Longduration-NonPreRelease-Critical-43272-Critical-43829-Test cluster autoscaler via hostedCluster autoScaling settings [Serial]", func() {
if iaasPlatform != "aws" && iaasPlatform != "azure" {
g.Skip("Skip due to incompatible platform")
}
var (
npCount = 1
npName = "jz-43272-test-01"
autoScalingMax = "3"
autoScalingMin = "1"
workloadTemplate = filepath.Join(hypershiftTeamBaseDir, "workload.yaml")
parsedWorkloadFile = "ocp-43272-workload-template.config"
)
exutil.By("create a nodepool")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
switch iaasPlatform {
case "aws":
hostedcluster.createAwsNodePool(npName, npCount)
case "azure":
hostedcluster.createAdditionalAzureNodePool(npName, npCount)
}
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
o.Expect(hostedcluster.isNodepoolAutosaclingEnabled(npName)).Should(o.BeFalse())
exutil.By("enable the nodepool to be autoscaling")
hostedcluster.setNodepoolAutoScale(npName, autoScalingMax, autoScalingMin)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready after setting autoscaling error")
o.Expect(hostedcluster.isNodepoolAutosaclingEnabled(npName)).Should(o.BeTrue())
exutil.By("create a job as workload in the hosted cluster")
wl := workload{
name: "workload",
namespace: "default",
template: workloadTemplate,
}
defer wl.delete(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile)
wl.create(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile, "--local")
exutil.By("check nodepool is auto-scaled to max")
o.Eventually(hostedcluster.pollCheckNodepoolCurrentNodes(npName, autoScalingMax), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool autoscaling max error")
}) | |||||
test case | openshift/openshift-tests-private | 85552285-9ed8-4f3f-8a8f-b2fb778dc796 | ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-43554-Check FIPS support in the Hosted Cluster | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-43554-Check FIPS support in the Hosted Cluster", func() {
if !hostedcluster.isFIPEnabled() {
g.Skip("only for the fip enabled hostedcluster, skip test run")
}
o.Expect(hostedcluster.checkFIPInHostedCluster()).Should(o.BeTrue())
}) | ||||||
test case | openshift/openshift-tests-private | 92c4bca0-23e3-4b50-8551-d4451636cd17 | HyperShiftMGMT-ROSA-Author:heli-Critical-45770-Test basic fault resilient HA-capable etcd[Serial][Disruptive] | ['"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-ROSA-Author:heli-Critical-45770-Test basic fault resilient HA-capable etcd[Serial][Disruptive]", func() {
if !hostedcluster.isCPHighlyAvailable() {
g.Skip("this is for hosted cluster HA mode , skip test run")
}
//check etcd
antiAffinityJSONPath := ".spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution"
topologyKeyJSONPath := antiAffinityJSONPath + "[*].topologyKey"
desiredTopologyKey := "topology.kubernetes.io/zone"
etcdSts := "etcd"
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "statefulset", etcdSts, "-ojsonpath={"+antiAffinityJSONPath+"}")
res := doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "statefulset", etcdSts, "-ojsonpath={"+topologyKeyJSONPath+"}")
o.Expect(res).To(o.ContainSubstring(desiredTopologyKey))
//check etcd healthy
etcdCmd := "ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt " +
"--cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379"
etcdHealthCmd := etcdCmd + " endpoint health"
etcdStatusCmd := etcdCmd + " endpoint status"
for i := 0; i < 3; i++ {
res = doOcpReq(oc, OcpExec, true, "-n", controlplaneNS, "etcd-"+strconv.Itoa(i), "--", "sh", "-c", etcdHealthCmd)
o.Expect(res).To(o.ContainSubstring("localhost:2379 is healthy"))
}
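// "endpoint status" reports the member's isLeader and isLearner flags; "false, false" identifies a follower that is safe to delete for the resilience check.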
for i := 0; i < 3; i++ {
etcdPodName := "etcd-" + strconv.Itoa(i)
res = doOcpReq(oc, OcpExec, true, "-n", controlplaneNS, etcdPodName, "--", "sh", "-c", etcdStatusCmd)
if strings.Contains(res, "false, false") {
e2e.Logf("find etcd follower etcd-%d, begin to delete this pod", i)
//delete the first follower
doOcpReq(oc, OcpDelete, true, "-n", controlplaneNS, "pod", etcdPodName)
//check the follower can be restarted and keep health
err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
status := doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "pod", etcdPodName, "-ojsonpath={.status.phase}")
if status == "Running" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "etcd cluster health check error")
//check the follower pod running
status := doOcpReq(oc, OcpGet, true, "-n", controlplaneNS, "pod", etcdPodName, "-ojsonpath={.status.phase}")
o.Expect(status).To(o.ContainSubstring("Running"))
//check the follower health
execEtcdHealthCmd := append([]string{"-n", controlplaneNS, etcdPodName, "--", "sh", "-c"}, etcdHealthCmd)
res = doOcpReq(oc, OcpExec, true, execEtcdHealthCmd...)
o.Expect(res).To(o.ContainSubstring("localhost:2379 is healthy"))
break
}
}
}) | |||||
test case | openshift/openshift-tests-private | 476b43f7-b5a9-4bad-8e26-b98bc98a43af | Author:heli-HyperShiftMGMT-ROSA-Critical-45801-Critical-45821-Test fault resilient HA-capable etcd under network partition[Disruptive] | ['"context"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("Author:heli-HyperShiftMGMT-ROSA-Critical-45801-Critical-45821-Test fault resilient HA-capable etcd under network partition[Disruptive]", func(ctx context.Context) {
if !hostedcluster.isCPHighlyAvailable() {
g.Skip("this is for hosted cluster HA mode , skip test run")
}
exutil.SkipOnAKSNess(ctx, oc, false)
g.By("find leader and get mapping between etcd pod name and node name")
etcdNodeMap := hostedcluster.getEtcdNodeMapping()
leader, followers, err := hostedcluster.getCPEtcdLeaderAndFollowers()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(len(followers) > 1).Should(o.BeTrue())
defer func() {
o.Eventually(func() bool {
return hostedcluster.isCPEtcdPodHealthy(followers[0])
}, ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("error: follower %s could not recoverd now", followers[0]))
o.Expect(hostedcluster.isCPEtcdPodHealthy(leader)).Should(o.BeTrue())
for i := 1; i < len(followers); i++ {
o.Expect(hostedcluster.isCPEtcdPodHealthy(followers[i])).Should(o.BeTrue())
}
}()
g.By("drop traffic from leader to follower")
defer func() {
debugNodeStdout, err := exutil.DebugNodeWithChroot(oc, etcdNodeMap[followers[0]], "iptables", "-t", "filter", "-D", "INPUT", "-s", etcdNodeMap[leader], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("recover traffic from leader %s to follower %s, debug output: %s", etcdNodeMap[leader], etcdNodeMap[followers[0]], debugNodeStdout)
}()
debugNodeStdout, err := exutil.DebugNodeWithChroot(oc, etcdNodeMap[followers[0]], "iptables", "-t", "filter", "-A", "INPUT", "-s", etcdNodeMap[leader], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("drop traffic debug output 1: %s", debugNodeStdout)
g.By("drop traffic from follower to leader")
defer func() {
debugNodeStdout, err := exutil.DebugNodeWithChroot(oc, etcdNodeMap[leader], "iptables", "-t", "filter", "-D", "INPUT", "-s", etcdNodeMap[followers[0]], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("recover traffic from follower %s to leader %s, debug output: %s", etcdNodeMap[followers[0]], etcdNodeMap[leader], debugNodeStdout)
}()
debugNodeStdout, err = exutil.DebugNodeWithChroot(oc, etcdNodeMap[leader], "iptables", "-t", "filter", "-A", "INPUT", "-s", etcdNodeMap[followers[0]], "-j", "DROP")
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("drop traffic debug output 2: %s", debugNodeStdout)
g.By("follower 0 should not be health again")
o.Eventually(func() bool {
return hostedcluster.isCPEtcdPodHealthy(followers[0])
}, ShortTimeout, ShortTimeout/10).Should(o.BeFalse(), fmt.Sprintf("error: follower %s should be unhealthy now", followers[0]))
g.By("leader should be running status and the rest of follower are still in the running status too")
o.Expect(hostedcluster.isCPEtcdPodHealthy(leader)).Should(o.BeTrue())
for i := 1; i < len(followers); i++ {
o.Expect(hostedcluster.isCPEtcdPodHealthy(followers[i])).Should(o.BeTrue())
}
g.By("check hosted cluster is still working")
o.Eventually(func() error {
_, err = hostedcluster.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args("node").Output()
return err
}, ShortTimeout, ShortTimeout/10).ShouldNot(o.HaveOccurred(), "error: hosted cluster is no longer working")
g.By("ocp-45801 test passed")
}) | |||||
test case | openshift/openshift-tests-private | a489fb29-df48-4702-81c8-40cb9a16a277 | ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-46711-Test HCP components to use service account tokens | ['"encoding/base64"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/aws/aws-sdk-go/aws/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-46711-Test HCP components to use service account tokens", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 46711 is for AWS - skipping test ...")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
secretsWithCreds := []string{
"cloud-controller-creds",
"cloud-network-config-controller-creds",
"control-plane-operator-creds",
"ebs-cloud-credentials",
"node-management-creds",
}
for _, sec := range secretsWithCreds {
cre := doOcpReq(oc, OcpGet, true, "secret", sec, "-n", controlplaneNS, "-ojsonpath={.data.credentials}")
roleInfo, err := base64.StdEncoding.DecodeString(cre)
o.Expect(err).ShouldNot(o.HaveOccurred())
checkSubstring(string(roleInfo), []string{"role_arn", "web_identity_token_file"})
}
}) | |||||
test case | openshift/openshift-tests-private | 73c961ea-5c9b-42eb-90bf-a10df7684d1a | HyperShiftMGMT-Author:heli-Critical-44824-Resource requests/limit configuration for critical control plane workloads[Serial][Disruptive] | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:heli-Critical-44824-Resource requests/limit configuration for critical control plane workloads[Serial][Disruptive]", func() {
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
cpuRequest := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.cpu}`)
memoryRequest := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.memory}`)
e2e.Logf("cpu request: %s, memory request: %s\n", cpuRequest, memoryRequest)
defer func() {
//change back to original cpu, memory value
patchOptions := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"kube-apiserver","resources":{"requests":{"cpu":"%s", "memory": "%s"}}}]}}}}`, cpuRequest, memoryRequest)
doOcpReq(oc, OcpPatch, true, "deploy", "kube-apiserver", "-n", controlplaneNS, "-p", patchOptions)
//check new value of cpu, memory resource
err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
cpuRes := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.cpu}`)
if cpuRes != cpuRequest {
return false, nil
}
memoryRes := doOcpReq(oc, OcpGet, true, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.memory}`)
if memoryRes != memoryRequest {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "kube-apiserver cpu & memory resource change back error")
}()
//change cpu, memory resources
desiredCPURequest := "200m"
desiredMemoryReqeust := "1700Mi"
patchOptions := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"kube-apiserver","resources":{"requests":{"cpu":"%s", "memory": "%s"}}}]}}}}`, desiredCPURequest, desiredMemoryReqeust)
doOcpReq(oc, OcpPatch, true, "deploy", "kube-apiserver", "-n", controlplaneNS, "-p", patchOptions)
//check new value of cpu, memory resource
err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
cpuRes := doOcpReq(oc, OcpGet, false, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.cpu}`)
if cpuRes != desiredCPURequest {
return false, nil
}
memoryRes := doOcpReq(oc, OcpGet, false, "deployment", "kube-apiserver", "-n", controlplaneNS, `-ojsonpath={.spec.template.spec.containers[?(@.name=="kube-apiserver")].resources.requests.memory}`)
if memoryRes != desiredMemoryRequest {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "kube-apiserver cpu & memory resource update error")
}) | |||||
test case | openshift/openshift-tests-private | 6700fdb4-8be5-4fef-8e9c-13eeda14a564 | ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-44926-Test priority classes for Hypershift control plane workloads | ['"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-44926-Test priority classes for Hypershift control plane workloads", func() {
if iaasPlatform != "aws" && iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44926 is for AWS or Azure - skipping test ...")
}
//deployment
priorityClasses := map[string][]string{
"hypershift-api-critical": {
"kube-apiserver",
"oauth-openshift",
"openshift-oauth-apiserver",
"openshift-apiserver",
"packageserver",
"ovnkube-control-plane",
},
//oc get deploy -n clusters-demo-02 -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.template.
//spec.priorityClassName}{"\n"}{end}' | grep hypershift-control-plane | awk '{print "\""$1"\""","}'
"hypershift-control-plane": {
"capi-provider",
"catalog-operator",
"cluster-api",
"cluster-autoscaler",
"cluster-image-registry-operator",
"cluster-network-operator",
"cluster-node-tuning-operator",
"cluster-policy-controller",
"cluster-storage-operator",
"cluster-version-operator",
"control-plane-operator",
"csi-snapshot-controller",
"csi-snapshot-controller-operator",
"csi-snapshot-webhook",
"dns-operator",
"hosted-cluster-config-operator",
"ignition-server",
"ingress-operator",
"konnectivity-agent",
"kube-controller-manager",
"kube-scheduler",
"machine-approver",
"multus-admission-controller",
"olm-operator",
"openshift-controller-manager",
"openshift-route-controller-manager",
"cloud-network-config-controller",
},
}
if hostedcluster.getOLMCatalogPlacement() == olmCatalogPlacementManagement {
priorityClasses["hypershift-control-plane"] = append(priorityClasses["hypershift-control-plane"], "certified-operators-catalog", "community-operators-catalog", "redhat-marketplace-catalog", "redhat-operators-catalog")
}
switch iaasPlatform {
case "aws":
priorityClasses["hypershift-control-plane"] = append(priorityClasses["hypershift-control-plane"], "aws-ebs-csi-driver-operator", "aws-ebs-csi-driver-controller")
case "azure":
priorityClasses["hypershift-control-plane"] = append(priorityClasses["hypershift-control-plane"], "azure-disk-csi-driver-controller", "azure-disk-csi-driver-operator", "azure-file-csi-driver-controller", "azure-file-csi-driver-operator", "azure-cloud-controller-manager")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
for priority, components := range priorityClasses {
e2e.Logf("priorityClass: %s %v\n", priority, components)
for _, c := range components {
res := doOcpReq(oc, OcpGet, true, "deploy", c, "-n", controlplaneNS, "-ojsonpath={.spec.template.spec.priorityClassName}")
o.Expect(res).To(o.Equal(priority))
}
}
//check statefulset for etcd
etcdSts := "etcd"
etcdPriorityClass := "hypershift-etcd"
res := doOcpReq(oc, OcpGet, true, "statefulset", etcdSts, "-n", controlplaneNS, "-ojsonpath={.spec.template.spec.priorityClassName}")
o.Expect(res).To(o.Equal(etcdPriorityClass))
}) | |||||
test case | openshift/openshift-tests-private | ba1fba1a-530e-4669-9a7c-c4bf3560e3b4 | HyperShiftMGMT-Author:heli-NonPreRelease-Longduration-Critical-44942-Enable control plane deployment restart on demand[Serial] | ['"fmt"', '"runtime"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:heli-NonPreRelease-Longduration-Critical-44942-Enable control plane deployment restart on demand[Serial]", func() {
res := doOcpReq(oc, OcpGet, false, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.metadata.annotations}")
e2e.Logf("get hostedcluster %s annotation: %s ", hostedcluster.name, res)
var cmdClient = NewCmdClient()
var restartDate string
var err error
systype := runtime.GOOS
if systype == "darwin" {
restartDate, err = cmdClient.Run("gdate --rfc-3339=date").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} else if systype == "linux" {
restartDate, err = cmdClient.Run("date --rfc-3339=date").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} else {
g.Skip("only available on linux or mac system")
}
annotationKey := "hypershift.openshift.io/restart-date"
//value to be annotated
restartAnnotation := fmt.Sprintf("%s=%s", annotationKey, restartDate)
//annotations to be verified
desiredAnnotation := fmt.Sprintf(`"%s":"%s"`, annotationKey, restartDate)
//delete the annotation first if it already exists
existingAnno := doOcpReq(oc, OcpGet, false, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.metadata.annotations}")
e2e.Logf("get hostedcluster %s annotation: %s ", hostedcluster.name, existingAnno)
if strings.Contains(existingAnno, desiredAnnotation) {
removeAnno := annotationKey + "-"
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, removeAnno)
}
//add annotation
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, restartAnnotation)
e2e.Logf("set hostedcluster %s annotation %s done ", hostedcluster.name, restartAnnotation)
res = doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.metadata.annotations}")
e2e.Logf("get hostedcluster %s annotation: %s ", hostedcluster.name, res)
o.Expect(res).To(o.ContainSubstring(desiredAnnotation))
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
res = doOcpReq(oc, OcpGet, true, "deploy", "kube-apiserver", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.spec.template.metadata.annotations}")
if strings.Contains(res, desiredAnnotation) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "ocp-44942 hostedcluster restart annotation not found error")
}) | |||||
test case | openshift/openshift-tests-private | 70c539eb-6206-44b0-9f67-ab381e2ad268 | ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-44988-Colocate control plane components by default | ['"github.com/aws/aws-sdk-go/aws"', '"github.com/aws/aws-sdk-go/service/sts"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:heli-Critical-44988-Colocate control plane components by default", func() {
//deployment
controlplaneComponents := []string{
"kube-apiserver",
"oauth-openshift",
"openshift-oauth-apiserver",
"openshift-apiserver",
"packageserver",
"capi-provider",
"catalog-operator",
"cluster-api",
// ignore it for the Azure failure when checking the label hypershift.openshift.io/hosted-control-plane=clusters-{cluster-name}
//"cluster-autoscaler",
"cluster-image-registry-operator",
"cluster-network-operator",
"cluster-node-tuning-operator",
"cluster-policy-controller",
"cluster-storage-operator",
"cluster-version-operator",
"control-plane-operator",
"csi-snapshot-controller-operator",
"dns-operator",
"hosted-cluster-config-operator",
"ignition-server",
"ingress-operator",
"konnectivity-agent",
"kube-controller-manager",
"kube-scheduler",
"machine-approver",
"olm-operator",
"openshift-controller-manager",
"openshift-route-controller-manager",
//"cloud-network-config-controller",
"csi-snapshot-controller",
"csi-snapshot-webhook",
//"multus-admission-controller",
//"ovnkube-control-plane",
}
if hostedclusterPlatform == AWSPlatform {
controlplaneComponents = append(controlplaneComponents, []string{"aws-ebs-csi-driver-controller" /*"aws-ebs-csi-driver-operator"*/}...)
}
if hostedcluster.getOLMCatalogPlacement() == olmCatalogPlacementManagement {
controlplaneComponents = append(controlplaneComponents, "certified-operators-catalog", "community-operators-catalog", "redhat-marketplace-catalog", "redhat-operators-catalog")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
controlplanAffinityLabelKey := "hypershift.openshift.io/hosted-control-plane"
controlplanAffinityLabelValue := controlplaneNS
ocJsonpath := "-ojsonpath={.spec.template.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchLabels}"
for _, component := range controlplaneComponents {
res := doOcpReq(oc, OcpGet, true, "deploy", component, "-n", controlplaneNS, ocJsonpath)
o.Expect(res).To(o.ContainSubstring(controlplanAffinityLabelKey))
o.Expect(res).To(o.ContainSubstring(controlplanAffinityLabelValue))
}
res := doOcpReq(oc, OcpGet, true, "sts", "etcd", "-n", controlplaneNS, ocJsonpath)
o.Expect(res).To(o.ContainSubstring(controlplanAffinityLabelKey))
o.Expect(res).To(o.ContainSubstring(controlplanAffinityLabelValue))
res = doOcpReq(oc, OcpGet, true, "pod", "-n", controlplaneNS, "-l", controlplanAffinityLabelKey+"="+controlplanAffinityLabelValue)
checkSubstring(res, controlplaneComponents)
}) | |||||
test case | openshift/openshift-tests-private | 07b596e3-b625-41fe-a86c-fea5cdae9c1c | HyperShiftMGMT-Author:heli-Critical-48025-Test EBS allocation for nodepool[Disruptive] | ['"fmt"', '"time"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:heli-Critical-48025-Test EBS allocation for nodepool[Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48025 is for AWS - skipping test ...")
}
g.By("create aws nodepools with specified root-volume-type, root-volume size and root-volume-iops")
var dftNodeCount = 1
volumeSizes := []int64{
64, 250, 512,
}
volumeIops := []int64{
4000, 6000,
}
awsConfigs := []struct {
nodepoolName string
rootVolumeSize *int64
rootVolumeType string
rootVolumeIOPS *int64
}{
{
nodepoolName: "jz-48025-01",
rootVolumeSize: &volumeSizes[0],
rootVolumeType: "gp2",
},
{
nodepoolName: "jz-48025-02",
rootVolumeSize: &volumeSizes[1],
rootVolumeType: "io1",
rootVolumeIOPS: &volumeIops[0],
},
{
nodepoolName: "jz-48025-03",
rootVolumeSize: &volumeSizes[2],
rootVolumeType: "io2",
rootVolumeIOPS: &volumeIops[1],
},
}
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
//delete nodepools simultaneously to save time
for _, cf := range awsConfigs {
hostedcluster.deleteNodePool(cf.nodepoolName)
}
for _, cf := range awsConfigs {
o.Eventually(hostedcluster.pollCheckDeletedNodePool(cf.nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}
}()
for _, cf := range awsConfigs {
NewAWSNodePool(cf.nodepoolName, hostedcluster.name, hostedcluster.namespace).
WithRootVolumeType(cf.rootVolumeType).
WithNodeCount(&dftNodeCount).
WithReleaseImage(releaseImage).
WithRootVolumeSize(cf.rootVolumeSize).
WithRootVolumeIOPS(cf.rootVolumeIOPS).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(cf.nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(),
fmt.Sprintf("nodepool %s ready error", cf.nodepoolName))
o.Expect(hostedcluster.checkAWSNodepoolRootVolumeType(cf.nodepoolName, cf.rootVolumeType)).To(o.BeTrue())
if cf.rootVolumeSize != nil {
o.Expect(hostedcluster.checkAWSNodepoolRootVolumeSize(cf.nodepoolName, *cf.rootVolumeSize)).To(o.BeTrue())
}
if cf.rootVolumeIOPS != nil {
o.Expect(hostedcluster.checkAWSNodepoolRootVolumeIOPS(cf.nodepoolName, *cf.rootVolumeIOPS)).To(o.BeTrue())
}
}
}) | |||||
test case | openshift/openshift-tests-private | 78cc1398-b3c5-4cba-bb8f-c7628a3b1e2e | HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-43553-Test MHC through nodePools[Disruptive] | ['"fmt"', '"path/filepath"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-43553-Test MHC through nodePools[Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 43553 is for AWS - skipping test ...")
}
g.By("create aws nodepool with replica 2")
npName := "43553np-" + strings.ToLower(exutil.RandStrDefault())
replica := 2
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&replica).
WithReleaseImage(releaseImage).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("enable autoRepair for the nodepool")
hostedcluster.setNodepoolAutoRepair(npName, "true")
o.Eventually(hostedcluster.pollCheckNodepoolAutoRepairEnabled(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s autoRepair enable error", npName))
g.By("find a hosted cluster node based on the nodepool")
labelFilter := "hypershift.openshift.io/nodePool=" + npName
nodes := hostedcluster.getHostedClusterNodeNameByLabelFilter(labelFilter)
o.Expect(nodes).ShouldNot(o.BeEmpty())
nodeName := strings.Split(nodes, " ")[0]
g.By("create a pod to kill kubelet in the corresponding node of the nodepool")
nsName := "guest-43553" + strings.ToLower(exutil.RandStrDefault())
defer doOcpReq(oc, "delete", true, "ns", nsName, "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
doOcpReq(oc, "create", true, "ns", nsName, "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
doOcpReq(oc, "label", true, "ns/"+nsName, "security.openshift.io/scc.podSecurityLabelSync=false", "pod-security.kubernetes.io/enforce=privileged", "pod-security.kubernetes.io/audit=privileged", "pod-security.kubernetes.io/warn=privileged", "--overwrite", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
kubeletKillerTemplate := filepath.Join(hypershiftTeamBaseDir, "kubelet-killer.yaml")
kk := kubeletKiller{
Name: "kubelet-killer-43553",
Namespace: nsName,
NodeName: nodeName,
Template: kubeletKillerTemplate,
}
//create kubelet-killer pod to kill kubelet
parsedWorkloadFile := "ocp-43553-kubelet-killer-template.config"
defer kk.delete(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile)
kk.create(oc, hostedcluster.getHostedClusterKubeconfigFile(), parsedWorkloadFile)
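// with the kubelet down, the node should be reported unhealthy by the MachineHealthCheck and, since autoRepair is enabled, eventually be replaced by a freshly provisioned node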
o.Eventually(hostedcluster.pollCheckNodeHealthByMHC(npName), ShortTimeout, ShortTimeout/10).ShouldNot(o.BeTrue(), fmt.Sprintf("mhc %s check failed", npName))
status := hostedcluster.getHostedClusterNodeReadyStatus(nodeName)
o.Expect(status).ShouldNot(o.BeEmpty())
// right after kubelet is killed, the node's Ready condition turns Unknown, so its status must not report "True"
o.Expect(status).ShouldNot(o.ContainSubstring("True"))
g.By("check if a new node is provisioned eventually")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(npName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(replica), fmt.Sprintf("node pool %s: not expected ready node number error", npName))
g.By("disable autoRepair")
hostedcluster.setNodepoolAutoRepair(npName, "false")
o.Eventually(hostedcluster.pollCheckNodepoolAutoRepairDisabled(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s autoRepair disable error", npName))
}) | |||||
test case | openshift/openshift-tests-private | 8f0a1b42-707f-4c69-a094-0f34205c090e | HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-48392-NodePool controller updates existing awsmachinetemplate when MachineDeployment rolled out[Serial][Disruptive] | ['"fmt"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-48392-NodePool controller updates existing awsmachinetemplate when MachineDeployment rolled out[Serial][Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48392 is for AWS - skipping test ...")
}
g.By("create aws nodepool with replica 2")
npName := "jz-48392-01"
replica := 2
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&replica).
WithReleaseImage(releaseImage).
WithInstanceType("m5.large").
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("update nodepool instance type and check the change")
expectedInstanceType := "m5.xlarge"
hostedcluster.setAWSNodepoolInstanceType(npName, expectedInstanceType)
o.Eventually(hostedcluster.pollCheckAWSNodepoolInstanceType(npName, expectedInstanceType), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s check instance type error", npName))
// check default rolling upgrade of instanceType
upgradeType := hostedcluster.getNodepoolUpgradeType(npName)
o.Expect(upgradeType).Should(o.ContainSubstring("Replace"))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeIntermediateStatus(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s check replace upgrade intermediate state error", npName))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeComplete(npName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s check replace upgrade complete state error", npName))
o.Expect(hostedcluster.checkNodepoolHostedClusterNodeInstanceType(npName)).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 73a5efc5-ce1b-45cf-a6a6-ff4d9d1b697e | HyperShiftMGMT-NonPreRelease-Longduration-Author:mihuang-Critical-48673-Unblock node deletion-draining timeout[Serial] | ['"fmt"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:mihuang-Critical-48673-Unblock node deletion-draining timeout[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48673 is for AWS - skipping test ...")
}
controlplaneNS := hostedcluster.namespace + "-" + hostedcluster.name
g.By("create aws nodepool with replica 1")
npName := "48673np-" + strings.ToLower(exutil.RandStrDefault())
replica := 1
releaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.release.image}")
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&replica).
WithReleaseImage(releaseImage).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("Get the awsmachines name")
awsMachines := doOcpReq(oc, OcpGet, true, "awsmachines", "-n", hostedcluster.namespace+"-"+hostedcluster.name, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].metadata.name}'`, hostedcluster.namespace, npName))
e2e.Logf("awsMachines: %s", awsMachines)
g.By("Set nodeDrainTimeout to 1m")
drainTime := "1m"
doOcpReq(oc, OcpPatch, true, "nodepools", npName, "-n", hostedcluster.namespace, "-p", fmt.Sprintf(`{"spec":{"nodeDrainTimeout":"%s"}}`, drainTime), "--type=merge")
o.Expect("True").To(o.Equal(doOcpReq(oc, OcpGet, true, "nodepool", npName, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="Ready")].status}`)))
g.By("check machinedeployment and machines")
mdDrainTimeRes := doOcpReq(oc, OcpGet, true, "machinedeployment", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mdDrainTimeRes).To(o.ContainSubstring(drainTime))
g.By("check machines.cluster.x-k8s.io")
mDrainTimeRes := doOcpReq(oc, OcpGet, true, "machines.cluster.x-k8s.io", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mDrainTimeRes).To(o.ContainSubstring(drainTime))
g.By("Check the guestcluster podDisruptionBudget are not be deleted")
pdbNameSpaces := []string{"openshift-console", "openshift-image-registry", "openshift-ingress", "openshift-monitoring", "openshift-operator-lifecycle-manager"}
for _, pdbNameSpace := range pdbNameSpaces {
o.Expect(doOcpReq(oc, OcpGet, true, "podDisruptionBudget", "-n", pdbNameSpace, "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)).ShouldNot(o.BeEmpty())
}
g.By("Scale the nodepool to 0")
doOcpReq(oc, OcpScale, true, "nodepool", npName, "-n", hostedcluster.namespace, "--replicas=0")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(npName), LongTimeout, LongTimeout/10).Should(o.Equal(0), fmt.Sprintf("nodepool did not scale down to 0 in hostedcluster %s", hostedcluster.name))
g.By("Scale the nodepool to 1")
doOcpReq(oc, OcpScale, true, "nodepool", npName, "-n", hostedcluster.namespace, "--replicas=1")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(npName), LongTimeout, LongTimeout/10).Should(o.Equal(1), fmt.Sprintf("nodepool did not scale back up to 1 in hostedcluster %s", hostedcluster.name))
g.By("check machinedeployment and machines")
mdDrainTimeRes = doOcpReq(oc, OcpGet, true, "machinedeployment", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mdDrainTimeRes).To(o.ContainSubstring(drainTime))
g.By("check machines.cluster.x-k8s.io")
mDrainTimeRes = doOcpReq(oc, OcpGet, true, "machines.cluster.x-k8s.io", "--ignore-not-found", "-n", controlplaneNS, fmt.Sprintf(`-ojsonpath='{.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.nodeDrainTimeout}'`, hostedcluster.namespace, npName))
o.Expect(mDrainTimeRes).To(o.ContainSubstring(drainTime))
}) | |||||
test case | openshift/openshift-tests-private | 2c9400a2-3729-41c6-b98e-bd5195627300 | ROSA-OSD_CCS-HyperShiftMGMT-Author:mihuang-Critical-48936-Test HyperShift cluster Infrastructure TopologyMode | ['"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("ROSA-OSD_CCS-HyperShiftMGMT-Author:mihuang-Critical-48936-Test HyperShift cluster Infrastructure TopologyMode", func() {
controllerAvailabilityPolicy := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.controllerAvailabilityPolicy}")
e2e.Logf("controllerAvailabilityPolicy is: %s", controllerAvailabilityPolicy)
if iaasPlatform == "aws" {
o.Expect(doOcpReq(oc, OcpGet, true, "infrastructure", "-ojsonpath={.items[*].status.controlPlaneTopology}")).Should(o.Equal(controllerAvailabilityPolicy))
}
o.Expect(doOcpReq(oc, OcpGet, true, "infrastructure", "-ojsonpath={.items[*].status.controlPlaneTopology}", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)).Should(o.Equal("External"))
}) | |||||
test case | openshift/openshift-tests-private | 6af9cad3-c8c8-43b2-bb19-5be0ff5443f1 | HyperShiftMGMT-NonPreRelease-Longduration-Author:mihuang-Critical-49436-Test Nodepool conditions[Serial] | ['"fmt"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"k8s.io/kubernetes/test/utils/format"', 'exutil "github.com/openshift/openshift-tests-private/test/extended/util"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:mihuang-Critical-49436-Test Nodepool conditions[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49436 is for AWS - skipping test ...")
}
g.By("Create nodepool and check nodepool conditions in progress util ready")
caseID := "49436"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
replica := 1
npNameInPlace := "49436np-inplace-" + strings.ToLower(exutil.RandStrDefault())
npNameReplace := "49436np-replace-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(npNameInPlace)
defer hostedcluster.deleteNodePool(npNameReplace)
hostedcluster.createAwsNodePool(npNameReplace, replica)
hostedcluster.createAwsInPlaceNodePool(npNameInPlace, replica, dir)
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameInPlace, []nodePoolCondition{{"Ready", "reason", "ScalingUp"}}), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "in place nodepool ready error")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameReplace, []nodePoolCondition{{"Ready", "reason", "WaitingForAvailableMachines"}, {"UpdatingConfig", "status", "True"}, {"UpdatingVersion", "status", "True"}}), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "replace nodepool ready error")
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npNameInPlace), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npNameReplace), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
hostedcluster.checkNodepoolAllConditions(npNameInPlace)
hostedcluster.checkNodepoolAllConditions(npNameReplace)
g.By("Set nodepool autoscaling, autorepair, and invaild payload image verify nodepool conditions should correctly generate")
hostedcluster.setNodepoolAutoScale(npNameReplace, "3", "1")
hostedcluster.setNodepoolAutoRepair(npNameReplace, "true")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameReplace, []nodePoolCondition{{"AutoscalingEnabled", "message", "Maximum nodes: 3, Minimum nodes: 1"}, {"AutorepairEnabled", "status", "True"}, {"ValidReleaseImage", "status", "True"}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
doOcpReq(oc, OcpPatch, true, "nodepools", npNameReplace, "-n", hostedcluster.namespace, "--type=merge", fmt.Sprintf(`--patch={"spec": {"replicas": 2}}`))
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameReplace, []nodePoolCondition{{"AutoscalingEnabled", "message", "only one of nodePool.Spec.Replicas or nodePool.Spec.AutoScaling can be set"}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
g.By("upgrade nodepool payload InPlace, enable autoscaling and autorepair verify nodepool conditions should correctly generate")
image := hostedcluster.getCPReleaseImage()
hostedcluster.checkNodepoolAllConditions(npNameInPlace)
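// intentionally malformed release image reference (doubled pullspec prefix) used to drive the ValidReleaseImage condition into an "invalid reference format" message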
hostedcluster.upgradeNodepoolPayloadInPlace(npNameInPlace, "quay.io/openshift-release-dev/ocp-release:quay.io/openshift-release-dev/ocp-release:4.13.0-ec.1-x86_64")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameInPlace, []nodePoolCondition{{"ValidReleaseImage", "message", "invalid reference format"}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
hostedcluster.upgradeNodepoolPayloadInPlace(npNameInPlace, image)
hostedcluster.setNodepoolAutoScale(npNameInPlace, "6", "3")
hostedcluster.setNodepoolAutoRepair(npNameInPlace, "true")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameInPlace, []nodePoolCondition{{"Ready", "reason", "ScalingUp"}, {"AutoscalingEnabled", "message", "Maximum nodes: 6, Minimum nodes: 3"}, {"AutorepairEnabled", "status", "True"}}), LongTimeout, LongTimeout/30).Should(o.BeTrue(), "nodepool in progress error")
g.By("create nodepool with minversion and verify nodepool condition")
npNameMinVersion := "49436np-minversion-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(npNameMinVersion)
NewAWSNodePool(npNameMinVersion, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&replica).WithReleaseImage("quay.io/openshift-release-dev/ocp-release:4.10.45-x86_64").CreateAWSNodePool()
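// 4.10.45 is expected to be below the operator's minimum supported OCP version, so the ValidReleaseImage condition message should reference the minimum supported version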
o.Eventually(hostedcluster.pollCheckNodePoolConditions(npNameMinVersion, []nodePoolCondition{{"ValidReleaseImage", "message", getMinSupportedOCPVersion()}}), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
}) | |||||
test case | openshift/openshift-tests-private | 8b3f64d3-a59b-4de6-adbe-db82c79167ef | HyperShiftMGMT-Author:liangli-Critical-54284-Hypershift creates extra EC2 instances | ['"fmt"', '"github.com/aws/aws-sdk-go/aws"', '"k8s.io/apimachinery/pkg/labels"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:liangli-Critical-54284-Hypershift creates extra EC2 instances", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 54284 is for AWS - skipping test ...")
}
autoCreatedForInfra := doOcpReq(oc, OcpGet, true, "nodepool", "-n", hostedcluster.namespace, fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].metadata.labels.hypershift\.openshift\.io/auto-created-for-infra}`, hostedcluster.name))
e2e.Logf("autoCreatedForInfra:" + autoCreatedForInfra)
nodepoolName := doOcpReq(oc, OcpGet, true, "nodepool", "-n", hostedcluster.namespace, fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].metadata.name}`, hostedcluster.name))
e2e.Logf("nodepoolName:" + nodepoolName)
additionalTags := doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, nodepoolName, fmt.Sprintf(`-ojsonpath={.spec.template.spec.additionalTags.kubernetes\.io/cluster/%s}`, autoCreatedForInfra))
o.Expect(additionalTags).Should(o.ContainSubstring("owned"))
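// a generation of 1 on the awsmachinetemplate means it was never re-created, i.e. no extra EC2 instances should be spawned for this nodepool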
generation := doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, nodepoolName, `-ojsonpath={.metadata.generation}`)
o.Expect(generation).Should(o.Equal("1"))
}) | |||||
test case | openshift/openshift-tests-private | f2c8adb0-8849-4f76-9c2e-3f47dfb67959 | HyperShiftMGMT-Longduration-NonPreRelease-Author:liangli-Critical-54551-Reconcile NodePool label against Nodes[Disruptive] | ['"fmt"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"k8s.io/apimachinery/pkg/labels"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:liangli-Critical-54551-Reconcile NodePool label against Nodes[Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 54551 is for AWS - skipping test ...")
}
replica := 1
nodepoolName := "54551np-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(nodepoolName)
hostedcluster.createAwsNodePool(nodepoolName, replica)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(nodepoolName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
g.By("Check if the nodepool name is propagated from the nodepool to the machine annotation")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "awsmachines", "-n", hostedcluster.namespace+"-"+hostedcluster.name, `-ojsonpath={.items[*].metadata.annotations.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replica))
g.By("Check if the nodepool name is propagated from machine to node label")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "node", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile, `-ojsonpath={.items[*].metadata.labels.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replica))
g.By("Scale up the nodepool")
replicasIntNew := replica + 1
defer func() {
doOcpReq(oc, OcpScale, true, "nodepool", "-n", hostedcluster.namespace, nodepoolName, fmt.Sprintf("--replicas=%d", replica))
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(nodepoolName), LongTimeout, LongTimeout/10).Should(o.Equal(replica), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedcluster.name))
}()
doOcpReq(oc, OcpScale, true, "nodepool", "-n", hostedcluster.namespace, nodepoolName, fmt.Sprintf("--replicas=%d", replicasIntNew))
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(nodepoolName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(replicasIntNew), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedcluster.name))
g.By("Check if the nodepool name is propagated from the nodepool to the machine annotation")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "awsmachines", "-n", hostedcluster.namespace+"-"+hostedcluster.name, `-ojsonpath={.items[*].metadata.annotations.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replicasIntNew))
g.By("Check if the nodepool name is propagated from machine to node label")
o.Expect(strings.Count(doOcpReq(oc, OcpGet, true, "node", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile, `-ojsonpath={.items[*].metadata.labels.hypershift\.openshift\.io/nodePool}`), nodepoolName)).Should(o.Equal(replicasIntNew))
}) | |||||
test case | openshift/openshift-tests-private | 62807e50-4320-4860-b56c-6390466bd7e3 | Author:mihuang-ROSA-OSD_CCS-HyperShiftMGMT-Longduration-NonPreRelease-Critical-49108-Critical-49499-Critical-59546-Critical-60490-Critical-61970-Separate client certificate trust from the global hypershift CA | ['"context"', '"encoding/base64"', '"fmt"', '"os"', '"strings"', 'g "github.com/onsi/ginkgo/v2"', '"github.com/aws/aws-sdk-go/aws"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/labels"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("Author:mihuang-ROSA-OSD_CCS-HyperShiftMGMT-Longduration-NonPreRelease-Critical-49108-Critical-49499-Critical-59546-Critical-60490-Critical-61970-Separate client certificate trust from the global hypershift CA", func(ctx context.Context) {
if iaasPlatform != "aws" && iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49108 is for AWS or Azure - For other platforms, please set the corresponding expectedMetric to make this case effective. Skipping test ...")
}
exutil.SkipOnAKSNess(ctx, oc, false)
g.By("OCP-61970: OCPBUGS-10792-Changing the api group of the hypershift namespace servicemonitor back to coreos.com")
o.Expect(doOcpReq(oc, OcpGet, true, "servicemonitor", "-n", "hypershift", "-ojsonpath={.items[*].apiVersion}")).Should(o.ContainSubstring("coreos.com"))
g.By("Add label to namespace enable monitoring for hosted control plane component.")
defer doOcpReq(oc, "label", true, "namespace", hostedcluster.namespace+"-"+hostedcluster.name, "openshift.io/cluster-monitoring-")
doOcpReq(oc, "label", true, "namespace", hostedcluster.namespace+"-"+hostedcluster.name, "openshift.io/cluster-monitoring=true", "--overwrite=true")
g.By("OCP-49499 && 49108 Check metric works well for the hosted control plane component.")
o.Expect(doOcpReq(oc, OcpGet, true, "ns", hostedcluster.namespace+"-"+hostedcluster.name, "--show-labels")).Should(o.ContainSubstring("openshift.io/cluster-monitoring=true"))
serviceMonitors := strings.Split(doOcpReq(oc, OcpGet, true, "servicemonitors", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.items[*].metadata.name}"), " ")
o.Expect(serviceMonitors).ShouldNot(o.BeEmpty())
podMonitors := strings.Split(doOcpReq(oc, OcpGet, true, "podmonitors", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.items[*].metadata.name}"), " ")
o.Expect(podMonitors).ShouldNot(o.BeEmpty())
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
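// use the SA token to query the Thanos querier alerts API in openshift-monitoring; a "success" status indicates the management-side monitoring stack is up and serving the API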
o.Eventually(func() bool {
return strings.Contains(doOcpReq(oc, OcpExec, true, "-n", "openshift-monitoring", "prometheus-k8s-0", "-c", "prometheus", "--", "sh", "-c", fmt.Sprintf(" curl -k -g -H \"Authorization: Bearer %s\" https://thanos-querier.openshift-monitoring.svc:9091/api/v1/alerts", token)), `"status":"success"`)
}, 5*LongTimeout, LongTimeout/5).Should(o.BeTrue(), fmt.Sprintf("not all metrics in hostedcluster %s are ready", hostedcluster.name))
o.Eventually(func() bool {
metricsOutput, err := oc.AsAdmin().Run("exec").Args("-n", "openshift-monitoring", "prometheus-k8s-0", "-c", "prometheus", "--", "sh", "-c", fmt.Sprintf("curl -sS --cacert /etc/prometheus/certs/configmap_%s_root-ca_ca.crt --key /etc/prometheus/certs/secret_%s_metrics-client_tls.key --cert /etc/prometheus/certs/secret_%s_metrics-client_tls.crt https://openshift-apiserver.%s.svc/metrics", hostedcluster.namespace+"-"+hostedcluster.name, hostedcluster.namespace+"-"+hostedcluster.name, hostedcluster.namespace+"-"+hostedcluster.name, hostedcluster.namespace+"-"+hostedcluster.name)).Output()
if err != nil {
return false
}
var expectedMetric string
switch iaasPlatform {
case "aws":
expectedMetric = "# HELP aggregator_openapi_v2_regeneration_count [ALPHA] Counter of OpenAPI v2 spec regeneration count broken down by causing APIService name and reason."
case "azure":
expectedMetric = "# HELP aggregator_discovery_aggregation_count_total [ALPHA] Counter of number of times discovery was aggregated"
}
return strings.Contains(metricsOutput, expectedMetric)
}, 5*LongTimeout, LongTimeout/5).Should(o.BeTrue(), fmt.Sprintf("not all metrics in hostedcluster %s are ready", hostedcluster.name))
g.By("OCP-49499 Check the clusterID is exist")
o.Expect(doOcpReq(oc, OcpGet, true, "hostedclusters", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.spec.clusterID}")).ShouldNot(o.BeEmpty())
g.By("OCP-49499 Check the clusterID label in serviceMonitors/podMonitors and target is up")
o.Expect(doOcpReq(oc, OcpExec, true, "-n", "openshift-monitoring", "prometheus-k8s-0", "-c", "prometheus", "--", "sh", "-c", `curl -k -H "Authorization: Bearer `+token+`" https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`)).Should(o.ContainSubstring("up"))
for _, serviceMonitor := range serviceMonitors {
o.Expect(doOcpReq(oc, OcpGet, true, "servicemonitors", serviceMonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.spec.endpoints[?(@.relabelings)]}")).Should(o.ContainSubstring(`"targetLabel":"_id"`))
o.Expect(doOcpReq(oc, OcpGet, true, "servicemonitors", serviceMonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.apiVersion}")).Should(o.ContainSubstring("coreos.com"))
}
for _, podmonitor := range podMonitors {
o.Expect(doOcpReq(oc, OcpGet, true, "podmonitors", podmonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.spec.podMetricsEndpoints[?(@.relabelings)]}")).Should(o.ContainSubstring(`"targetLabel":"_id"`))
o.Expect(doOcpReq(oc, OcpGet, true, "podmonitors", podmonitor, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-ojsonpath={.apiVersion}")).Should(o.ContainSubstring("coreos.com"))
}
g.By("OCP-59546 Export HostedCluster metrics")
hostedClusterMetricsName := []string{"hypershift_cluster_available_duration_seconds", "hypershift_cluster_deletion_duration_seconds", "hypershift_cluster_guest_cloud_resources_deletion_duration_seconds", "hypershift_cluster_identity_providers", "hypershift_cluster_initial_rollout_duration_seconds", "hypershift_cluster_limited_support_enabled", "hypershift_cluster_proxy", "hypershift_hostedclusters", "hypershift_hostedclusters_failure_conditions", "hypershift_hostedcluster_nodepools", "hypershift_nodepools", "hypershift_nodepools_failure_conditions", "hypershift_nodepools_size"}
hypershiftOperatorPodName := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", "hypershift", "-l", "app=operator", `-ojsonpath={.items[*].metadata.name}`), " ")
var metrics []string
for _, podName := range hypershiftOperatorPodName {
for _, name := range hostedClusterMetricsName {
if strings.Contains(doOcpReq(oc, OcpExec, true, "-n", "hypershift", podName, "--", "curl", "0.0.0.0:9000/metrics"), name) {
metrics = append(metrics, name)
}
}
}
e2e.Logf("metrics: %v is exported by hypershift operator", metrics)
g.By("OCP-60490 Verify that cert files not been modified")
dirname := "/tmp/kube-root-60490"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
doOcpReq(oc, "cp", true, "-n", "openshift-console", doOcpReq(oc, OcpGet, true, "pod", "-n", "openshift-console", "-o", "jsonpath={.items[0].metadata.name}")+":"+fmt.Sprintf("/var/run/secrets/kubernetes.io/serviceaccount/..data/ca.crt"), dirname+"/serviceaccount_ca.crt")
doOcpReq(oc, "extract", true, "cm/kube-root-ca.crt", "-n", "openshift-console", "--to="+dirname, "--confirm")
var bashClient = NewCmdClient().WithShowInfo(true)
md5Value1, err := bashClient.Run(fmt.Sprintf("md5sum %s | awk '{print $1}'", dirname+"/serviceaccount_ca.crt")).Output()
o.Expect(err).NotTo(o.HaveOccurred())
md5Value2, err := bashClient.Run(fmt.Sprintf("md5sum %s | awk '{print $1}'", dirname+"/ca.crt")).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(md5Value1).Should(o.Equal(md5Value2))
g.By("Verify that client certificate trust is separated from the global Hypershift CA")
o.Expect(bashClient.Run(fmt.Sprintf("grep client-certificate-data %s | grep -Eo \"[^ ]+$\" | base64 -d > %s", os.Getenv("KUBECONFIG"), dirname+"/system-admin_client.crt")).Output()).Should(o.BeEmpty())
res1, err := bashClient.Run(fmt.Sprintf("openssl verify -CAfile %s %s", dirname+"/serviceaccount_ca.crt", dirname+"/system-admin_client.crt")).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(res1).Should(o.ContainSubstring(fmt.Sprintf("error %s: verification failed", dirname+"/system-admin_client.crt")))
res2, err := bashClient.Run(fmt.Sprintf("openssl verify -CAfile %s %s", dirname+"/ca.crt", dirname+"/system-admin_client.crt")).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(res2).Should(o.ContainSubstring(fmt.Sprintf("error %s: verification failed", dirname+"/system-admin_client.crt")))
}) | |||||
test case | openshift/openshift-tests-private | 097668f5-978b-4c97-9f70-b61e3a1de489 | HyperShiftMGMT-Longduration-NonPreRelease-Author:mihuang-Critical-60744-Better signal for NodePool inability to talk to management side [Disruptive] [Flaky] | ['"fmt"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"k8s.io/apimachinery/pkg/labels"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:mihuang-Critical-60744-Better signal for NodePool inability to talk to management side [Disruptive] [Flaky]", func() {
g.By("Create a nodepool to verify that NodePool inability to talk to management side")
if hostedclusterPlatform != "aws" {
g.Skip("HostedCluster platform is " + hostedclusterPlatform + " which is not supported in this test.")
}
replica := 1
nodepoolName := "60744np-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(nodepoolName)
hostedcluster.createAwsNodePool(nodepoolName, replica)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(nodepoolName), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
o.Expect(hostedcluster.checkNodePoolConditions(nodepoolName, []nodePoolCondition{{"ReachedIgnitionEndpoint", "status", "True"}})).Should(o.BeTrue(), "nodepool ready error")
g.By("Check if metric 'ign_server_get_request' is exposed for nodepool by ignition server")
o.Expect(strings.Contains(doOcpReq(oc, "logs", true, "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-l", "app=ignition-server"), "ignition")).Should(o.BeTrue())
ignitionPodNameList := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "-o", `jsonpath={.items[?(@.metadata.labels.app=="ignition-server")].metadata.name}`), " ")
foundMetric := false
for _, ignitionPodName := range ignitionPodNameList {
if strings.Contains(doOcpReq(oc, OcpExec, true, "-n", hostedcluster.namespace+"-"+hostedcluster.name, ignitionPodName, "--", "curl", "0.0.0.0:8080/metrics"), fmt.Sprintf(`ign_server_get_request{nodePool="clusters/%s"}`, nodepoolName)) {
foundMetric = true
break
}
}
o.Expect(foundMetric).Should(o.BeTrue(), "ignition server get request metric not found")
g.By("Modify ACL on VPC, deny all inbound and outbound traffic")
vpc := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", "jsonpath={.spec.platform.aws.cloudProviderConfig.vpc}")
region := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", "jsonpath={.spec.platform.aws.region}")
var bashClient = NewCmdClient().WithShowInfo(true)
acl, err := bashClient.Run(fmt.Sprintf(`aws ec2 describe-network-acls --filters Name=vpc-id,Values=%s --query 'NetworkAcls[].NetworkAclId' --region %s --output text`, vpc, region)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(acl).Should(o.ContainSubstring("acl-"))
defer func() {
bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --ingress --rule-number 100 --protocol -1 --rule-action allow --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
}()
cmdOutDeny1, err := bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --ingress --rule-number 100 --protocol -1 --rule-action deny --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmdOutDeny1).Should(o.BeEmpty())
defer func() {
bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --egress --rule-number 100 --protocol -1 --rule-action allow --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
}()
cmdOutDeny2, err := bashClient.Run(fmt.Sprintf(`aws ec2 replace-network-acl-entry --network-acl-id %s --egress --rule-number 100 --protocol -1 --rule-action deny --cidr-block 0.0.0.0/0 --region %s`, acl, region)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmdOutDeny2).Should(o.BeEmpty())
g.By("Check metric 'ign_server_get_request' is not exposed for nodepool by ignition server after ACL modification")
nodepoolName1 := "60744np-1-" + strings.ToLower(exutil.RandStrDefault())
defer hostedcluster.deleteNodePool(nodepoolName1)
hostedcluster.createAwsNodePool(nodepoolName1, replica)
hostedcluster.setNodepoolAutoRepair(nodepoolName1, "true")
o.Eventually(hostedcluster.pollCheckNodePoolConditions(nodepoolName1, []nodePoolCondition{{"ReachedIgnitionEndpoint", "status", "False"}, {"AutorepairEnabled", "status", "False"}}), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.BeTrue(), "nodepool in progress error")
for _, ignitionPodName := range ignitionPodNameList {
o.Expect(doOcpReq(oc, OcpExec, true, "-n", hostedcluster.namespace+"-"+hostedcluster.name, ignitionPodName, "--", "curl", "0.0.0.0:8080/metrics")).ShouldNot(o.ContainSubstring(fmt.Sprintf(`ign_server_get_request{nodePool="clusters/%s"}`, nodepoolName1)))
}
}) | |||||
test case | openshift/openshift-tests-private | 2534ca68-e667-474e-8cdb-c81679ca88c4 | HyperShiftMGMT-Author:mihuang-Critical-60903-Test must-gather on the hostedcluster | ['"fmt"', '"os"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:mihuang-Critical-60903-Test must-gather on the hostedcluster", func() {
mustgatherDir := "/tmp/must-gather-60903"
defer os.RemoveAll(mustgatherDir)
err := os.MkdirAll(mustgatherDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check must-gather works well on the hostedcluster.")
err = oc.AsGuestKubeconf().Run(OcpAdm).Args("must-gather", "--dest-dir="+mustgatherDir, "--", "/usr/bin/gather_audit_logs").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred(), "error running must-gather against the HC")
var bashClient = NewCmdClient().WithShowInfo(true)
cmdOut, err := bashClient.Run(fmt.Sprintf(`du -h %v`, mustgatherDir)).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(cmdOut).ShouldNot(o.Equal("0B"))
}) | |||||
test case | openshift/openshift-tests-private | f8b5b6ca-9b95-4f1b-a78b-7311ab90bb3a | HyperShiftMGMT-Author:mihuang-Critical-61604-Validate network input and signal in hyperv1.ValidHostedClusterConfiguration[Disruptive] | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:mihuang-Critical-61604-Validate network input and signal in hyperv1.ValidHostedClusterConfiguration[Disruptive]", func() {
g.By("Patch hostedcluster to set network to invalid value and check the ValidConfiguration conditions of hostedcluster CR")
clusterNetworkCidr := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", `jsonpath={.spec.networking.clusterNetwork[0].cidr}`)
defer doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"networking":{"clusterNetwork":[{"cidr": "`+clusterNetworkCidr+`"}]}}}`)
doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"networking":{"clusterNetwork":[{"cidr": "172.31.0.0/16"}]}}}`)
o.Eventually(func() bool {
if strings.Contains(doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-o", `jsonpath={.status.conditions[?(@.type=="ValidConfiguration")].reason}`), "InvalidConfiguration") {
return true
}
return false
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "conditions are not changed")
}) | |||||
test case | openshift/openshift-tests-private | 74280d81-d55e-4f6c-973d-0e26091a95a4 | HyperShiftMGMT-Author:mihuang-Critical-62195-Add validation for taint.value in nodePool[Serial][Disruptive] | ['"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:mihuang-Critical-62195-Add validation for taint.value in nodePool[Serial][Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 62195 is for AWS - skipping test ...")
}
g.By("Create a nodepool with invalid taint value and check the ValidConfiguration conditions of hostedcluster CR")
nodepoolName := "62195np" + strings.ToLower(exutil.RandStrDefault())
defer func() {
hostedcluster.deleteNodePool(nodepoolName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
hostedcluster.createAwsNodePool(nodepoolName, 1)
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(nodepoolName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
nodeName := doOcpReq(oc, OcpGet, true, "node", "-l", "hypershift.openshift.io/nodePool="+nodepoolName, "-ojsonpath={.items[*].metadata.name}", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "nodes", nodeName, "node-role.kubernetes.io/infra=//:NoSchedule", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile).Output()
o.Expect(err).Should(o.HaveOccurred())
defer doOcpReq(oc, OcpAdm, true, "taint", "nodes", nodeName, "node-role.kubernetes.io/infra=foo:NoSchedule-", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "nodes", nodeName, "node-role.kubernetes.io/infra=foo:NoSchedule", "--overwrite", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(doOcpReq(oc, OcpGet, true, "node", nodeName, "-o", "jsonpath={.spec.taints[0].value}", "--kubeconfig="+hostedcluster.hostedClustersKubeconfigFile)).Should(o.Equal("foo"))
}) | |||||
test case | openshift/openshift-tests-private | b49dc9ff-e1f0-4a88-b127-693163d7dc15 | HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-60140-[AWS]-create default security group when no security group is specified in a nodepool[Serial] | ['"fmt"', '"os"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:heli-Critical-60140-[AWS]-create default security group when no security group is specified in a nodepool[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while ocp-60140 is for AWS - skipping test ...")
}
caseID := "60140"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check hosted cluster's default worker securitygroup ID")
defaultSG := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}`)
e2e.Logf("defaultWorkerSecurityGroupID in hostedcluster is %s", defaultSG)
g.By("check nodepool and awsmachinetemplate's securitygroup ID")
nodepoolName := doOcpReq(oc, OcpGet, true, "nodepool", "-n", hostedcluster.namespace, "--ignore-not-found", fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].metadata.name}`, hostedcluster.name))
o.Expect(nodepoolName).ShouldNot(o.BeEmpty())
if arr := strings.Split(nodepoolName, " "); len(arr) > 1 {
nodepoolName = arr[0]
}
// OCPBUGS-29723,HOSTEDCP-1419 make sure there is no sg spec in nodepool
o.Expect(doOcpReq(oc, OcpGet, false, "nodepool", "-n", hostedcluster.namespace, nodepoolName, "--ignore-not-found", `-ojsonpath={.spec.platform.aws.securityGroups}`)).Should(o.BeEmpty())
queryJson := fmt.Sprintf(`-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.additionalSecurityGroups[0].id}`, hostedcluster.namespace, nodepoolName)
o.Expect(doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "--ignore-not-found", "-n", hostedcluster.namespace+"-"+hostedcluster.name, queryJson)).Should(o.ContainSubstring(defaultSG))
g.By("create nodepool without default securitygroup")
npCount := 1
npWithoutSG := "np-60140-default-sg"
defer func() {
hostedcluster.deleteNodePool(npWithoutSG)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npWithoutSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
// OCPBUGS-29723,HOSTEDCP-1419 there is no sg spec in np now. Just use NewAWSNodePool() to create a np without sg settings
NewAWSNodePool(npWithoutSG, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&npCount).CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npWithoutSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), " check np ready error")
g.By("check the new nodepool should use the default sg in the hosted cluster")
queryJson = fmt.Sprintf(`-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.additionalSecurityGroups[0].id}`, hostedcluster.namespace, npWithoutSG)
o.Expect(doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, queryJson)).Should(o.ContainSubstring(defaultSG))
g.By("create sg by aws client and use it to create a nodepool")
vpcID := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.spec.platform.aws.cloudProviderConfig.vpc}`)
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
groupID, err := awsClient.CreateSecurityGroup(fmt.Sprintf("ocp-60140-sg-%s", strings.ToLower(exutil.RandStrDefault())), vpcID, "hypershift ocp-60140")
o.Expect(err).ShouldNot(o.HaveOccurred())
defer awsClient.DeleteSecurityGroup(groupID)
npWithExistingSG := "np-60140-existing-sg"
defer func() {
hostedcluster.deleteNodePool(npWithExistingSG)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npWithExistingSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npWithExistingSG, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&npCount).WithSecurityGroupID(groupID).CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npWithExistingSG), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npWithExistingSG))
queryJson = fmt.Sprintf(`-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.template.spec.additionalSecurityGroups}`, hostedcluster.namespace, npWithExistingSG)
sgInfo := doOcpReq(oc, OcpGet, true, "awsmachinetemplate", "-n", hostedcluster.namespace+"-"+hostedcluster.name, queryJson)
o.Expect(sgInfo).Should(o.ContainSubstring(groupID))
// HOSTEDCP-1419 the default sg should be included all the time
o.Expect(sgInfo).Should(o.ContainSubstring(defaultSG))
g.By("nodepool security group test passed")
}) | |||||
test case | openshift/openshift-tests-private | 96d0e314-3130-4433-8278-90127af4a730 | HyperShiftMGMT-Author:heli-Critical-63867-[AWS]-awsendpointservice uses the default security group for the VPC Endpoint | ['"fmt"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:heli-Critical-63867-[AWS]-awsendpointservice uses the default security group for the VPC Endpoint", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while ocp-63867 is for AWS - skipping test ...")
}
endpointAccessType := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.spec.platform.aws.endpointAccess}`)
if endpointAccessType != PublicAndPrivate && endpointAccessType != Private {
g.Skip(fmt.Sprintf("ocp-63867 is for PublicAndPrivate or Private hosted clusters on AWS, skip it for the endpointAccessType is %s", endpointAccessType))
}
g.By("check status of cluster again by condition type Available")
status := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="Available")].status}`)
o.Expect(status).Should(o.Equal("True"))
g.By("get default sg of vpc")
vpcID := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.spec.platform.aws.cloudProviderConfig.vpc}`)
e2e.Logf("hc vpc is %s", vpcID)
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
defaultVPCSG, err := awsClient.GetDefaultSecurityGroupByVpcID(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("in PublicAndPrivate or Private clusters, default sg of vpc should not has hypershift tags kubernetes.io/cluster/{infra-id}:owned")
hcTagKey := HyperShiftResourceTagKeyPrefix + hcInfraID
for _, tag := range defaultVPCSG.Tags {
if tag.Key != nil && *tag.Key == hcTagKey {
o.Expect(*tag.Value).ShouldNot(o.Equal(HyperShiftResourceTagKeyValue))
}
}
g.By("check hosted cluster's default worker security group ID")
defaultWorkerSG := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}`)
e2e.Logf("defaultWorkerSecurityGroupID in hostedcluster is %s", defaultWorkerSG)
o.Expect(defaultWorkerSG).NotTo(o.Equal(defaultVPCSG))
g.By("check endpointID by vpc")
endpointIDs := doOcpReq(oc, OcpGet, true, "awsendpointservice", "-n", hostedcluster.namespace+"-"+hostedcluster.name, "--ignore-not-found", `-ojsonpath={.items[*].status.endpointID}`)
endpointIDArr := strings.Split(endpointIDs, " ")
o.Expect(endpointIDArr).ShouldNot(o.BeEmpty())
for _, epID := range endpointIDArr {
sgs, err := awsClient.GetSecurityGroupsByVpcEndpointID(epID)
o.Expect(err).NotTo(o.HaveOccurred())
for _, sg := range sgs {
e2e.Logf("endpoint %s security group %s, %s, ", epID, *sg.GroupId, *sg.GroupName)
o.Expect(*sg.GroupId).Should(o.Equal(defaultWorkerSG))
o.Expect(*sg.GroupName).Should(o.Equal(hcInfraID + "-default-sg"))
}
}
g.By("ocp-63867 the default security group of endpointservice test passed")
}) | |||||
test case | openshift/openshift-tests-private | 705d3011-3e6a-4d7e-81f3-604c7a8133ca | HyperShiftMGMT-Author:liangli-Critical-48510-Test project configuration resources on the guest cluster[Disruptive] | ['"context"', '"fmt"', '"io/ioutil"', '"os"', '"path/filepath"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:liangli-Critical-48510-Test project configuration resources on the guest cluster[Disruptive]", func() {
caseID := "48510"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
g.By("Generate the default project template")
_, err = bashClient.Run(fmt.Sprintf("oc adm create-bootstrap-project-template -oyaml --kubeconfig=%s > %s", hostedcluster.hostedClustersKubeconfigFile, dir+"/template.yaml")).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Add ResourceQuota and LimitRange in the template")
patchYaml := `- apiVersion: v1
kind: "LimitRange"
metadata:
name: ${PROJECT_NAME}-limits
spec:
limits:
- type: "Container"
default:
cpu: "1"
memory: "1Gi"
defaultRequest:
cpu: "500m"
memory: "500Mi"
- apiVersion: v1
kind: ResourceQuota
metadata:
name: ${PROJECT_NAME}-quota
spec:
hard:
pods: "10"
requests.cpu: "4"
requests.memory: 8Gi
limits.cpu: "6"
limits.memory: 16Gi
requests.storage: 20G
`
tempFilePath := filepath.Join(dir, "temp.yaml")
err = ioutil.WriteFile(tempFilePath, []byte(patchYaml), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
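// GNU sed's "e" command runs "cat temp.yaml" when it reaches the "parameters:" line, splicing the LimitRange and ResourceQuota objects into the template's objects list just ahead of the parameters section (assumes GNU sed is available)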
_, err = bashClient.Run(fmt.Sprintf(`sed -i '/^parameters:/e cat %s' %s`, dir+"/temp.yaml", dir+"/template.yaml")).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
defer hostedcluster.oc.AsGuestKubeconf().Run("delete").Args("-f", dir+"/template.yaml", "-n", "openshift-config").Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("apply").Args("-f", dir+"/template.yaml", "-n", "openshift-config").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Edit the project config resource to include projectRequestTemplate in the spec")
defer hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestTemplate": null}}`).Execute()
}, "openshift-apiserver")
hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestTemplate":{"name":"project-request"}}}`).Execute()
}, "openshift-apiserver")
g.By("Create a new project 'test-48510'")
origContxt, contxtErr := oc.SetGuestKubeconf(hostedcluster.hostedClustersKubeconfigFile).AsGuestKubeconf().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
err = oc.SetGuestKubeconf(hostedcluster.hostedClustersKubeconfigFile).AsGuestKubeconf().Run("config").Args("use-context", origContxt).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = hostedcluster.oc.AsGuestKubeconf().Run("delete").Args("project", "test-48510").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = hostedcluster.oc.AsGuestKubeconf().Run("new-project").Args("test-48510").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Check if new project config resource includes ResourceQuota and LimitRange")
testProjectDes, err := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("resourcequota", "-n", "test-48510", "-oyaml").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
checkSubstring(testProjectDes, []string{`pods: "10"`, `requests.cpu: "4"`, `requests.memory: 8Gi`, `limits.cpu: "6"`, `limits.memory: 16Gi`, `requests.storage: 20G`})
g.By("Disable project self-provisioning, remove the self-provisioner cluster role from the group")
defer hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"subjects": [{"apiGroup":"rbac.authorization.k8s.io","kind":"Group","name":"system:authenticated:oauth"}]}`).Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"subjects": null}`).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
selfProvisionersDes, err := hostedcluster.oc.AsGuestKubeconf().Run("describe").Args("clusterrolebinding.rbac", "self-provisioners").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(selfProvisionersDes).ShouldNot(o.ContainSubstring("system:authenticated:oauth"))
g.By("Edit the self-provisioners cluster role binding to prevent automatic updates to the role")
defer hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"metadata":{"annotations":{"rbac.authorization.kubernetes.io/autoupdate":"true"}}}`).Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("clusterrolebinding.rbac", "self-provisioners", "-p", `{"metadata":{"annotations":{"rbac.authorization.kubernetes.io/autoupdate":"false"}}}`).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
selfProvisionersDes, err = hostedcluster.oc.AsGuestKubeconf().Run("describe").Args("clusterrolebinding.rbac", "self-provisioners").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(selfProvisionersDes).ShouldNot(o.ContainSubstring(`rbac.authorization.kubernetes.io/autoupdate: "false"`))
g.By("Edit project config resource to include the project request message")
defer hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestMessage": null}}`).Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("project.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"projectRequestMessage":"To request a project, contact your system administrator at [email protected] :-)"}}`).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Create a new project as a non-admin user")
var testRequestMess string
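// With self-provisioning disabled, requesting a project as an impersonated non-admin user should be rejected
// and the configured projectRequestMessage should be returned in the CLI output instead.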
o.Eventually(func() string {
testRequestMess, _ = bashClient.Run(fmt.Sprintf("oc new-project test-request-message --as=liangli --as-group=system:authenticated --as-group=system:authenticated:oauth --kubeconfig=%s || true", hostedcluster.hostedClustersKubeconfigFile)).Output()
return testRequestMess
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("To request a project, contact your system administrator at [email protected] :-)"), "check projectRequestMessage error")
}) | |||||
test case | openshift/openshift-tests-private | 01d0c366-605f-44b0-af01-c3efaa27c99a | HyperShiftMGMT-NonPreRelease-Longduration-Author:heli-Critical-52318-[AWS]-Enforce machineconfiguration.openshift.io/role worker in machine config[Serial] | ['"fmt"', '"path/filepath"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:heli-Critical-52318-[AWS]-Enforce machineconfiguration.openshift.io/role worker in machine config[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Skipping incompatible platform %s", iaasPlatform))
}
g.By("create a configmap for MachineConfig")
fakePubKey := "AAAAB3NzaC1yc2EAAAADAQABAAABgQC0IRdwFtIIy0aURM64dDy0ogqJlV0aqDqw1Pw9VFc8bFSI7zxQ2c3Tt6GrC+Eg7y6mXQbw59laiGlyA+Qmyg0Dgd7BUVg1r8j" +
"RR6Xhf5XbI+tQBhoTQ6BBJKejE60LvyVUiBstGAm7jy6BkfN/5Ulvd8r3OVDYcKczVECWuOQeuPRyTHomR4twQj79+shZkN6tjptQOTTSDJJYIZOmaj9TsDN4bLIxqDYWZC0F6+" +
"TvBoRV7xxOBU8DHxZ9wbCZN4IyEs6U77G8bQBP2Pjbp5NrG93nvdnLcv" +
`CDsnSOFuiay1KNqjOclIlsrb84qN9TFL3PgLoGohz2vInlaTnopCh4m7+xDgu5bdh1B/hNjDHDTHFpHPP8z7vkWM0I4I8q853E4prGRBpyVztcObeDr/0M/Vnwawyb9Lia16J5hSBi0o3UjxE= jiezhao@cube`
configmapMachineConfTemplate := filepath.Join(hypershiftTeamBaseDir, "configmap-machineconfig.yaml")
configmapName := "custom-ssh-config-52318"
cm := configmapMachineConf{
Name: configmapName,
Namespace: hostedcluster.namespace,
SSHAuthorizedKeys: fakePubKey,
Template: configmapMachineConfTemplate,
}
parsedCMFile := "ocp-52318-configmap-machineconfig-template.config"
defer cm.delete(oc, "", parsedCMFile)
cm.create(oc, "", parsedCMFile)
doOcpReq(oc, OcpGet, true, "configmap", configmapName, "-n", hostedcluster.namespace)
g.By("create a nodepool")
npName := "np-52318"
npCount := 1
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).WithNodeCount(&npCount).CreateAWSNodePool()
patchOptions := fmt.Sprintf(`{"spec":{"config":[{"name":"%s"}]}}`, configmapName)
doOcpReq(oc, OcpPatch, true, "nodepool", npName, "-n", hostedcluster.namespace, "--type", "merge", "-p", patchOptions)
g.By("condition UpdatingConfig should be here to reflect nodepool config rolling upgrade")
o.Eventually(func() bool {
return "True" == doOcpReq(oc, OcpGet, false, "nodepool", npName, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="UpdatingConfig")].status}`)
}, ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "nodepool condition UpdatingConfig not found error")
g.By("condition UpdatingConfig should be removed when upgrade completed")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, false, "nodepool", npName, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="UpdatingConfig")].status}`)
}, LongTimeout, LongTimeout/10).Should(o.BeEmpty(), "nodepool condition UpdatingConfig should be removed")
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
g.By("check ssh key in worker nodes")
o.Eventually(func() bool {
workerNodes := hostedcluster.getNodeNameByNodepool(npName)
o.Expect(workerNodes).ShouldNot(o.BeEmpty())
for _, node := range workerNodes {
res, err := hostedcluster.DebugHostedClusterNodeWithChroot("52318", node, "cat", "/home/core/.ssh/authorized_keys")
if err != nil {
e2e.Logf("debug node error node %s: error: %s", node, err.Error())
return false
}
if !strings.Contains(res, fakePubKey) {
e2e.Logf("could not find expected key in node %s: debug ouput: %s", node, res)
return false
}
}
return true
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "key not found error in nodes")
g.By("ocp-52318 Enforce machineconfiguration.openshift.io/role worker in machine config test passed")
}) | |||||
test case | openshift/openshift-tests-private | a613bd3d-fd4d-48fd-a0b7-4efeb75c49f9 | HyperShiftMGMT-Author:liangli-Critical-48511-Test project configuration resources on the guest cluster[Serial] | ['"context"', '"os"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:liangli-Critical-48511-Test project configuration resources on the guest cluster[Serial]", func() {
caseID := "48511"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new project 'test-48511'")
origContxt, contxtErr := hostedcluster.oc.AsGuestKubeconf().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
err = hostedcluster.oc.AsGuestKubeconf().Run("config").Args("use-context", origContxt).Execute()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
err = hostedcluster.oc.AsGuestKubeconf().Run("delete").Args("project", "test-48511").Execute()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
}()
err = hostedcluster.oc.AsGuestKubeconf().Run("new-project").Args("test-48511").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Create a build")
helloWorldSource := "quay.io/openshifttest/ruby-27:1.2.0~https://github.com/openshift/ruby-hello-world"
err = hostedcluster.oc.AsGuestKubeconf().Run("new-build").Args(helloWorldSource, "--name=build-48511", "-n", "test-48511").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Check build")
var buildPhase string
o.Eventually(func() string {
buildPhase, _ = hostedcluster.oc.AsGuestKubeconf().Run("get").Args("builds", "-n", "test-48511", "build-48511-1", `-ojsonpath={.status.phase}`).Output()
return buildPhase
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("Complete"), "wait for the rebuild job complete timeout")
g.By("Add a label on a node")
nodeName, err := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", `-ojsonpath={.items[0].metadata.name}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
defer hostedcluster.oc.AsGuestKubeconf().Run("label").Args("node", nodeName, "test-").Execute()
err = hostedcluster.oc.AsGuestKubeconf().Run("label").Args("node", nodeName, "test=test1", "--overwrite").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Update nodeSelector in build.config.openshift.io/cluster")
defer hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("build.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"buildOverrides": null}}`).Execute()
}, "openshift-controller-manager")
hostedcluster.updateHostedClusterAndCheck(oc, func() error {
return hostedcluster.oc.AsGuestKubeconf().Run("patch").Args("build.config.openshift.io/cluster", "--type=merge", "-p", `{"spec":{"buildOverrides":{"nodeSelector":{"test":"test1"}}}}`).Execute()
}, "openshift-controller-manager")
g.By("Re-run a build")
err = hostedcluster.oc.AsGuestKubeconf().Run("start-build").Args("--from-build=build-48511-1", "-n", "test-48511", "build-48511-1").Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("Check a new build")
o.Eventually(func() string {
buildPhase, _ = hostedcluster.oc.AsGuestKubeconf().Run("get").Args("builds", "-n", "test-48511", "build-48511-2", `-ojsonpath={.status.phase}`).Output()
return buildPhase
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("Complete"), "wait for the rebuild job complete timeout")
g.By("Check if a new build pod runs on correct node")
podNodeName, _ := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("pod", "-n", "test-48511", "build-48511-2-build", `-ojsonpath={.spec.nodeName}`).Output()
o.Expect(podNodeName).Should(o.Equal(nodeName))
}) | |||||
test case | openshift/openshift-tests-private | bbff78a5-57f9-4b06-abdd-c8f38e5c856a | HyperShiftMGMT-Author:heli-Critical-54476-Critical-62511-Ensure that OAuth server can communicate with GitLab (GitHub) [Serial] | ['"fmt"', '"os"', '"strings"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Author:heli-Critical-54476-Critical-62511-Ensure that OAuth server can communicate with GitLab (GitHub) [Serial]", func() {
g.By("backup current hostedcluster CR")
var bashClient = NewCmdClient()
var hcBackupFile string
defer func() { os.Remove(hcBackupFile) }()
hcBackupFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hc", hostedcluster.name, "-n", hostedcluster.namespace, "-oyaml").OutputToFile("hypershift-54476-62511")
o.Expect(err).ShouldNot(o.HaveOccurred())
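// Strip resourceVersion from the backup so it can be re-applied cleanly in the deferred recovery step below.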
_, err = bashClient.Run(fmt.Sprintf("sed -i '/resourceVersion:/d' %s", hcBackupFile)).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
g.By("get OAuth callback URL")
gitlabIDPName := "gitlabidp-54476"
gitlabSecretName := "gitlab-secret-54476"
fakeSecret := "fakeb577d60316d0573de82b8545c8e75c2a48156bcc"
gitlabConf := fmt.Sprintf(`{"spec":{"configuration":{"oauth":{"identityProviders":[{"gitlab":{"clientID":"fake4c397","clientSecret":{"name":"%s"},"url":"https://gitlab.com"},"mappingMethod":"claim","name":"%s","type":"GitLab"}]}}}}`, gitlabSecretName, gitlabIDPName)
githubIDPName := "githubidp-62511"
githubSecretName := "github-secret-62511"
githubConf := fmt.Sprintf(`{"spec":{"configuration":{"oauth":{"identityProviders":[{"github":{"clientID":"f90150abb","clientSecret":{"name":"%s"}},"mappingMethod":"claim","name":"%s","type":"GitHub"}]}}}}`, githubSecretName, githubIDPName)
cpNameSpace := hostedcluster.namespace + "-" + hostedcluster.name
callBackUrl := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "-ojsonpath={.status.oauthCallbackURLTemplate}")
e2e.Logf("OAuth callback URL: %s", callBackUrl)
oauthRoute := doOcpReq(oc, OcpGet, true, "route", "oauth", "-n", cpNameSpace, "-ojsonpath={.spec.host}")
o.Expect(callBackUrl).Should(o.ContainSubstring(oauthRoute))
defer func() {
doOcpReq(oc, OcpApply, false, "-f", hcBackupFile)
o.Eventually(func() bool {
status := doOcpReq(oc, OcpGet, true, "hc", hostedcluster.name, "-n", hostedcluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="Available")].status}`)
if strings.TrimSpace(status) != "True" {
return false
}
replica := doOcpReq(oc, OcpGet, true, "deploy", "oauth-openshift", "-n", cpNameSpace, "-ojsonpath={.spec.replicas}")
availReplica := doOcpReq(oc, OcpGet, false, "deploy", "oauth-openshift", "-n", cpNameSpace, "-ojsonpath={.status.availableReplicas}")
if replica != availReplica {
return false
}
readyReplica := doOcpReq(oc, OcpGet, false, "deploy", "oauth-openshift", "-n", cpNameSpace, "-ojsonpath={.status.readyReplicas}")
if readyReplica != availReplica {
return false
}
return true
}, ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "recover back hosted cluster timeout")
}()
g.By("config gitlab IDP")
defer doOcpReq(oc, OcpDelete, false, "secret", gitlabSecretName, "--ignore-not-found", "-n", hostedcluster.namespace)
doOcpReq(oc, OcpCreate, true, "secret", "generic", gitlabSecretName, "-n", hostedcluster.namespace, fmt.Sprintf(`--from-literal=clientSecret="%s"`, fakeSecret))
doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type=merge", `-p=`+gitlabConf)
o.Eventually(hostedcluster.pollCheckIDPConfigReady(IdentityProviderTypeGitLab, gitlabIDPName, gitlabSecretName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "wait for the gitlab idp config ready timeout")
g.By("config github IDP")
defer doOcpReq(oc, OcpDelete, false, "secret", githubSecretName, "--ignore-not-found", "-n", hostedcluster.namespace)
doOcpReq(oc, OcpCreate, true, "secret", "generic", githubSecretName, "-n", hostedcluster.namespace, fmt.Sprintf(`--from-literal=clientSecret="%s"`, fakeSecret))
doOcpReq(oc, OcpPatch, true, "hostedcluster", hostedcluster.name, "-n", hostedcluster.namespace, "--type=merge", `-p=`+githubConf)
o.Eventually(hostedcluster.pollCheckIDPConfigReady(IdentityProviderTypeGitHub, githubIDPName, githubSecretName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), "wait for the github idp config ready timeout")
}) | |||||
test case | openshift/openshift-tests-private | 4af92f58-5de5-4dfe-b348-b5255f3b5970 | HyperShiftMGMT-Longduration-NonPreRelease-Author:liangli-Critical-63535-Stop triggering rollout on labels/taint change[Serial] | ['"fmt"', '"os"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go/aws"', '"k8s.io/apimachinery/pkg/labels"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-Longduration-NonPreRelease-Author:liangli-Critical-63535-Stop triggering rollout on labels/taint change[Serial]", func() {
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Skipping incompatible platform %s", iaasPlatform))
}
caseID := "63535"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create a nodepool")
np1Count := 1
np1Name := "63535test-01"
defer func() {
hostedcluster.deleteNodePool(np1Name)
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
NewAWSNodePool(np1Name, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&np1Count).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(np1Name), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ready error")
g.By("add nodeLabels and taints in the nodepool '63535test-01'")
doOcpReq(oc, OcpPatch, true, "nodepool", np1Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"nodeLabels":{"env":"test"}}}`)
doOcpReq(oc, OcpPatch, true, "nodepool", np1Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"taints":[{"key":"env","value":"test","effect":"PreferNoSchedule"}]}}`)
o.Consistently(func() bool {
value, _ := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np1Name, "--show-labels").Output()
return strings.Contains(value, "env=test")
}, 60*time.Second, 5*time.Second).Should(o.BeFalse())
g.By("Scale the nodepool '63535test-01' to 2")
doOcpReq(oc, OcpScale, true, "nodepool", np1Name, "-n", hostedcluster.namespace, "--replicas=2")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(np1Name), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("nodepool are not scale up to 2 in hostedcluster %s", hostedcluster.name))
g.By("Check if nodeLabels and taints are propagated into new node")
taintsValue, err := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np1Name, `-lenv=test`, `-ojsonpath={.items[*].spec.taints[?(@.key=="env")].value}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(taintsValue).Should(o.ContainSubstring("test"))
g.By("Create a nodepool 'label-taint' with nodeLabels and taints")
np2Count := 1
np2Name := "63535test-02"
defer func() {
hostedcluster.deleteNodePool(np2Name)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(np2Name), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check deleted nodepool error")
}()
NewAWSNodePool(np2Name, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&np2Count).
WithNodeUpgradeType("InPlace").
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(np2Name), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", np2Name))
defer func() {
hostedcluster.deleteNodePool(np2Name)
o.Eventually(hostedcluster.pollCheckAllNodepoolReady(), LongTimeout, LongTimeout/10).Should(o.BeTrue(), "in defer check all nodes ready error")
}()
g.By("add nodeLabels and taints in the nodepool '63535test-02(InPlace)'")
doOcpReq(oc, OcpPatch, true, "nodepool", np2Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"nodeLabels":{"env":"test2"}}}`)
doOcpReq(oc, OcpPatch, true, "nodepool", np2Name, "-n", hostedcluster.namespace, "--type", "merge", "-p", `{"spec":{"taints":[{"key":"env","value":"test2","effect":"PreferNoSchedule"}]}}`)
o.Consistently(func() bool {
value, _ := hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np2Name, "--show-labels").Output()
return strings.Contains(value, "env=test2")
}, 60*time.Second, 5*time.Second).Should(o.BeFalse())
g.By("Scale the nodepool '63535test-02(InPlace)' to 2")
doOcpReq(oc, OcpScale, true, "nodepool", np2Name, "-n", hostedcluster.namespace, "--replicas=2")
o.Eventually(hostedcluster.pollGetHostedClusterReadyNodeCount(np2Name), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("nodepool are not scale up to 2 in hostedcluster %s", hostedcluster.name))
g.By("Check if nodepool 'label-taint' comes up and nodeLabels and taints are propagated into nodes")
taintsValue, err = hostedcluster.oc.AsGuestKubeconf().Run("get").Args("node", "-lhypershift.openshift.io/nodePool="+np1Name, `-lenv=test2`, `-ojsonpath={.items[*].spec.taints[?(@.key=="env")].value}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(taintsValue).Should(o.ContainSubstring("test2"))
}) | |||||
test case | openshift/openshift-tests-private | 3cd8eba4-b33a-4081-803b-63bf35035f18 | HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-67786-Changes to NodePool .spec.platform should trigger a rolling upgrade [Serial] | ['"context"', '"fmt"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"', '"k8s.io/apimachinery/pkg/labels"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-67786-Changes to NodePool .spec.platform should trigger a rolling upgrade [Serial]", func() {
// Variables
var (
testCaseId = "67786"
expectedPlatform = "aws"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
npName = fmt.Sprintf("%s-np", resourceNamePrefix)
npNumReplicas = 2
npInstanceType = "m5.xlarge"
npInstanceTypeNew = "m5.large"
)
if iaasPlatform != expectedPlatform {
g.Skip(fmt.Sprintf("Test case %s is for %s but current platform is %s, skipping", testCaseId, expectedPlatform, iaasPlatform))
}
// Avoid using an existing NodePool so other Hypershift test cases are unaffected by this one
exutil.By("Creating an additional NodePool")
releaseImage := hostedcluster.getCPReleaseImage()
e2e.Logf("Found release image used by the hosted cluster = %s", releaseImage)
defaultSgId := hostedcluster.getDefaultSgId()
o.Expect(defaultSgId).NotTo(o.BeEmpty())
e2e.Logf("Found default SG ID of the hosted cluster = %s", defaultSgId)
defer func() {
hostedcluster.deleteNodePool(npName)
o.Eventually(hostedcluster.pollCheckDeletedNodePool(npName), LongTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s to be deleted", npName))
}()
NewAWSNodePool(npName, hostedcluster.name, hostedcluster.namespace).
WithNodeCount(&npNumReplicas).
WithReleaseImage(releaseImage).
WithInstanceType(npInstanceType).
WithSecurityGroupID(defaultSgId).
CreateAWSNodePool()
o.Eventually(hostedcluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s to be ready", npName))
exutil.By("Checking instance type on CAPI resources")
awsMachineTemp, err := hostedcluster.getCurrentInfraMachineTemplatesByNodepool(context.Background(), npName)
o.Expect(err).ShouldNot(o.HaveOccurred())
instanceType, found, err := unstructured.NestedString(awsMachineTemp.Object, "spec", "template", "spec", "instanceType")
o.Expect(found).To(o.BeTrue())
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).To(o.Equal(npInstanceType))
exutil.By("Checking instance type label on nodes belonging to the newly created NodePool")
nodeList, err := oc.GuestKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
hypershiftNodePoolLabelKey: npName,
nodeInstanceTypeLabelKey: npInstanceType,
}).String(),
})
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(len(nodeList.Items)).To(o.Equal(npNumReplicas))
exutil.By(fmt.Sprintf("Change instance type to %s", npInstanceTypeNew))
patch := fmt.Sprintf(`{"spec":{"platform":{"aws":{"instanceType": "%s"}}}}`, npInstanceTypeNew)
doOcpReq(oc, OcpPatch, true, "np", npName, "-n", hostedcluster.namespace, "--type", "merge", "-p", patch)
exutil.By("Waiting for replace upgrade to complete")
upgradeType := hostedcluster.getNodepoolUpgradeType(npName)
o.Expect(upgradeType).Should(o.ContainSubstring("Replace"))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeIntermediateStatus(npName), ShortTimeout, ShortTimeout/10).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s replace upgrade to start", npName))
o.Eventually(hostedcluster.pollCheckNodepoolRollingUpgradeComplete(npName), DoubleLongTimeout, DefaultTimeout/5).Should(o.BeTrue(), fmt.Sprintf("failed waiting for NodePool/%s replace upgrade to complete", npName))
exutil.By("Make sure the instance type is updated on CAPI resources")
awsMachineTemp, err = hostedcluster.getCurrentInfraMachineTemplatesByNodepool(context.Background(), npName)
o.Expect(err).ShouldNot(o.HaveOccurred())
instanceType, found, err = unstructured.NestedString(awsMachineTemp.Object, "spec", "template", "spec", "instanceType")
o.Expect(found).To(o.BeTrue())
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).To(o.Equal(npInstanceTypeNew))
exutil.By("Make sure the node instance types are updated as well")
nodeList, err = oc.GuestKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
hypershiftNodePoolLabelKey: npName,
nodeInstanceTypeLabelKey: npInstanceTypeNew,
}).String(),
})
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(len(nodeList.Items)).To(o.Equal(npNumReplicas))
}) | |||||
test case | openshift/openshift-tests-private | c5b5ff12-5ab3-4c54-b840-01c0d84aefe0 | HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-70974-Test Hosted Cluster etcd automatic defragmentation [Disruptive] | ['"fmt"', '"os"', '"path"', '"strconv"', '"strings"', '"github.com/aws/aws-sdk-go/service/sts"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/labels"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-70974-Test Hosted Cluster etcd automatic defragmentation [Disruptive]", func() {
if !hostedcluster.isCPHighlyAvailable() {
g.Skip("This test case runs against a hosted cluster with highly available control plane, skipping")
}
var (
testCaseId = "70974"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tmpDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
hcpNs = hostedcluster.getHostedComponentNamespace()
cmNamePrefix = fmt.Sprintf("%s-cm", resourceNamePrefix)
cmIdx = 0
cmBatchSize = 500
cmData = strings.Repeat("a", 100_000)
cmNs = "default"
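// Threshold and margin are assumed to mirror the fragmentation ratio at which the hosted control plane's
// etcd defragmentation kicks in (~45%), plus a small safety margin so the test waits until defrag is clearly due.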
etcdDefragThreshold = 0.45
etcdDefragMargin = 0.05
etcdDbSwellingRate = 4
etcdDbContractionRate = 2
testEtcdEndpointIdx = 0
)
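// getCM renders the manifest of a ~100KB ConfigMap (plus a YAML document separator); creating them in batches
// inflates the etcd DB quickly so the defragmentation path can be exercised.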
var (
getCM = func() string {
cmIdx++
return fmt.Sprintf(`apiVersion: v1
kind: ConfigMap
metadata:
name: %s-%03d
namespace: %s
labels:
foo: bar
data:
foo: %s
---
`, cmNamePrefix, cmIdx, cmNs, cmData)
}
)
exutil.By("Creating temporary directory")
err := os.MkdirAll(tmpDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err = os.RemoveAll(tmpDir)
o.Expect(err).NotTo(o.HaveOccurred())
}()
exutil.By("Making sure the (hosted) control plane is highly available by checking the number of etcd Pods")
etcdPodCountStr := doOcpReq(oc, OcpGet, true, "sts", "etcd", "-n", hcpNs, "-o=jsonpath={.spec.replicas}")
o.Expect(strconv.Atoi(etcdPodCountStr)).To(o.BeNumerically(">", 1), "Expect >1 etcd Pods")
exutil.By("Getting DB size of an ETCD member")
_, dbSizeInUse, _, err := hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
targetDbSize := dbSizeInUse * int64(etcdDbSwellingRate)
e2e.Logf("Found initial ETCD member DB size in use = %d, target ETCD member DB size = %d", dbSizeInUse, targetDbSize)
exutil.By("Creating ConfigMaps on the guest cluster until the ETCD member DB size is large enough")
var dbSizeBeforeDefrag int64
defer func() {
_, err = oc.AsGuestKubeconf().Run("delete").Args("cm", "-n=default", "-l=foo=bar", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
}()
o.Eventually(func() (done bool) {
// Check ETCD endpoint for DB size
dbSizeBeforeDefrag, _, _, err = hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
if dbSizeBeforeDefrag >= targetDbSize {
return true
}
// Create temporary file
f, err := os.CreateTemp(tmpDir, "ConfigMaps")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
if err = f.Close(); err != nil {
e2e.Logf("Error closing file %s: %v", f.Name(), err)
}
if err = os.Remove(f.Name()); err != nil {
e2e.Logf("Error removing file %s: %v", f.Name(), err)
}
}()
// Write resources to file.
// For a batch size of 500, the resources will occupy a bit more than 50 MB of space.
for i := 0; i < cmBatchSize; i++ {
_, err = f.WriteString(getCM())
o.Expect(err).NotTo(o.HaveOccurred())
}
err = f.Sync()
o.Expect(err).NotTo(o.HaveOccurred())
fs, err := f.Stat()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("File size = %d", fs.Size())
// Create all the resources on the guest cluster
// Omit countless lines of "XXX created" output
_, err = oc.AsGuestKubeconf().Run("create").Args("-f", f.Name()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return false
}).WithTimeout(LongTimeout).WithPolling(LongTimeout / 10).Should(o.BeTrue())
exutil.By("Deleting all ConfigMaps")
_, err = oc.AsGuestKubeconf().Run("delete").Args("cm", "-n=default", "-l=foo=bar").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Waiting until the fragmentation ratio is above threshold+margin")
o.Eventually(func() (done bool) {
_, _, dbFragRatio, err := hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
return dbFragRatio > etcdDefragThreshold+etcdDefragMargin
}).WithTimeout(LongTimeout).WithPolling(LongTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting until defragmentation is done which causes DB size to decrease")
o.Eventually(func() (done bool) {
dbSize, _, _, err := hostedcluster.getEtcdEndpointDbStatsByIdx(testEtcdEndpointIdx)
o.Expect(err).NotTo(o.HaveOccurred())
return dbSize < dbSizeBeforeDefrag/int64(etcdDbContractionRate)
}).WithTimeout(DoubleLongTimeout).WithPolling(LongTimeout / 10).Should(o.BeTrue())
_, err = exutil.WaitAndGetSpecificPodLogs(oc, hcpNs, "etcd", "etcd-0", "defrag")
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | ec65aeb7-90c4-46f1-b8ae-f345bbb7a4f4 | HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-72055-Automated etcd backups for Managed services | ['"context"', '"fmt"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/aws/aws-sdk-go/aws/credentials"', '"github.com/aws/aws-sdk-go/aws/session"', 'awsiam "github.com/aws/aws-sdk-go/service/iam"', '"github.com/aws/aws-sdk-go/service/s3"', '"github.com/aws/aws-sdk-go/service/sts"', '"k8s.io/kubernetes/test/utils/format"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift.go | g.It("HyperShiftMGMT-NonPreRelease-Longduration-Author:fxie-Critical-72055-Automated etcd backups for Managed services", func() {
// Skip incompatible platforms
// The etcd snapshots will be backed up to S3 so this test case runs on AWS only
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Skipping incompatible platform %s", iaasPlatform))
}
// The management cluster has to be an STS cluster as the SA token will be used to assume an existing AWS role
if !exutil.IsSTSCluster(oc) {
g.Skip("This test case must run on an STS management cluster, skipping")
}
// Restrict CPO's version to >= 4.16.0
// TODO(fxie): remove this once https://github.com/openshift/hypershift/pull/3034 gets merged and is included in the payload
hcVersion := exutil.GetHostedClusterVersion(oc, hostedcluster.name, hostedcluster.namespace)
e2e.Logf("Found hosted cluster version = %q", hcVersion)
hcVersion.Pre = nil
minHcVersion := semver.MustParse("4.16.0")
if hcVersion.LT(minHcVersion) {
g.Skip(fmt.Sprintf("The hosted cluster's version (%q) is too low, skipping", hcVersion))
}
var (
testCaseId = getTestCaseIDs()[0]
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
etcdBackupBucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
etcdBackupRoleName = fmt.Sprintf("%s-role", resourceNamePrefix)
etcdBackupRolePolicyArn = "arn:aws:iam::aws:policy/AmazonS3FullAccess"
hcpNs = hostedcluster.getHostedComponentNamespace()
adminKubeClient = oc.AdminKubeClient()
ctx = context.Background()
)
// It is impossible to rely on short-lived tokens like operators on the management cluster:
// there isn't a preexisting role with enough permissions for us to assume.
exutil.By("Getting an AWS session with credentials obtained from cluster profile")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
creds := credentials.NewSharedCredentials(getAWSPrivateCredentials(), "default")
var sess *session.Session
sess, err = session.NewSession(&aws.Config{
Credentials: creds,
Region: aws.String(region),
})
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Getting AWS account ID")
stsClient := exutil.NewDelegatingStsClient(sts.New(sess))
var getCallerIdOutput *sts.GetCallerIdentityOutput
getCallerIdOutput, err = stsClient.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
o.Expect(err).NotTo(o.HaveOccurred())
awsAcctId := aws.StringValue(getCallerIdOutput.Account)
e2e.Logf("Found AWS account ID = %s", awsAcctId)
exutil.By("Getting SA issuer of the management cluster")
saIssuer := doOcpReq(oc, OcpGet, true, "authentication/cluster", "-o=jsonpath={.spec.serviceAccountIssuer}")
// An OIDC provider's URL is prefixed with https://
saIssuerStripped := strings.TrimPrefix(saIssuer, "https://")
e2e.Logf("Found SA issuer of the management cluster = %s", saIssuerStripped)
exutil.By("Creating AWS role")
iamClient := exutil.NewDelegatingIAMClient(awsiam.New(sess))
var createRoleOutput *awsiam.CreateRoleOutput
createRoleOutput, err = iamClient.CreateRoleWithContext(ctx, &awsiam.CreateRoleInput{
RoleName: aws.String(etcdBackupRoleName),
AssumeRolePolicyDocument: aws.String(iamRoleTrustPolicyForEtcdBackup(awsAcctId, saIssuerStripped, hcpNs)),
})
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_, err = iamClient.DeleteRoleWithContext(ctx, &awsiam.DeleteRoleInput{
RoleName: aws.String(etcdBackupRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred())
}()
e2e.Logf("Attaching policy %s to role %s", etcdBackupRolePolicyArn, etcdBackupRoleName)
o.Expect(iamClient.AttachRolePolicy(etcdBackupRoleName, etcdBackupRolePolicyArn)).NotTo(o.HaveOccurred())
defer func() {
// Required for role deletion
o.Expect(iamClient.DetachRolePolicy(etcdBackupRoleName, etcdBackupRolePolicyArn)).NotTo(o.HaveOccurred())
}()
roleArn := aws.StringValue(createRoleOutput.Role.Arn)
exutil.By("Creating AWS S3 bucket")
s3Client := exutil.NewDelegatingS3Client(s3.New(sess))
o.Expect(s3Client.CreateBucket(etcdBackupBucketName)).NotTo(o.HaveOccurred())
defer func() {
// Required for bucket deletion
o.Expect(s3Client.EmptyBucketWithContextAndCheck(ctx, etcdBackupBucketName)).NotTo(o.HaveOccurred())
o.Expect(s3Client.DeleteBucket(etcdBackupBucketName)).NotTo(o.HaveOccurred())
}()
exutil.By("Creating CM/etcd-backup-config")
e2e.Logf("Found management cluster region = %s", region)
etcdBackupConfigCm := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "etcd-backup-config",
},
Data: map[string]string{
"bucket-name": etcdBackupBucketName,
"region": region,
"role-arn": roleArn,
},
}
_, err = adminKubeClient.CoreV1().ConfigMaps(hcpNs).Create(ctx, &etcdBackupConfigCm, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
defer doOcpReq(oc, OcpDelete, true, "cm/etcd-backup-config", "-n", hcpNs)
e2e.Logf("CM/etcd-backup-config created:\n%s", format.Object(etcdBackupConfigCm, 0))
exutil.By("Waiting for the etcd backup CronJob to be created")
o.Eventually(func() bool {
return oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("cronjob/etcd-backup", "-n", hcpNs).Execute() == nil
}).WithTimeout(DefaultTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting for the first job execution to be successful")
o.Eventually(func() bool {
lastSuccessfulTime, _, err := oc.AsAdmin().WithoutNamespace().Run(OcpGet).
Args("cronjob/etcd-backup", "-n", hcpNs, "-o=jsonpath={.status.lastSuccessfulTime}").Outputs()
return err == nil && len(lastSuccessfulTime) > 0
}).WithTimeout(70 * time.Minute).WithPolling(5 * time.Minute).Should(o.BeTrue())
exutil.By("Waiting for the backup to be uploaded")
o.Expect(s3Client.WaitForBucketEmptinessWithContext(ctx, etcdBackupBucketName,
exutil.BucketNonEmpty, 5*time.Second /* Interval */, 1*time.Minute /* Timeout */)).NotTo(o.HaveOccurred())
}) | |||||
test | openshift/openshift-tests-private | c0a633d1-e416-4640-b5e3-f83a9ff9eaf2 | hypershift_hosted | import (
"fmt"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_hosted.go | package hypershift
import (
"fmt"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-hypershift] Hypershift", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("hypershift-hosted", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
if !exutil.IsHypershiftHostedCluster(oc) {
g.Skip("not a hosted cluster, skip the test case")
}
})
// author: [email protected]
g.It("NonPreRelease-PreChkUpgrade-PstChkUpgrade-Author:heli-Critical-66831-HCP to support mgmt on 4.13 non-OVNIC and hosted on 4.14 OVN-IC and mgmt to upgrade to 4.14", func() {
version := doOcpReq(oc, OcpGet, true, "clusterversion", "version", `-ojsonpath={.status.desired.version}`)
g.By(fmt.Sprintf("check hosted cluster version: %s", version))
ovnLeaseHolder := doOcpReq(oc, OcpGet, true, "lease", "ovn-kubernetes-master", "-n", "openshift-ovn-kubernetes", `-ojsonpath={.spec.holderIdentity}`)
g.By(fmt.Sprintf("check hosted cluster ovn lease holder: %s", ovnLeaseHolder))
if strings.Contains(version, "4.13") {
// currently we only check aws 4.13
if strings.ToLower(exutil.CheckPlatform(oc)) == "aws" {
o.Expect(ovnLeaseHolder).Should(o.ContainSubstring("compute.internal"))
}
}
if strings.Contains(version, "4.14") {
o.Expect(ovnLeaseHolder).Should(o.ContainSubstring("ovnkube-control-plane"))
}
})
})
| package hypershift | ||||
test case | openshift/openshift-tests-private | 99af7cda-0337-4d34-b470-4537dcbbd1ed | NonPreRelease-PreChkUpgrade-PstChkUpgrade-Author:heli-Critical-66831-HCP to support mgmt on 4.13 non-OVNIC and hosted on 4.14 OVN-IC and mgmt to upgrade to 4.14 | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_hosted.go | g.It("NonPreRelease-PreChkUpgrade-PstChkUpgrade-Author:heli-Critical-66831-HCP to support mgmt on 4.13 non-OVNIC and hosted on 4.14 OVN-IC and mgmt to upgrade to 4.14", func() {
version := doOcpReq(oc, OcpGet, true, "clusterversion", "version", `-ojsonpath={.status.desired.version}`)
g.By(fmt.Sprintf("check hosted cluster version: %s", version))
ovnLeaseHolder := doOcpReq(oc, OcpGet, true, "lease", "ovn-kubernetes-master", "-n", "openshift-ovn-kubernetes", `-ojsonpath={.spec.holderIdentity}`)
g.By(fmt.Sprintf("check hosted cluster ovn lease holder: %s", ovnLeaseHolder))
if strings.Contains(version, "4.13") {
// currently we only check aws 4.13
if strings.ToLower(exutil.CheckPlatform(oc)) == "aws" {
o.Expect(ovnLeaseHolder).Should(o.ContainSubstring("compute.internal"))
}
}
if strings.Contains(version, "4.14") {
o.Expect(ovnLeaseHolder).Should(o.ContainSubstring("ovnkube-control-plane"))
}
}) | |||||
test | openshift/openshift-tests-private | ed59b3bc-a346-40b0-bb48-882f0d453cd3 | hypershift_install | import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"reflect"
"sort"
"strings"
"text/template"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
errors2 "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/util/taints"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/utils/ptr"
"k8s.io/utils/strings/slices"
operatorv1 "github.com/openshift/api/operator/v1"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | package hypershift
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"reflect"
"sort"
"strings"
"text/template"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
errors2 "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/util/taints"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/utils/ptr"
"k8s.io/utils/strings/slices"
operatorv1 "github.com/openshift/api/operator/v1"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
)
var _ = g.Describe("[sig-hypershift] Hypershift", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("hypershift-install", exutil.KubeConfigPath())
bashClient *CLI
iaasPlatform string
fixturePath string
)
g.BeforeEach(func() {
operator := doOcpReq(oc, OcpGet, false, "pods", "-n", "hypershift", "-ojsonpath={.items[*].metadata.name}")
if len(operator) > 0 {
g.Skip("hypershift operator found, skip install test run")
}
bashClient = NewCmdClient()
iaasPlatform = exutil.CheckPlatform(oc)
fixturePath = exutil.FixturePath("testdata", "hypershift")
version, _ := bashClient.WithShowInfo(true).Run("hypershift version").Output()
e2e.Logf("Found hypershift CLI version:\n%s", version)
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-42718-[HyperShiftINSTALL] Create a hosted cluster on aws using hypershift tool [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42718 is for AWS - skipping test ...")
}
caseID := "42718"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(2)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-42866-[HyperShiftINSTALL] Create HostedCluster infrastructure on AWS by using Hypershift CLI [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42866 is for AWS - skipping test ...")
}
caseID := "42866"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Create the AWS infrastructure")
infraFile := installHelper.dir + "/" + clusterName + "-infra.json"
infra := installHelper.createInfraCommonBuilder().
withInfraID(clusterName + exutil.RandStrCustomize("123456789", 4)).
withOutputFile(infraFile)
defer installHelper.destroyAWSInfra(infra)
installHelper.createAWSInfra(infra)
exutil.By("Create AWS IAM resources")
iamFile := installHelper.dir + "/" + clusterName + "-iam.json"
iam := installHelper.createIamCommonBuilder(infraFile).
withInfraID(infra.InfraID).
withOutputFile(iamFile)
defer installHelper.destroyAWSIam(iam)
installHelper.createAWSIam(iam)
exutil.By("create aws HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withInfraJSON(infraFile).
withIamJSON(iamFile)
defer installHelper.destroyAWSHostedClusters(createCluster)
cluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("check vpc is as expected")
vpcID, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("awsclusters", "-n", cluster.namespace+"-"+cluster.name, cluster.name, `-ojsonpath='{.spec.network.vpc.id}'`).Output()
o.Expect(vpcID).NotTo(o.BeEmpty())
vpc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", "-n", cluster.namespace, cluster.name, `-ojsonpath='{.spec.platform.aws.cloudProviderConfig.vpc}'`).Output()
o.Expect(strings.Compare(vpcID, vpc) == 0).Should(o.BeTrue())
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-42867-[HyperShiftINSTALL] Create iam and infrastructure repeatedly with the same infra-id on aws [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42867 is for AWS - skipping test ...")
}
caseID := "42867"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Create the AWS infrastructure 1")
infraFile := installHelper.dir + "/" + clusterName + "-infra.json"
infra := installHelper.createInfraCommonBuilder().
withName(clusterName + "infra1").
withInfraID(clusterName + exutil.RandStrCustomize("123456789", 4)).
withOutputFile(infraFile)
defer installHelper.destroyAWSInfra(infra)
installHelper.createAWSInfra(infra)
exutil.By("Create AWS IAM resources 1")
iamFile := installHelper.dir + "/" + clusterName + "-iam.json"
iam := installHelper.createIamCommonBuilder(infraFile).
withInfraID(infra.InfraID).
withOutputFile(iamFile)
defer installHelper.destroyAWSIam(iam)
installHelper.createAWSIam(iam)
exutil.By("Create the AWS infrastructure 2")
infraFile2 := installHelper.dir + "/" + clusterName + "-infra2.json"
infra2 := installHelper.createInfraCommonBuilder().
withName(clusterName + "infra2").
withInfraID(infra.InfraID).
withOutputFile(infraFile2)
defer installHelper.destroyAWSInfra(infra2)
installHelper.createAWSInfra(infra2)
exutil.By("Create AWS IAM resources 2")
iamFile2 := installHelper.dir + "/" + clusterName + "-iam2.json"
iam2 := installHelper.createIamCommonBuilder(infraFile2).
withInfraID(infra2.InfraID).
withOutputFile(iamFile2)
defer installHelper.destroyAWSIam(iam2)
installHelper.createAWSIam(iam2)
exutil.By("Compare two infra file")
o.Expect(reflect.DeepEqual(getJSONByFile(infraFile, "zones"), getJSONByFile(infraFile2, "zones"))).Should(o.BeTrue())
exutil.By("Compare two iam file")
o.Expect(strings.Compare(getSha256ByFile(iamFile), getSha256ByFile(iamFile2)) == 0).Should(o.BeTrue())
})
// author: [email protected]
g.It("NonPreRelease-Longduration-Author:liangli-Critical-42952-[HyperShiftINSTALL] create multiple clusters without manifest crash and delete them asynchronously [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42952 is for AWS - skipping test ...")
}
caseID := "42952"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create aws HostedClusters 1")
createCluster1 := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID + "-1").
withNodePoolReplicas(1)
defer installHelper.deleteHostedClustersManual(createCluster1)
hostedCluster1 := installHelper.createAWSHostedClusters(createCluster1)
exutil.By("create aws HostedClusters 2")
createCluster2 := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID + "-2").
withNodePoolReplicas(1)
defer installHelper.deleteHostedClustersManual(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
exutil.By("delete HostedClusters CR background")
installHelper.deleteHostedClustersCRAllBackground()
exutil.By("check delete AWS HostedClusters asynchronously")
o.Eventually(func() int {
deletionTimestamp1, _ := hostedCluster1.getClustersDeletionTimestamp()
deletionTimestamp2, _ := hostedCluster2.getClustersDeletionTimestamp()
if len(deletionTimestamp1) == 0 || len(deletionTimestamp2) == 0 {
return -1
}
e2e.Logf("deletionTimestamp1:%s, deletionTimestamp2:%s", deletionTimestamp1, deletionTimestamp2)
return strings.Compare(deletionTimestamp1, deletionTimestamp2)
}, ShortTimeout, ShortTimeout/10).Should(o.Equal(0), "destroy AWS HostedClusters asynchronously error")
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-44924-[HyperShiftINSTALL] Test multi-zonal control plane components spread with HA mode enabled [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44924 is for AWS - skipping test ...")
}
caseID := "44924"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(2)
defer installHelper.deleteHostedClustersManual(createCluster)
hostedCluster := installHelper.createAWSHostedClustersRender(createCluster, func(filename string) error {
exutil.By("Set HighlyAvailable mode")
return replaceInFile(filename, "SingleReplica", "HighlyAvailable")
})
exutil.By("Check if pods of multi-zonal control plane components spread across multi-zone")
deploymentNames, err := hostedCluster.getHostedClustersHACPWorkloadNames("deployment")
o.Expect(err).NotTo(o.HaveOccurred())
for _, name := range deploymentNames {
value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", hostedCluster.namespace+"-"+hostedCluster.name, name, `-ojsonpath={.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[*].topologyKey}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("deployment: %s: %s", name, value))
o.Expect(value).Should(o.ContainSubstring("topology.kubernetes.io/zone"), fmt.Sprintf("deployment: %s lack of anti-affinity of zone", name))
}
statefulSetNames, err := hostedCluster.getHostedClustersHACPWorkloadNames("statefulset")
o.Expect(err).NotTo(o.HaveOccurred())
for _, name := range statefulSetNames {
value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("statefulset", "-n", hostedCluster.namespace+"-"+hostedCluster.name, name, `-ojsonpath={.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[*].topologyKey}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("statefulSetNames: %s: %s", name, value))
o.Expect(value).Should(o.ContainSubstring("topology.kubernetes.io/zone"), fmt.Sprintf("statefulset: %s lack of anti-affinity of zone", name))
}
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-44981-[HyperShiftINSTALL] Test built-in control plane pod tolerations [Serial] [Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44981 is for AWS - skipping test ...")
}
nodeAction := newNodeAction(oc)
nodes, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodes) < 2 {
g.Skip("work node should >= 2 - skipping test ...")
}
caseID := "44981"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err = os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("update taint and label, taint and label use key 'hypershift.openshift.io/cluster'")
defer nodeAction.taintNode(nodes[0], "hypershift.openshift.io/cluster="+oc.Namespace()+"-"+clusterName+":NoSchedule-")
nodeAction.taintNode(nodes[0], "hypershift.openshift.io/cluster="+oc.Namespace()+"-"+clusterName+":NoSchedule")
defer nodeAction.labelNode(nodes[0], "hypershift.openshift.io/cluster-")
nodeAction.labelNode(nodes[0], "hypershift.openshift.io/cluster="+oc.Namespace()+"-"+clusterName)
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().withName(clusterName).withNodePoolReplicas(0)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Check if control plane pods in HostedClusters are on " + nodes[0])
o.Eventually(hostedCluster.pollIsCPPodOnlyRunningOnOneNode(nodes[0]), DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Check if control plane pods in HostedClusters error")
exutil.By("update taint and label, taint and label use key 'hypershift.openshift.io/control-plane'")
defer nodeAction.taintNode(nodes[1], "hypershift.openshift.io/control-plane=true:NoSchedule-")
nodeAction.taintNode(nodes[1], "hypershift.openshift.io/control-plane=true:NoSchedule")
defer nodeAction.labelNode(nodes[1], "hypershift.openshift.io/control-plane-")
nodeAction.labelNode(nodes[1], "hypershift.openshift.io/control-plane=true")
exutil.By("create HostedClusters 2")
createCluster2 := installHelper.createClusterAWSCommonBuilder().withName(clusterName + "-2").withNodePoolReplicas(0)
defer installHelper.destroyAWSHostedClusters(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
exutil.By("Check if control plane pods in HostedClusters are on " + nodes[1])
o.Eventually(hostedCluster2.pollIsCPPodOnlyRunningOnOneNode(nodes[1]), DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Check if control plane pods in HostedClusters error")
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-45341-[HyperShiftINSTALL] Test NodePort Publishing Strategy [Serial] [Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44981 is for AWS - skipping test ...")
}
caseID := "45341"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Create a nodeport ip bastion")
preStartJobSetup := newPreStartJob(clusterName+"-setup", oc.Namespace(), caseID, "setup", dir)
preStartJobTeardown := newPreStartJob(clusterName+"-teardown", oc.Namespace(), caseID, "teardown", dir)
defer preStartJobSetup.delete(oc)
preStartJobSetup.create(oc)
defer preStartJobTeardown.delete(oc)
defer preStartJobTeardown.create(oc)
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(1)
defer installHelper.deleteHostedClustersManual(createCluster)
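// createAWSHostedClustersRender (as suggested by its name and callback signature) renders the
// HostedCluster manifests to a file and applies them only after the callback below has mutated
// the file, switching the service publishing strategy from LoadBalancer to NodePort with the
// bastion IP created above as the NodePort address.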
hostedCluster := installHelper.createAWSHostedClustersRender(createCluster, func(filename string) error {
exutil.By("Test NodePort Publishing Strategy")
ip := preStartJobSetup.preStartJobIP(oc)
e2e.Logf("ip:" + ip)
return replaceInFile(filename, "type: LoadBalancer", "type: NodePort\n nodePort:\n address: "+ip)
})
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(1), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-47053-[HyperShiftINSTALL] Test InfrastructureTopology configuration [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 47053 is for AWS - skipping test ...")
}
caseID := "47053"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters-1")
createCluster1 := installHelper.createClusterAWSCommonBuilder().
withName(clusterName + "-1").
withNodePoolReplicas(1)
defer installHelper.destroyAWSHostedClusters(createCluster1)
hostedCluster1 := installHelper.createAWSHostedClusters(createCluster1)
exutil.By("check HostedClusters-1 HostedClusterInfrastructureTopology")
installHelper.createHostedClusterKubeconfig(createCluster1, hostedCluster1)
o.Eventually(hostedCluster1.pollGetHostedClusterInfrastructureTopology(), LongTimeout, LongTimeout/10).Should(o.ContainSubstring("SingleReplica"), "default --infra-availability-policy should be SingleReplica")
exutil.By("create HostedClusters-2 infra-availability-policy: HighlyAvailable")
createCluster2 := installHelper.createClusterAWSCommonBuilder().
withName(clusterName + "-2").
withNodePoolReplicas(2).
withInfraAvailabilityPolicy("HighlyAvailable")
defer installHelper.destroyAWSHostedClusters(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
exutil.By("check HostedClusters-2 HostedClusterInfrastructureTopology")
installHelper.createHostedClusterKubeconfig(createCluster2, hostedCluster2)
o.Eventually(hostedCluster2.pollGetHostedClusterInfrastructureTopology(), LongTimeout, LongTimeout/10).Should(o.ContainSubstring("HighlyAvailable"), "--infra-availability-policy should be HighlyAvailable")
exutil.By("Check if pods of multi-zonal components spread across multi-zone")
o.Eventually(func() string {
value, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+hostedCluster2.hostedClustersKubeconfigFile, "deployment", "-A", "-ojsonpath={.items[*].spec.replicas}").Output()
return strings.ReplaceAll(strings.ReplaceAll(value, "1", ""), " ", "")
}, DefaultTimeout, DefaultTimeout/10).ShouldNot(o.BeEmpty())
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-48133-[HyperShiftINSTALL] Apply user defined tags to all AWS resources [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48133 is for AWS - skipping test ...")
}
caseID := "48133"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(2).
withAdditionalTags("adminContact=HyperShiftInstall,customTag=test")
defer installHelper.destroyAWSHostedClusters(createCluster)
cluster := installHelper.createAWSHostedClusters(createCluster)
installHelper.createHostedClusterKubeconfig(createCluster, cluster)
exutil.By("Confirm user defined tags")
checkSubstring(doOcpReq(oc, OcpGet, false, "hostedcluster", "-n", cluster.namespace, cluster.name, `-ojsonpath={.spec.platform.aws.resourceTags}`), []string{`{"key":"adminContact","value":"HyperShiftInstall"}`, `{"key":"customTag","value":"test"}`})
o.Expect(strings.Count(doOcpReq(oc, OcpGet, false, "awsmachines", "-n", cluster.namespace+"-"+cluster.name, `-ojsonpath={.items[*].spec.additionalTags}`), "HyperShiftInstall")).Should(o.Equal(2))
checkSubstring(doOcpReq(oc, OcpGet, false, "--kubeconfig="+cluster.hostedClustersKubeconfigFile, "infrastructure", "cluster", `-ojsonpath={.status.platformStatus.aws.resourceTags}`), []string{`{"key":"adminContact","value":"HyperShiftInstall"}`, `{"key":"customTag","value":"test"}`})
checkSubstring(doOcpReq(oc, OcpGet, false, "--kubeconfig="+cluster.hostedClustersKubeconfigFile, "-n", "openshift-ingress", "svc/router-default", `-ojsonpath={.metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-additional-resource-tags}`), []string{"adminContact=HyperShiftInstall", "customTag=test"})
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-48672-[HyperShiftINSTALL] Create multi-zone AWS infrastructure and NodePools via CLI [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48672 is for AWS - skipping test ...")
}
// this case needs 3 zones
zones := getAWSMgmtClusterRegionAvailableZones(oc)
if len(zones) < 3 {
g.Skip("mgmt cluster has less than 3 zones: " + strings.Join(zones, " ") + " - skipping test ...")
}
caseID := "48672"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(1).
withZones(strings.Join(zones[:3], ",")).
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
exutil.By("Check the hostedcluster and nodepool")
checkSubstring(doOcpReq(oc, OcpGet, false, "awsmachines", "-n", hostedCluster.namespace+"-"+hostedCluster.name, `-ojsonpath={.items[*].spec.providerID}`), zones[:3])
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(3), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-49129-[HyperShiftINSTALL] Create multi-zone Azure infrastructure and nodepools via CLI [Serial]", func() {
if iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49129 is for azure - skipping test ...")
}
caseID := "49129"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{oc: oc, dir: dir, iaasPlatform: iaasPlatform}
exutil.By("install HyperShift operator")
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAzureCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(2).
withReleaseImage(release)
defer installHelper.destroyAzureHostedClusters(createCluster)
hostedCluster := installHelper.createAzureHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liangli-Critical-49173-[HyperShiftINSTALL] Test Azure node root disk size [Serial]", func() {
if iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49173 is for azure - skipping test ...")
}
caseID := "49173"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{oc: oc, dir: dir, iaasPlatform: iaasPlatform}
exutil.By("install HyperShift operator")
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAzureCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(1).
withRootDiskSize(64).
withReleaseImage(release)
defer installHelper.destroyAzureHostedClusters(createCluster)
hostedCluster := installHelper.createAzureHostedClusters(createCluster)
exutil.By("Check the disk size for the nodepool '" + hostedCluster.name + "'")
o.Expect(hostedCluster.getAzureDiskSizeGBByNodePool(hostedCluster.name)).Should(o.ContainSubstring("64"))
exutil.By("Get subnet ID of the hosted cluster")
subnetId := hostedCluster.getAzureSubnetId()
e2e.Logf("Found subnet ID = %s", subnetId)
exutil.By("create nodepool and check root-disk-size (default 120)")
np1Name := hostedCluster.name + "-1"
NewAzureNodePool(np1Name, hostedCluster.name, oc.Namespace()).
WithNodeCount(ptr.To(1)).
WithSubnetId(subnetId).
CreateAzureNodePool()
o.Expect(hostedCluster.getAzureDiskSizeGBByNodePool(np1Name)).Should(o.ContainSubstring("120"))
exutil.By("create nodepool and check root-disk-size (256)")
np2Name := hostedCluster.name + "-2"
NewAzureNodePool(np2Name, hostedCluster.name, oc.Namespace()).
WithNodeCount(ptr.To(1)).
WithRootDiskSize(ptr.To(256)).
WithSubnetId(subnetId).
CreateAzureNodePool()
o.Expect(hostedCluster.getAzureDiskSizeGBByNodePool(np2Name)).Should(o.ContainSubstring("256"))
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(3), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
})
// author: [email protected], [email protected] (for HOSTEDCP-1411)
// Also include a part of https://issues.redhat.com/browse/HOSTEDCP-1411
g.It("Longduration-NonPreRelease-Author:liangli-Critical-49174-[HyperShiftINSTALL] Create Azure infrastructure and nodepools via CLI [Serial]", func() {
if iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49174 is for azure - skipping test ...")
}
caseID := "49174"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{oc: oc, dir: dir, iaasPlatform: iaasPlatform}
exutil.By("install HyperShift operator")
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAzureCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(1).
withResourceGroupTags("foo=bar,baz=quux").
withReleaseImage(release)
defer installHelper.destroyAzureHostedClusters(createCluster)
hostedCluster := installHelper.createAzureHostedClusters(createCluster)
exutil.By("Scale up nodepool")
doOcpReq(oc, OcpScale, false, "nodepool", hostedCluster.name, "--namespace", hostedCluster.namespace, "--replicas=2")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
// A part of https://issues.redhat.com/browse/HOSTEDCP-1411
_, err = oc.AdminKubeClient().CoreV1().Secrets("kube-system").Get(context.Background(), "azure-credentials", metav1.GetOptions{})
if errors.IsNotFound(err) {
e2e.Logf("Root creds not found on the management cluster, skip the Azure resource group check")
return
}
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Checking tags on the Azure resource group")
rgName, err := hostedCluster.getResourceGroupName()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Found resource group name = %s", rgName)
azClientSet := exutil.NewAzureClientSetWithRootCreds(oc)
resourceGroupsClientGetResponse, err := azClientSet.GetResourceGroupClient(nil).Get(context.Background(), rgName, nil)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(*resourceGroupsClientGetResponse.Tags["foo"]).To(o.Equal("bar"))
o.Expect(*resourceGroupsClientGetResponse.Tags["baz"]).To(o.Equal("quux"))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:heli-Critical-64405-[HyperShiftINSTALL] Create a cluster in the AWS Region ap-southeast-3 [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 64405 is for AWS - skipping test ...")
}
region, err := getClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
if region != "ap-southeast-3" {
g.Skip("region is " + region + " while 64405 is for ap-southeast-3 - skipping test ...")
}
caseID := "64405"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err = os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(2)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
})
// Authors: [email protected], [email protected] (the OCPBUGS-19674 and OCPBUGS-20163 part only)
// Test run duration: ~30min
g.It("Longduration-NonPreRelease-Author:heli-Critical-62085-Critical-60483-Critical-64808-[HyperShiftINSTALL] The cluster should be deleted successfully when there is no identity provider [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 62085,60483,64808 is for AWS - skipping test ...")
}
caseID := "62085-60483-64808"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(2).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
// For OCPBUGS-19674 and OCPBUGS-20163 (clone of the former)
{
exutil.By("Make sure the API server is exposed via Route")
o.Expect(hostedCluster.getSvcPublishingStrategyType(hcServiceAPIServer)).To(o.Equal(hcServiceTypeRoute))
exutil.By("Make sure the hosted cluster reports correct control plane endpoint port")
o.Expect(hostedCluster.getControlPlaneEndpointPort()).To(o.Equal("443"))
}
exutil.By("delete OpenID connect from aws IAM Identity providers")
infraID := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, `-ojsonpath={.spec.infraID}`)
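// With the S3-backed OIDC storage provider, the issuer (and hence the IAM OIDC provider name)
// is expected to take the form <bucket>.s3.<region>.amazonaws.com/<infraID>.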
provider := fmt.Sprintf("%s.s3.%s.amazonaws.com/%s", bucketName, region, infraID)
e2e.Logf("trying to delete OpenIDConnectProvider: %s", provider)
clusterinfra.GetAwsCredentialFromCluster(oc)
iamClient := exutil.NewIAMClient()
o.Expect(iamClient.DeleteOpenIDConnectProviderByProviderName(provider)).ShouldNot(o.HaveOccurred())
exutil.By("update control plane policy to remove security operations")
roleAndPolicyName := infraID + "-control-plane-operator"
var policyDocument = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:CreateVpcEndpoint",
"ec2:ModifyVpcEndpoint",
"ec2:DeleteVpcEndpoints",
"ec2:CreateTags",
"route53:ListHostedZones",
"ec2:DescribeVpcs"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets"
],
"Resource": "arn:aws:route53:::hostedzone/Z08584472H531BKOV71X7"
}
]
}`
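// The replacement policy above deliberately omits all ec2 security group actions, so the
// control-plane-operator can no longer manage the hosted cluster's default worker security
// group; combined with the deleted OIDC provider this drives the condition check below.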
policy, err := iamClient.GetRolePolicy(roleAndPolicyName, roleAndPolicyName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("original role policy is %s", policy)
o.Expect(iamClient.UpdateRolePolicy(roleAndPolicyName, roleAndPolicyName, policyDocument)).NotTo(o.HaveOccurred())
policy, err = iamClient.GetRolePolicy(roleAndPolicyName, roleAndPolicyName)
e2e.Logf("updated role policy is %s", policy)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(policy).ShouldNot(o.ContainSubstring("SecurityGroup"))
exutil.By("ocp-64808 check hosted condition ValidAWSIdentityProvider should be unknown")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="ValidAWSIdentityProvider")].status}`)
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("False"), fmt.Sprintf("%s expected condition ValidAWSIdentityProvider False status not found error", hostedCluster.name))
})
g.It("Longduration-NonPreRelease-Author:heli-Critical-60484-[HyperShiftINSTALL] HostedCluster deletion shouldn't hang when OIDC provider/STS is configured incorrectly [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 60484 is for AWS - skipping test ...")
}
caseID := "60484"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
installHelper.newAWSS3Client()
installHelper.createAWSS3Bucket()
exutil.By("install HO without s3 credentials")
var installCMD = fmt.Sprintf("hypershift install "+
"--oidc-storage-provider-s3-bucket-name %s "+
"--oidc-storage-provider-s3-region %s "+
"--private-platform AWS "+
"--aws-private-creds %s "+
"--aws-private-region=%s",
bucketName, region, getAWSPrivateCredentials(), region)
var cmdClient = NewCmdClient().WithShowInfo(true)
defer installHelper.hyperShiftUninstall()
_, err = cmdClient.Run(installCMD).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(0).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate)
defer installHelper.destroyAWSHostedClusters(createCluster)
hc := installHelper.createAWSHostedClusterWithoutCheck(createCluster)
exutil.By("check hosted cluster condition ValidOIDCConfiguration")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", "-o", `jsonpath={.status.conditions[?(@.type=="ValidOIDCConfiguration")].status}`)
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("False"))
msg := doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", "-o", `jsonpath={.status.conditions[?(@.type=="ValidOIDCConfiguration")].message}`)
e2e.Logf("error msg of condition ValidOIDCConfiguration is %s", msg)
})
g.It("Longduration-NonPreRelease-Author:heli-Critical-67828-[HyperShiftINSTALL] non-serving components land on non-serving nodes versus default workers [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 67828 is for AWS - skipping test ...")
}
if !exutil.IsInfrastructuresHighlyAvailable(oc) {
g.Skip("ocp-67828 is for Infra HA OCP - skipping test ...")
}
msNames := strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachineset, "--ignore-not-found", `-o=jsonpath={.items[*].metadata.name}`), " ")
if len(msNames) < 3 {
g.Skip("ocp-67828 is for Infra HA OCP and expects for 3 machinesets - skipping test ... ")
}
caseID := "67828"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("config mgmt cluster: scale a machineseet to repicas==2")
oriDeletePolicy := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, msNames[2], `-o=jsonpath={.spec.deletePolicy}`)
defer func() {
if oriDeletePolicy == "" {
doOcpReq(oc, OcpPatch, false, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--type=json", "-p", `[{"op": "remove", "path": "/spec/deletePolicy"}]`)
} else {
doOcpReq(oc, OcpPatch, false, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--type=merge", fmt.Sprintf(`--patch={"spec": {"deletePolicy": "%s"}}`, oriDeletePolicy))
}
}()
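// deletePolicy=Newest makes the machineset remove its newest machines first on scale-down,
// so (assumed intent) the extra machines added below are the ones reclaimed by the deferred
// scale-down.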
doOcpReq(oc, OcpPatch, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--type=merge", `--patch={"spec": {"deletePolicy": "Newest"}}`)
oriReplicas := doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], `-o=jsonpath={.spec.replicas}`)
defer doOcpReq(oc, OcpScale, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--replicas="+oriReplicas)
doOcpReq(oc, OcpScale, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--replicas=2")
o.Eventually(func() bool {
return checkMachinesetReplicaStatus(oc, msNames[2])
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("machineset %s are ready", msNames[2]))
// use the nodes of msNames[0] and msNames[1] as serving component nodes, and the nodes of msNames[2] as non-serving component nodes
var nonServingComponentNodes = strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachine, "-l", fmt.Sprintf("machine.openshift.io/cluster-api-machineset=%s", msNames[2]), `-o=jsonpath={.items[*].status.nodeRef.name}`), " ")
var servingComponentNodes []string
for i := 0; i < 2; i++ {
servingComponentNodes = append(servingComponentNodes, strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachine, "-l", fmt.Sprintf("machine.openshift.io/cluster-api-machineset=%s", msNames[i]), `-o=jsonpath={.items[*].status.nodeRef.name}`), " ")...)
}
exutil.By("install hypershift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
externalDNS: true,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("add label/taint to servingComponentNodes")
defer func() {
removeNodesTaint(oc, servingComponentNodes, servingComponentNodesTaintKey)
removeNodesLabel(oc, servingComponentNodes, servingComponentNodesLabelKey)
}()
for _, no := range servingComponentNodes {
doOcpReq(oc, OcpAdm, true, "taint", "node", no, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", no, servingComponentNodesLabel)
}
exutil.By("add label/taint to nonServingComponentNodes")
defer func() {
removeNodesTaint(oc, nonServingComponentNodes, nonServingComponentTaintKey)
removeNodesLabel(oc, nonServingComponentNodes, nonServingComponentLabelKey)
}()
for _, no := range nonServingComponentNodes {
doOcpReq(oc, OcpAdm, true, "taint", "node", no, nonServingComponentTaint)
doOcpReq(oc, OcpLabel, true, "node", no, nonServingComponentLabel)
}
exutil.By("create MachineHealthCheck for serving component machinesets")
clusterID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
mhcBaseDir := exutil.FixturePath("testdata", "hypershift")
mhcTemplate := filepath.Join(mhcBaseDir, "mhc.yaml")
mhc := make([]mhcDescription, 2)
for i := 0; i < 2; i++ {
mhc[i] = mhcDescription{
Clusterid: clusterID,
Maxunhealthy: "100%",
MachinesetName: msNames[i],
Name: "mhc-67828-" + msNames[i],
Namespace: machineAPINamespace,
template: mhcTemplate,
}
}
defer mhc[0].deleteMhc(oc, "mhc-67828-"+msNames[0]+".template")
mhc[0].createMhc(oc, "mhc-67828-"+msNames[0]+".template")
defer mhc[1].deleteMhc(oc, "mhc-67828-"+msNames[1]+".template")
mhc[1].createMhc(oc, "mhc-67828-"+msNames[1]+".template")
exutil.By("create a hosted cluster")
release, er := exutil.GetReleaseImage(oc)
o.Expect(er).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID+"-"+strings.ToLower(exutil.RandStr(5))).
withNodePoolReplicas(2).
withAnnotation("hypershift.openshift.io/topology", "dedicated-request-serving-components").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS).
withReleaseImage(release)
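// The hypershift.openshift.io/topology=dedicated-request-serving-components annotation asks the
// scheduler to place request-serving components (kube-apiserver, oauth-openshift, routers, ...)
// only on the dedicated serving nodes labeled and tainted above; this is verified below.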
defer func() {
exutil.By("in defer function, destroy the hosted cluster")
installHelper.destroyAWSHostedClusters(createCluster)
exutil.By("check the previous serving nodes are deleted and new serving nodes are created (machinesets are still in ready status)")
o.Eventually(func() bool {
for _, no := range servingComponentNodes {
noinfo := doOcpReq(oc, OcpGet, false, "no", "--ignore-not-found", no)
if strings.TrimSpace(noinfo) != "" {
return false
}
}
for i := 0; i < 2; i++ {
if !checkMachinesetReplicaStatus(oc, msNames[i]) {
return false
}
}
return true
}, 2*DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("serving nodes %+v are not deleted", servingComponentNodes))
exutil.By("no cluster label annotation in the new serving nodes")
for i := 0; i < 2; i++ {
for _, no := range strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachine, "-l", fmt.Sprintf("machine.openshift.io/cluster-api-machineset=%s", msNames[i]), `-o=jsonpath={.items[*].status.nodeRef.name}`), " ") {
o.Expect(doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-ojsonpath={.labels.hypershift\.openshift\.io/cluster}`)).Should(o.BeEmpty())
o.Expect(doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-ojsonpath={.labels.hypershift\.openshift\.io/cluster-name}`)).Should(o.BeEmpty())
o.Expect(doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-ojsonpath={.spec.taints[?(@.key=="hypershift.openshift.io/cluster")].value}`)).Should(o.BeEmpty())
}
}
}()
hc := installHelper.createAWSHostedClusters(createCluster)
hcpNS := hc.namespace + "-" + hc.name
exutil.By("check hostedcluster annotation")
clusterSchValue := doOcpReq(oc, OcpGet, true, "-n", hc.namespace, "hostedcluster", hc.name, "--ignore-not-found", `-ojsonpath={.metadata.annotations.hypershift\.openshift\.io/cluster-scheduled}`)
o.Expect(clusterSchValue).Should(o.Equal("true"))
clusterTopology := doOcpReq(oc, OcpGet, true, "-n", hc.namespace, "hostedcluster", hc.name, "--ignore-not-found", `-ojsonpath={.metadata.annotations.hypershift\.openshift\.io/topology}`)
o.Expect(clusterTopology).Should(o.Equal("dedicated-request-serving-components"))
exutil.By("check hosted cluster hcp serving components' node allocation")
var servingComponentsNodeLocation = make(map[string]struct{})
hcpServingComponents := []string{"kube-apiserver", "ignition-server-proxy", "oauth-openshift", "private-router"}
for _, r := range hcpServingComponents {
nodes := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp="+r, `-ojsonpath={.items[*].spec.nodeName}`), " ")
for _, n := range nodes {
o.Expect(n).Should(o.BeElementOf(servingComponentNodes))
servingComponentsNodeLocation[n] = struct{}{}
}
}
o.Expect(servingComponentsNodeLocation).ShouldNot(o.BeEmpty())
exutil.By("check serving nodes hcp labels and taints are generated automatically on the serving nodes")
for no := range servingComponentsNodeLocation {
cluster := doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-o=jsonpath={.metadata.labels.hypershift\.openshift\.io/cluster}`)
o.Expect(cluster).Should(o.Equal(hcpNS))
clusterName := doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-o=jsonpath={.metadata.labels.hypershift\.openshift\.io/cluster-name}`)
o.Expect(clusterName).Should(o.Equal(hc.name))
hcpTaint := doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-o=jsonpath={.spec.taints[?(@.key=="hypershift.openshift.io/cluster")].value}`)
o.Expect(hcpTaint).Should(o.Equal(hcpNS))
}
hcpNonServingComponents := []string{
"cloud-controller-manager",
"aws-ebs-csi-driver-controller",
"capi-provider-controller-manager",
"catalog-operator",
"certified-operators-catalog",
"cloud-network-config-controller",
"cluster-api",
"cluster-autoscaler",
"cluster-network-operator",
"cluster-node-tuning-operator",
"cluster-policy-controller",
"cluster-version-operator",
"community-operators-catalog",
"control-plane-operator",
"csi-snapshot-controller",
"csi-snapshot-controller-operator",
"csi-snapshot-webhook",
"dns-operator",
"etcd",
"hosted-cluster-config-operator",
"ignition-server",
"ingress-operator",
"konnectivity-agent",
"kube-controller-manager",
"kube-scheduler",
"machine-approver",
"multus-admission-controller",
"network-node-identity",
"olm-operator",
"openshift-apiserver",
"openshift-controller-manager",
"openshift-oauth-apiserver",
"openshift-route-controller-manager",
"ovnkube-control-plane",
"packageserver",
"redhat-marketplace-catalog",
"redhat-operators-catalog",
}
for _, r := range hcpNonServingComponents {
nodes := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp="+r, `-o=jsonpath={.items[*].spec.nodeName}`), " ")
for _, n := range nodes {
o.Expect(n).Should(o.BeElementOf(nonServingComponentNodes))
}
}
// components that have no "app" label and are selected by their "name" label instead
hcpNonServingComponentsWithoutAppLabels := []string{
"aws-ebs-csi-driver-operator",
"cluster-image-registry-operator",
"cluster-storage-operator",
}
for _, r := range hcpNonServingComponentsWithoutAppLabels {
nodes := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lname="+r, `-o=jsonpath={.items[*].spec.nodeName}`), " ")
for _, n := range nodes {
o.Expect(n).Should(o.BeElementOf(nonServingComponentNodes))
}
}
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:heli-Critical-67721-[HyperShiftINSTALL] Hypershift Operator version validation is not skipping version checks for node pools [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 67721 is for AWS - skipping test ...")
}
caseID := "67721"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("check hosted cluster supported version")
supportedVersion := doOcpReq(oc, OcpGet, true, "configmap", "-n", "hypershift", "supported-versions", `-ojsonpath={.data.supported-versions}`)
e2e.Logf("supported version is: " + supportedVersion)
minSupportedVersion, err := getVersionWithMajorAndMinor(getMinSupportedOCPVersion())
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(supportedVersion).Should(o.ContainSubstring(minSupportedVersion))
exutil.By("get max unsupported HostedClusters version nightly release")
maxUnsupportedVersion, err := getVersionWithMajorAndMinor(getLatestUnsupportedOCPVersion())
o.Expect(err).ShouldNot(o.HaveOccurred())
release, err := exutil.GetLatestNightlyImage(maxUnsupportedVersion)
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("create HostedClusters with unsupported version")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID).
withReleaseImage(release).
withNodePoolReplicas(1)
defer installHelper.destroyAWSHostedClusters(createCluster)
hc := installHelper.createAWSHostedClusterWithoutCheck(createCluster)
exutil.By("check hc condition & nodepool condition")
o.Eventually(func() bool {
hcStatus := doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", `-o=jsonpath={.status.conditions[?(@.type=="ValidReleaseImage")].status}`)
if hcStatus != "False" {
return false
}
npStatus := doOcpReq(oc, OcpGet, false, "nodepool", "-n", hc.namespace, fmt.Sprintf(`-o=jsonpath={.items[?(@.spec.clusterName=="%s")].status.conditions[?(@.type=="ValidReleaseImage")].status}`, hc.name))
for _, st := range strings.Split(npStatus, " ") {
if st != "False" {
return false
}
}
return true
}, LongTimeout, LongTimeout/30).Should(o.BeTrue())
exutil.By("add annotation to skip release check")
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hc.name, "-n", hc.namespace, "hypershift.openshift.io/skip-release-image-validation=true")
skipReleaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hc.name, "-n", hc.namespace, `-o=jsonpath={.metadata.annotations.hypershift\.openshift\.io/skip-release-image-validation}`)
o.Expect(skipReleaseImage).Should(o.ContainSubstring("true"))
exutil.By("check nodepool and hc to be recovered")
o.Eventually(func() bool {
hcStatus := doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", `-o=jsonpath={.status.conditions[?(@.type=="ValidReleaseImage")].status}`)
if hcStatus != "True" {
return false
}
return true
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "hostedcluster ValidReleaseImage could not be recovered back error")
o.Eventually(func() bool {
npStatus := doOcpReq(oc, OcpGet, false, "nodepool", "-n", hc.namespace, fmt.Sprintf(`-o=jsonpath={.items[?(@.spec.clusterName=="%s")].status.conditions[?(@.type=="ValidReleaseImage")].status}`, hc.name))
for _, st := range strings.Split(npStatus, " ") {
if st != "True" {
return false
}
}
return true
}, LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ValidReleaseImage could not be recovered back error")
o.Eventually(hc.pollHostedClustersReady(), ClusterInstallTimeout, ClusterInstallTimeout/10).Should(o.BeTrue(), "AWS HostedClusters install error")
exutil.By("create a new nodepool")
replica := 1
npName := caseID + strings.ToLower(exutil.RandStrDefault())
NewAWSNodePool(npName, hc.name, hc.namespace).
WithNodeCount(&replica).
WithReleaseImage(release).
CreateAWSNodePool()
o.Eventually(hc.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:heli-Critical-67278-Critical-69222-[HyperShiftINSTALL] Test embargoed cluster upgrades imperceptibly [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 67278 and 69222 are for AWS - skipping test ...")
}
caseID := "67278-69222"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(2).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS).
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
hcpNS := hostedCluster.namespace + "-" + hostedCluster.name
exutil.By("check hostedcluster nodes ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
exutil.By("ocp-69222 check hosted cluster only expost port 443")
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.status.controlPlaneEndpoint.port}`)).Should(o.Equal("443"))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hcpNS, "service", "private-router", `-o=jsonpath={.spec.ports[?(@.targetPort=="https")].port}`)).Should(o.Equal("443"))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hcpNS, "service", "router", `-o=jsonpath={.spec.ports[?(@.targetPort=="https")].port}`)).Should(o.Equal("443"))
exutil.By("get management cluster cluster version and find the latest CI image")
hcpRelease := doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-ojsonpath={.spec.release.image}`)
mgmtVersion, mgmtBuild, err := exutil.GetClusterVersion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("hcp image is %s and mgmt cluster image is %s", hcpRelease, mgmtBuild)
ciImage, err := exutil.GetLatestImage(architecture.ClusterArchitecture(oc).String(), "ocp", mgmtVersion+".0-0.ci")
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("upgrade hcp to latest ci image by controlPlaneRelease")
doOcpReq(oc, OcpPatch, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, "--type=merge", fmt.Sprintf(`--patch={"spec": {"controlPlaneRelease": {"image":"%s"}}}`, ciImage))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.spec.controlPlaneRelease.image}`)).Should(o.ContainSubstring(ciImage))
exutil.By("check clusterversion operator in hcp is updated to ci image")
o.Eventually(func() bool {
images := doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp=cluster-version-operator", "--ignore-not-found", `-o=jsonpath={.items[*].spec.containers[*].image}`)
for _, image := range strings.Split(images, " ") {
if !strings.Contains(image, ciImage) {
return false
}
}
return true
}, LongTimeout, LongTimeout/20).Should(o.BeTrue(), "cluster version operator in hcp image not updated error")
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.spec.release.image}`)).Should(o.ContainSubstring(hcpRelease))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.status.version.history[?(@.state=="Completed")].version}`)).Should(o.ContainSubstring(mgmtBuild))
o.Expect(doOcpReq(oc, OcpGet, true, "--kubeconfig="+hostedCluster.hostedClustersKubeconfigFile, "clusterversion", "version", `-o=jsonpath={.status.history[?(@.state=="Completed")].version}`)).Should(o.ContainSubstring(mgmtBuild))
o.Expect(doOcpReq(oc, OcpGet, true, "--kubeconfig="+hostedCluster.hostedClustersKubeconfigFile, "featuregate", "cluster", "--ignore-not-found", `-o=jsonpath={.status.featureGates[0].version}`)).Should(o.ContainSubstring(mgmtBuild))
exutil.By("create a new nodepool and check its version is still the old one")
npName := fmt.Sprintf("np-67278-%s", exutil.GetRandomString())
nodeCount := 1
defer hostedCluster.deleteNodePool(npName)
NewAWSNodePool(npName, hostedCluster.name, hostedCluster.namespace).WithNodeCount(&nodeCount).CreateAWSNodePool()
o.Eventually(hostedCluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout+DefaultTimeout, (LongTimeout+DefaultTimeout)/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "nodepool", npName, "--ignore-not-found", `-o=jsonpath={.spec.release.image}`)).Should(o.ContainSubstring(hcpRelease))
})
// author: [email protected]
// only test OCP-62972 step 1: HO install param conflict
// the rest of the steps are covered by https://github.com/openshift/release/blob/dbe448dd31754327d60921b3c06d966b5ef8bf7d/ci-operator/step-registry/cucushift/hypershift-extended/install-private/cucushift-hypershift-extended-install-private-commands.sh#L11
g.It("Longduration-NonPreRelease-Author:heli-High-62972-[HyperShiftINSTALL] Check conditional updates on HyperShift Hosted Control Plane [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 62972 is for AWS - skipping test ...")
}
caseID := "62972"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
region: region,
}
installHelper.newAWSS3Client()
defer installHelper.deleteAWSS3Bucket()
installHelper.createAWSS3Bucket()
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift install "+
"--oidc-storage-provider-s3-bucket-name %s "+
"--oidc-storage-provider-s3-credentials %s "+
"--oidc-storage-provider-s3-region %s "+
"--enable-cvo-management-cluster-metrics-access=true "+
"--rhobs-monitoring=true ",
installHelper.bucketName, installHelper.dir+"/credentials", installHelper.region)
output, err := bashClient.Run(cmd).Output()
o.Expect(err).Should(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("when invoking this command with the --rhobs-monitoring flag, the --enable-cvo-management-cluster-metrics-access flag is not supported"))
})
// Author: [email protected]
g.It("NonPreRelease-Longduration-Author:fxie-Critical-70614-[HyperShiftINSTALL] Test HostedCluster condition type AWSDefaultSecurityGroupDeleted [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Running on %s while the test case is AWS-only, skipping", iaasPlatform))
}
var (
namePrefix = fmt.Sprintf("70614-%s", strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", namePrefix)
bucketName = fmt.Sprintf("%s-bucket", namePrefix)
hcName = fmt.Sprintf("%s-hc", namePrefix)
lbName = fmt.Sprintf("%s-lb", namePrefix)
targetConditionType = "AWSDefaultSecurityGroupDeleted"
watchTimeoutSec = 900
)
var (
unstructured2TypedCondition = func(condition any, typedCondition *metav1.Condition) {
g.GinkgoHelper()
conditionMap, ok := condition.(map[string]any)
o.Expect(ok).To(o.BeTrue(), "Failed to cast condition to map[string]any")
conditionJson, err := json.Marshal(conditionMap)
o.Expect(err).ShouldNot(o.HaveOccurred())
err = json.Unmarshal(conditionJson, typedCondition)
o.Expect(err).ShouldNot(o.HaveOccurred())
}
)
exutil.By("Installing the Hypershift Operator")
defer func() {
err := os.RemoveAll(tempDir)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err := os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a HostedCluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
// The number of worker nodes (of the hosted cluster) is irrelevant, so we only create one.
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withReleaseImage(release)
defer installHelper.deleteHostedClustersManual(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Getting default worker SG of the hosted cluster")
defaultWorkerSGID := doOcpReq(oc, OcpGet, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, `-o=jsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}`)
e2e.Logf("Found defaultWorkerSecurityGroupID = %s", defaultWorkerSGID)
exutil.By("Creating a dummy load balancer which has the default worker SG attached")
subnet := doOcpReq(oc, OcpGet, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, `-o=jsonpath={.spec.platform.aws.cloudProviderConfig.subnet.id}`)
e2e.Logf("Found subnet of the hosted cluster = %s", subnet)
clusterinfra.GetAwsCredentialFromCluster(oc)
elbClient := elb.New(session.Must(session.NewSession()), aws.NewConfig().WithRegion(region))
defer func() {
_, err = elbClient.DeleteLoadBalancer(&elb.DeleteLoadBalancerInput{
LoadBalancerName: aws.String(lbName),
})
// If the load balancer does not exist or has already been deleted, the call to DeleteLoadBalancer still succeeds.
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = elbClient.CreateLoadBalancer(&elb.CreateLoadBalancerInput{
Listeners: []*elb.Listener{
{
InstancePort: aws.Int64(80),
InstanceProtocol: aws.String("HTTP"),
LoadBalancerPort: aws.Int64(80),
Protocol: aws.String("HTTP"),
},
},
LoadBalancerName: aws.String(lbName),
Subnets: aws.StringSlice([]string{subnet}),
SecurityGroups: aws.StringSlice([]string{defaultWorkerSGID}),
})
if err != nil {
// Log a more granular error message if possible
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Error creating AWS load balancer (%s): %v", aerr.Code(), aerr)
}
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error creating AWS load balancer")
}
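// While the load balancer exists it keeps the default worker SG in use, so the SG cannot be
// deleted during HostedCluster teardown and the AWSDefaultSecurityGroupDeleted condition is
// expected to stay False until the load balancer is removed.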
exutil.By("Delete the HostedCluster without waiting for the finalizers (non-blocking)")
doOcpReq(oc, OcpDelete, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, "--wait=false")
exutil.By("Polling until the AWSDefaultSecurityGroupDeleted condition is in false status")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, false, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, fmt.Sprintf(`-o=jsonpath={.status.conditions[?(@.type=="%s")].status}`, targetConditionType))
}, LongTimeout, LongTimeout/10).Should(o.Equal("False"), "Timeout waiting for the AWSDefaultSecurityGroupDeleted condition to be in false status")
targetConditionMessage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, fmt.Sprintf(`-o=jsonpath={.status.conditions[?(@.type=="%s")].message}`, targetConditionType))
e2e.Logf("Found message of the AWSDefaultSecurityGroupDeleted condition = %s", targetConditionMessage)
exutil.By("Start watching the HostedCluster with a timeout")
hcRestMapping, err := oc.RESTMapper().RESTMapping(schema.GroupKind{
Group: "hypershift.openshift.io",
Kind: "HostedCluster",
})
o.Expect(err).ShouldNot(o.HaveOccurred())
w, err := oc.AdminDynamicClient().Resource(hcRestMapping.Resource).Namespace(hostedCluster.namespace).Watch(context.Background(), metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("metadata.name", hostedCluster.name).String(),
TimeoutSeconds: ptr.To(int64(watchTimeoutSec)),
})
o.Expect(err).ShouldNot(o.HaveOccurred())
defer w.Stop()
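// The watch is started before the load balancer is deleted so that the MODIFIED event flipping
// AWSDefaultSecurityGroupDeleted to True cannot be missed.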
exutil.By("Now delete the load balancer created above")
_, err = elbClient.DeleteLoadBalancer(&elb.DeleteLoadBalancerInput{
LoadBalancerName: aws.String(lbName),
})
if err != nil {
// Log a more granular error message if possible
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Error deleting AWS load balancer (%s): %v", aerr.Code(), aerr)
}
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error deleting AWS load balancer")
}
exutil.By("Examining MODIFIED events that occurs on the HostedCluster")
var typedCondition metav1.Condition
var targetConditionExpected bool
resultChan := w.ResultChan()
outerForLoop:
for event := range resultChan {
if event.Type != watch.Modified {
continue
}
e2e.Logf("MODIFIED event captured")
// Avoid conversion to typed object as it'd bring in quite a few dependencies to the repo
hcUnstructured, ok := event.Object.(*unstructured.Unstructured)
o.Expect(ok).To(o.BeTrue(), "Failed to cast event.Object into *unstructured.Unstructured")
conditions, found, err := unstructured.NestedSlice(hcUnstructured.Object, "status", "conditions")
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(found).To(o.BeTrue())
for _, condition := range conditions {
unstructured2TypedCondition(condition, &typedCondition)
if typedCondition.Type != targetConditionType {
continue
}
if typedCondition.Status == metav1.ConditionTrue {
e2e.Logf("Found AWSDefaultSecurityGroupDeleted condition = %s", typedCondition)
targetConditionExpected = true
break outerForLoop
}
e2e.Logf("The AWSDefaultSecurityGroupDeleted condition is found to be in %s status, keep waiting", typedCondition.Status)
}
}
// The result channel could be closed since the beginning, e.g. when an inappropriate ListOptions is passed to Watch
// We need to ensure this is not the case
o.Expect(targetConditionExpected).To(o.BeTrue(), "Result channel closed unexpectedly before the AWSDefaultSecurityGroupDeleted condition becomes true in status")
exutil.By("Polling until the HostedCluster is gone")
o.Eventually(func() bool {
_, err := oc.AdminDynamicClient().Resource(hcRestMapping.Resource).Namespace(hostedCluster.namespace).Get(context.Background(), hostedCluster.name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return true
}
o.Expect(err).ShouldNot(o.HaveOccurred(), fmt.Sprintf("Unexpected error: %s", errors.ReasonForError(err)))
e2e.Logf("Still waiting for the HostedCluster to disappear")
return false
}, LongTimeout, LongTimeout/10).Should(o.BeTrue(), "Timed out waiting for the HostedCluster to disappear")
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:heli-Critical-64409-[HyperShiftINSTALL] Ensure ingress controllers are removed before load balancers [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 64409 is for AWS - skipping test ...")
}
caseID := "64409"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
// files to store the recorded deletion timestamps
var svcDeleteTimeStampFile = dir + "/svc-deletion-time-stamp-result.txt"
var ingressControllerDeleteTimeStampFile = dir + "/ingress-controller-deletion-time-stamp-result.txt"
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters config")
nodeReplicas := 1
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(nodeReplicas).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS)
exutil.By("add watcher to catch the resource deletion info")
svcCtx, svcCancel := context.WithTimeout(context.Background(), ClusterInstallTimeout+LongTimeout)
defer svcCancel()
operatorCtx, operatorCancel := context.WithTimeout(context.Background(), ClusterInstallTimeout+LongTimeout)
defer operatorCancel()
defer func() {
// destroy hosted cluster
installHelper.destroyAWSHostedClusters(createCluster)
e2e.Logf("check destroy AWS HostedClusters")
o.Eventually(pollGetHostedClusters(oc, createCluster.Namespace), ShortTimeout, ShortTimeout/10).ShouldNot(o.ContainSubstring(createCluster.Name), "destroy AWS HostedClusters error")
exutil.By("check the ingress controllers are removed before load balancers")
// get resource deletion time
svcDelTimeStr, err := os.ReadFile(svcDeleteTimeStampFile)
o.Expect(err).NotTo(o.HaveOccurred())
ingressDelTimeStr, err := os.ReadFile(ingressControllerDeleteTimeStampFile)
o.Expect(err).NotTo(o.HaveOccurred())
ingressDelTime, err := time.Parse(time.RFC3339, string(ingressDelTimeStr))
o.Expect(err).NotTo(o.HaveOccurred())
routeSVCTime, err := time.Parse(time.RFC3339, string(svcDelTimeStr))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check the ingress controllers are removed before load balancers")
e2e.Logf("parsed deletion time ingresscontroller: %s, route svc: %s", ingressDelTime, routeSVCTime)
o.Expect(ingressDelTime.After(routeSVCTime)).Should(o.BeFalse())
}()
exutil.By("create a hosted cluster")
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(nodeReplicas), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
hostedCluster.oc.SetGuestKubeconf(hostedCluster.hostedClustersKubeconfigFile)
exutil.By("start a goroutine to watch delete time for the hosted cluster svc router-default")
svcName := "router-default"
svcNamespace := "openshift-ingress"
startWatch(svcCtx, hostedCluster.hostedClustersKubeconfigFile, watchInfo{
resourceType: Service,
name: svcName,
namespace: svcNamespace,
deleteFunc: func(obj interface{}) {
svcObj, ok := obj.(*corev1.Service)
if !ok {
return
}
if svcObj.Name == svcName && !svcObj.DeletionTimestamp.IsZero() {
e2e.Logf("[deleteFunc] caught the deletion time of service %s in %s, deletionTimestamp is %s", svcObj.Name, svcObj.Namespace, svcObj.DeletionTimestamp.String())
writeErr := os.WriteFile(svcDeleteTimeStampFile, []byte(svcObj.DeletionTimestamp.Format(time.RFC3339)), 0644)
if writeErr != nil {
e2e.Logf("[deleteFunc] failed to write service %s in %s deletion time [%s] into local file %s, error %s", svcObj.Name, svcObj.Namespace, svcObj.DeletionTimestamp.String(), svcDeleteTimeStampFile, writeErr.Error())
}
svcCancel()
}
},
})
exutil.By("start a goroutine to watch delete time for the hosted cluster ingresscontroller default")
icName := "default"
icNamespace := "openshift-ingress-operator"
startWatchOperator(operatorCtx, hostedCluster.hostedClustersKubeconfigFile, operatorWatchInfo{
group: "operator.openshift.io",
version: "v1",
resources: "ingresscontrollers",
name: icName,
namespace: icNamespace,
deleteFunc: func(obj []byte) {
ingressObj := operatorv1.IngressController{}
if unmarshalErr := json.Unmarshal(obj, &ingressObj); unmarshalErr != nil {
e2e.Logf("[deleteFunc] failed to unmarshal ingresscontroller %s in %s, error %s", icName, icNamespace, unmarshalErr.Error())
return
}
if ingressObj.Name == icName && !ingressObj.DeletionTimestamp.IsZero() {
e2e.Logf("[deleteFunc] caught the deletion time of ingresscontroller %s in %s, deletionTimestamp is %s", ingressObj.Name, ingressObj.Namespace, ingressObj.DeletionTimestamp.String())
writeErr := os.WriteFile(ingressControllerDeleteTimeStampFile, []byte(ingressObj.DeletionTimestamp.Format(time.RFC3339)), 0644)
if writeErr != nil {
e2e.Logf("[deleteFunc] failed to write ingresscontroller %s in %s deletion time [%s] into local file %s, error %s", ingressObj.Name, ingressObj.Namespace, ingressObj.DeletionTimestamp.String(), ingressControllerDeleteTimeStampFile, writeErr.Error())
}
operatorCancel()
}
},
})
})
// Author: [email protected]
// Timeout: 60min (test run took ~40min)
g.It("NonPreRelease-Longduration-Author:fxie-Critical-68221-[HyperShiftINSTALL] Test the scheduler to only accept paired Nodes and check scheduler HCs has two Nodes [Disruptive]", func() {
// Variables
var (
testCaseId = "68221"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
mhcTemplate = filepath.Join(fixturePath, "mhc.yaml")
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
mhcNamePrefix = fmt.Sprintf("%s-mhc", resourceNamePrefix)
adminKubeClient = oc.AdminKubeClient()
numWorkersExpected = 3
numMasters = 3
numMsetsExpected = 3
aggregatedErr []error
)
// Utilities
var (
findServingPairIdx = func(servingPairsNodeNames [][]string, podNodeName string) (int, bool) {
e2e.Logf("Finding serving pair index")
for idx, servingPairNodeNames := range servingPairsNodeNames {
if slices.Contains(servingPairNodeNames, podNodeName) {
return idx, true
}
}
return -1, false
}
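// checkPodNodeAffinity verifies that the pod's required node affinity selects serving nodes
// (servingComponentNodesLabelKey=true) dedicated to this hosted cluster
// (hypershiftClusterLabelKey=<hostedClusterIdentifier>).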
checkPodNodeAffinity = func(pod *corev1.Pod, hostedClusterIdentifier string) {
nodeSelectorRequirements := pod.Spec.Affinity.NodeAffinity.
RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions
expectedNodeSelectorRequirements := []corev1.NodeSelectorRequirement{
{
Key: servingComponentNodesLabelKey,
Operator: corev1.NodeSelectorOpIn,
Values: []string{"true"},
},
{
Key: hypershiftClusterLabelKey,
Operator: corev1.NodeSelectorOpIn,
Values: []string{hostedClusterIdentifier},
},
}
// Assume the key to be unique across NodeSelectorRequirements
sort.Slice(nodeSelectorRequirements, func(i, j int) bool {
return nodeSelectorRequirements[i].Key < nodeSelectorRequirements[j].Key
})
sort.Slice(expectedNodeSelectorRequirements, func(i, j int) bool {
return expectedNodeSelectorRequirements[i].Key < expectedNodeSelectorRequirements[j].Key
})
// Pretty-print actual and expected NodeSelectorRequirements side-by-side for comparison in case they do not match
if !reflect.DeepEqual(nodeSelectorRequirements, expectedNodeSelectorRequirements) {
e2e.Logf(diff.ObjectGoPrintSideBySide(nodeSelectorRequirements, expectedNodeSelectorRequirements))
e2e.Failf("Unexpected node affinity for pod")
}
e2e.Logf("Node affinity expected")
}
// Delete serving node by scaling down the corresponding serving MachineSet
// Return the name of the MachineSet scaled down, so it can be scaled back up later
deleteServingNode = func(allNodeNames, allMsetNames []string, servingNodeName string) string {
g.GinkgoHelper()
servingNodeIdx := slices.Index(allNodeNames, servingNodeName)
o.Expect(servingNodeIdx).To(o.BeNumerically(">=", 0), fmt.Sprintf("Serving node %s not found in %v", servingNodeName, allNodeNames))
msetName := allMsetNames[servingNodeIdx]
doOcpReq(oc, OcpScale, true, "--replicas=0", fmt.Sprintf("%s/%s", mapiMachineset, msetName), "-n", machineAPINamespace)
exutil.WaitForNodeToDisappear(oc, servingNodeName, LongTimeout, DefaultTimeout/10)
return msetName
}
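// checkServingNodePairLabelsAndTaints verifies that exactly two nodes carry both the
// hosted-cluster label and the serving-pair label, and that each of them is tainted
// NoSchedule for this hosted cluster.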
checkServingNodePairLabelsAndTaints = func(hostedClusterIdentifier string, servingPairIdx int) {
// Get serving nodes
nodeList, err := adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{
hypershiftClusterLabelKey: hostedClusterIdentifier,
osdfmPairedNodeLabelKey: fmt.Sprintf("serving-%v", servingPairIdx),
}).String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCount := len(nodeList.Items); nodeCount != 2 {
var nodeNames []string
for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name)
}
e2e.Failf("Expect 2 serving nodes but found %v (%v)", nodeCount, nodeNames)
}
for _, node := range nodeList.Items {
o.Expect(taints.TaintExists(node.Spec.Taints, &corev1.Taint{
Effect: "NoSchedule",
Key: hypershiftClusterLabelKey,
Value: hostedClusterIdentifier,
})).To(o.BeTrue())
}
}
// Not all fields of a resource are supported as field selectors.
// Here we list all deployments in the namespace for simplicity.
waitForHostedClusterDeploymentsReady = func(ns string) {
exutil.WaitForDeploymentsReady(context.Background(), func(ctx context.Context) (*appsv1.DeploymentList, error) {
return adminKubeClient.AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{})
}, exutil.IsDeploymentReady, LongTimeout, DefaultTimeout/10, false)
}
)
// Report all non-nil errors occurred in deferred functions
defer func() {
o.Expect(errors2.NewAggregate(aggregatedErr)).NotTo(o.HaveOccurred())
}()
// Needs MAPI for MachineSets
exutil.SkipNoCapabilities(oc, "MachineAPI")
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Running on %s while the test case is AWS-only, skipping", iaasPlatform))
}
exutil.By("Getting info about the management cluster")
msetNames := clusterinfra.ListWorkerMachineSetNames(oc)
// In theory the number of MachineSets does not have to be exactly 3 but should be at least 3.
// The following is enforced for alignment with the test case.
if numMset := len(msetNames); numMset != numMsetsExpected {
g.Skip("Expect %v worker MachineSets but found %v, skipping", numMsetsExpected, numMset)
}
mset1Name := msetNames[0]
mset2Name := msetNames[1]
mset3Name := msetNames[2]
e2e.Logf("Found worker MachineSets %v on the management cluster", msetNames)
nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), adminKubeClient)
o.Expect(err).NotTo(o.HaveOccurred())
// In theory the number of ready schedulable Nodes does not have to be exactly 3 but should be at least 3.
// The following is enforced for alignment with the test case.
numReadySchedulableNodes := len(nodeList.Items)
if numReadySchedulableNodes != numWorkersExpected {
g.Skip("Expect %v ready schedulable nodes but found %v, skipping", numWorkersExpected, numReadySchedulableNodes)
}
defer func() {
e2e.Logf("Making sure we ends up with the correct number of nodes and all of them are ready and schedulable")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, DefaultTimeout, true, func(_ context.Context) (bool, error) {
nodeList, err = adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{"node-role.kubernetes.io/worker": ""}).String(),
})
if err != nil {
return false, err
}
if numWorker := len(nodeList.Items); numWorker != numWorkersExpected {
e2e.Logf("Expect %v worker nodes but found %v, keep polling", numWorkersExpected, numWorker)
return false, nil
}
for _, node := range nodeList.Items {
if !e2enode.IsNodeReady(&node) {
e2e.Logf("Worker node %v not ready, keep polling", node.Name)
return false, nil
}
if len(node.Spec.Taints) > 0 {
e2e.Logf("Worker node tainted, keep polling", node.Name)
return false, nil
}
}
return true, nil
})
aggregatedErr = append(aggregatedErr, err)
}()
numNode := numReadySchedulableNodes + numMasters
e2e.Logf("Found %v nodes on the management cluster", numNode)
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Found management cluster region = %s", region)
// Create (non-spot) MachineSets based on existing ones for simplicity
exutil.By("Creating additional worker nodes through MachineSets on the management cluster")
e2e.Logf("Creating 2 MachineSets in the first AZ")
extraMset1Az1Name := mset1Name + fmt.Sprintf("-%s-1", testCaseId)
extraMset1Az1 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset1Az1Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset1Az1.DeleteMachineSet(oc))
}()
extraMset1Az1.CreateMachineSetBasedOnExisting(oc, mset1Name, false)
extraMset2Az1Name := mset1Name + fmt.Sprintf("-%s-2", testCaseId)
extraMset2Az1 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset2Az1Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset2Az1.DeleteMachineSet(oc))
}()
extraMset2Az1.CreateMachineSetBasedOnExisting(oc, mset1Name, false)
e2e.Logf("Creating a MachineSet in the second AZ")
extraMset1Az2Name := mset2Name + fmt.Sprintf("-%s-1", testCaseId)
extraMset1Az2 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset1Az2Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset1Az2.DeleteMachineSet(oc))
}()
extraMset1Az2.CreateMachineSetBasedOnExisting(oc, mset2Name, false)
e2e.Logf("Creating a MachineSet in the third AZ")
extraMset1Az3Name := mset3Name + fmt.Sprintf("-%s-1", testCaseId)
extraMset1Az3 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset1Az3Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset1Az3.DeleteMachineSet(oc))
}()
extraMset1Az3.CreateMachineSetBasedOnExisting(oc, mset3Name, false)
e2e.Logf("Waiting until the desired number of Nodes are ready")
_, err = e2enode.CheckReady(context.Background(), adminKubeClient, numNode+4, LongTimeout)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Getting Node name for each MachineSet and define node grouping")
allMsetNames := []string{mset1Name, mset2Name, mset3Name, extraMset1Az1Name, extraMset2Az1Name, extraMset1Az2Name, extraMset1Az3Name}
e2e.Logf("All MachineSets = %v", allMsetNames)
servingMsetNames := []string{mset1Name, mset2Name, extraMset1Az1Name, extraMset1Az2Name}
e2e.Logf("Serving MachineSets = %v", servingMsetNames)
var allWorkerNodeNames []string
for _, msetName := range allMsetNames {
allWorkerNodeNames = append(allWorkerNodeNames, exutil.GetNodeNameByMachineset(oc, msetName))
}
e2e.Logf("All worker nodes = %v", allWorkerNodeNames)
servingPair1NodeNames := []string{allWorkerNodeNames[0], allWorkerNodeNames[1]}
e2e.Logf("Serving pair 1 nodes = %v", servingPair1NodeNames)
nonServingNode := allWorkerNodeNames[2]
e2e.Logf("Non serving node = %v", nonServingNode)
servingPair2NodeNames := []string{allWorkerNodeNames[3], allWorkerNodeNames[5]}
e2e.Logf("Serving pair 2 nodes = %v", servingPair1NodeNames)
hoPodNodeNames := []string{allWorkerNodeNames[4], allWorkerNodeNames[6]}
e2e.Logf("Nodes for Hypershift Operator Pods = %v", hoPodNodeNames)
servingPairs := [][]string{servingPair1NodeNames, servingPair2NodeNames}
servingPairNodeNames := append(servingPair1NodeNames, servingPair2NodeNames...)
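// A MachineHealthCheck (maxUnhealthy=100%) per serving MachineSet lets MAPI remediate serving
// machines whose nodes go away (e.g. once the hosted cluster is destroyed), so replacement
// machines and nodes are created automatically.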
exutil.By("Creating a MachineHealthCheck for each serving MachineSet")
infraId := doOcpReq(oc, OcpGet, true, "infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}")
e2e.Logf("Found infra ID = %s", infraId)
for _, msetName := range servingMsetNames {
mhcName := fmt.Sprintf("%s-%s", mhcNamePrefix, msetName)
parsedTemplate := fmt.Sprintf("%s.template", mhcName)
mhc := mhcDescription{
Clusterid: infraId,
Maxunhealthy: "100%",
MachinesetName: msetName,
Name: mhcName,
Namespace: machineAPINamespace,
template: mhcTemplate,
}
defer mhc.deleteMhc(oc, parsedTemplate)
mhc.createMhc(oc, parsedTemplate)
}
exutil.By("Adding labels and taints on the serving node pairs and a non serving node")
e2e.Logf("Adding labels and taints on the serving node pairs")
defer func() {
for _, servingPairNodeNames := range servingPairs {
for _, nodeName := range servingPairNodeNames {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", nodeName, servingComponentNodesTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, servingComponentNodesLabelKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, osdfmPairedNodeLabelKey+"-").Execute()
}
}
}()
for idx, servingPairNodeNames := range servingPairs {
for _, nodeName := range servingPairNodeNames {
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", nodeName, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", nodeName, servingComponentNodesLabel)
doOcpReq(oc, OcpLabel, true, "node", nodeName, fmt.Sprintf("%s=serving-%v", osdfmPairedNodeLabelKey, idx))
}
}
e2e.Logf("Adding labels and taints on the non serving node")
defer func() {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", nonServingNode, nonServingComponentTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nonServingNode, nonServingComponentLabelKey+"-").Execute()
}()
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", nonServingNode, nonServingComponentTaint)
doOcpReq(oc, OcpLabel, true, "node", nonServingNode, nonServingComponentLabel)
exutil.By("Installing the Hypershift Operator")
defer func() {
aggregatedErr = append(aggregatedErr, os.RemoveAll(tempDir))
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
// At this point HO Pods are ready so no need to poll
e2e.Logf("Making sure HO Pods are scheduled on the nodes without taints")
podList, err := adminKubeClient.CoreV1().Pods(hypershiftOperatorNamespace).List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{"app": "operator"}).String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podList.Items).To(o.HaveLen(2))
var actualHoPodNodeNames []string
for _, pod := range podList.Items {
actualHoPodNodeNames = append(actualHoPodNodeNames, pod.Spec.NodeName)
}
sort.Strings(hoPodNodeNames)
sort.Strings(actualHoPodNodeNames)
o.Expect(hoPodNodeNames).To(o.Equal(actualHoPodNodeNames))
exutil.By("Creating a hosted cluster with request serving annotation")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
// The number of worker nodes (of the hosted cluster) is irrelevant, so we will only create one.
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withAnnotation(hcTopologyAnnotationKey, "dedicated-request-serving-components").
withReleaseImage(release)
defer installHelper.deleteHostedClustersManual(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
hostedClusterIdentifier := fmt.Sprintf("%s-%s", hostedCluster.namespace, hostedCluster.name)
e2e.Logf("Hosted cluster created with identifier = %s", hostedClusterIdentifier)
// At this point (minutes after the installation of the Hypershift operator)
// we expect all labels and taints to be set by controller so no need for polling.
exutil.By("Making sure all hosted cluster components are correctly scheduled")
// No need to check tolerations as the correct scheduling of Pods implies correct toleration settings
exutil.By("Making sure the correct labels and nodeAffinities are set on the request serving components")
requestServingComponentLabelSelector := labels.SelectorFromSet(map[string]string{servingComponentPodLabelKey: "true"})
podList, err = adminKubeClient.CoreV1().Pods(hostedClusterIdentifier).List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items)).NotTo(o.BeZero(), "Empty pod list")
var servingNodeName string
for _, pod := range podList.Items {
podNodeName := pod.Spec.NodeName
if requestServingComponentLabelSelector.Matches(labels.Set(pod.Labels)) {
e2e.Logf("Pod %s belongs to a request serving component", pod.Name)
// Make sure the request serving Pod is correctly scheduled
if len(servingNodeName) == 0 {
servingNodeName = podNodeName
o.Expect(servingPairNodeNames).To(o.ContainElements(servingNodeName), "Pod scheduled on a non serving node")
e2e.Logf("Found serving node = %v", servingNodeName)
} else {
o.Expect(servingNodeName).To(o.Equal(podNodeName), fmt.Sprintf("Expect Pod to be scheduled on serving node %s but scheduled on %s", servingNodeName, podNodeName))
}
// Make sure the request serving Pod has the correct nodeAffinities
checkPodNodeAffinity(&pod, hostedClusterIdentifier)
continue
}
e2e.Logf("Pod %s belongs to a non request serving component", pod.Name)
// Make sure the non request serving Pod is correctly scheduled
o.Expect(nonServingNode).To(o.Equal(podNodeName), fmt.Sprintf("Expect Pod to be scheduled on non serving node %s but scheduled on %s", nonServingNode, podNodeName))
}
o.Expect(servingNodeName).NotTo(o.BeEmpty(), "Serving node not found")
exutil.By("Making sure that labels and taints are correctly set on the serving nodes pair")
servingPairIdx, idxFound := findServingPairIdx(servingPairs, servingNodeName)
o.Expect(idxFound).To(o.BeTrue())
e2e.Logf("Found serving pair index = %v; serving nodes = %v", servingPairIdx, servingPairs[servingPairIdx])
checkServingNodePairLabelsAndTaints(hostedClusterIdentifier, servingPairIdx)
exutil.By("Making sure the cluster-scheduled annotation is set on the HostedCluster")
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostedCluster", hostedCluster.name, "-n", hostedCluster.namespace, `-o=jsonpath={.metadata.annotations.hypershift\.openshift\.io/cluster-scheduled}`).Outputs()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(stdout).To(o.ContainSubstring("true"))
exutil.By("Delete the serving node by scaling down the corresponding MachineSet")
var msetName1 string
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run(OcpScale).Args("--replicas=1", fmt.Sprintf("%s/%s", mapiMachineset, msetName1), "-n", machineAPINamespace).Execute()
aggregatedErr = append(aggregatedErr, err)
}()
msetName1 = deleteServingNode(allWorkerNodeNames, allMsetNames, servingNodeName)
exutil.By("Making sure serving components are moved to the other node in the serving node pair")
e2e.Logf("Finding the new (expected) serving node")
var servingNodeName2 string
for _, nodeName := range servingPairs[servingPairIdx] {
if servingNodeName != nodeName {
servingNodeName2 = nodeName
break
}
}
o.Expect(servingNodeName2).NotTo(o.Equal(servingNodeName))
e2e.Logf("Making sure serving component Pods are moved to the new serving node")
waitForHostedClusterDeploymentsReady(hostedClusterIdentifier)
podList, err = adminKubeClient.CoreV1().Pods(hostedClusterIdentifier).List(context.Background(), metav1.ListOptions{
LabelSelector: requestServingComponentLabelSelector.String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items)).NotTo(o.BeZero(), "Empty pod list")
for _, pod := range podList.Items {
e2e.Logf("Pod %s belongs to a request serving component", pod.Name)
o.Expect(servingNodeName2).To(o.Equal(pod.Spec.NodeName), fmt.Sprintf("Expect Pod to be scheduled on serving node %s but scheduled on %s", servingNodeName2, pod.Spec.NodeName))
}
exutil.By("Delete the new serving node by scaling down the corresponding MachineSet")
var msetName2 string
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run(OcpScale).Args("--replicas=1", fmt.Sprintf("%s/%s", mapiMachineset, msetName2), "-n", machineAPINamespace).Execute()
aggregatedErr = append(aggregatedErr, err)
}()
msetName2 = deleteServingNode(allWorkerNodeNames, allMsetNames, servingNodeName2)
exutil.By("Making sure that serving components are moved to a node belonging to the other serving node pair")
waitForHostedClusterDeploymentsReady(hostedClusterIdentifier)
// servingPairIdx = 0 or 1
servingPairIdx2 := 1 - servingPairIdx
e2e.Logf("New serving pair index = %v; serving nodes = %v", servingPairIdx2, servingPairs[servingPairIdx2])
podList, err = adminKubeClient.CoreV1().Pods(hostedClusterIdentifier).List(context.Background(), metav1.ListOptions{
LabelSelector: requestServingComponentLabelSelector.String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items)).NotTo(o.BeZero(), "Empty pod list")
var servingNodeName3 string
for _, pod := range podList.Items {
e2e.Logf("Pod %s belongs to a request serving component", pod.Name)
podNodeName := pod.Spec.NodeName
if len(servingNodeName3) == 0 {
servingNodeName3 = podNodeName
o.Expect(servingPairs[servingPairIdx2]).To(o.ContainElements(servingNodeName3))
e2e.Logf("Found serving node = %v", servingNodeName3)
} else {
o.Expect(servingNodeName3).To(o.Equal(podNodeName))
}
}
o.Expect(servingNodeName3).NotTo(o.BeEmpty(), "Serving node not found")
exutil.By("Making sure that labels and taints are correctly set on the serving node pair")
checkServingNodePairLabelsAndTaints(hostedClusterIdentifier, servingPairIdx2)
exutil.By("Destroying the hosted cluster")
installHelper.destroyAWSHostedClusters(createCluster)
exutil.By("Making sure serving nodes are deleted")
for _, node := range servingPairs[servingPairIdx2] {
exutil.WaitForNodeToDisappear(oc, node, LongTimeout, DefaultTimeout/10)
}
exutil.By("Making sure two new nodes are created by MAPI")
// 4 new MachineSets, 2 scaled down, 2 deleted and then re-created => 2 additional nodes
nodeListFinal, err := e2enode.CheckReady(context.Background(), adminKubeClient, numNode+2, LongTimeout)
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("Making sure that the two new nodes does not contain specific label and taint")
var newNodeCount int
for _, node := range nodeListFinal {
nodeName := node.Name
if slices.Contains(allWorkerNodeNames, nodeName) {
e2e.Logf("Skip old worker node %s", nodeName)
continue
}
if _, ok := node.Labels["node-role.kubernetes.io/master"]; ok {
e2e.Logf("Skip master node %s", nodeName)
continue
}
e2e.Logf("Inspecting labels and taints on new worker node/%s", nodeName)
newNodeCount++
_, ok := node.Labels[hypershiftClusterLabelKey]
o.Expect(ok).To(o.BeFalse())
o.Expect(taints.TaintExists(node.Spec.Taints, &corev1.Taint{
Effect: "NoSchedule",
Key: hypershiftClusterLabelKey,
Value: hostedClusterIdentifier,
})).To(o.BeFalse())
}
o.Expect(newNodeCount).To(o.Equal(2))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:heli-High-64847-[HyperShiftINSTALL] Ensure service type of loadBalancer associated with ingress controller is deleted by ingress-controller role [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 64847 is for AWS - skipping test ...")
}
caseID := "64847"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
namePrefix = fmt.Sprintf("64847-%s", strings.ToLower(exutil.RandStrDefault()))
hcName = "hc-" + strings.ToLower(namePrefix)
bucketName = "hc-" + strings.ToLower(namePrefix)
svcTempFile = dir + "/svc.yaml"
svcName = "test-lb-svc-64847"
testSVC = fmt.Sprintf(`
apiVersion: v1
kind: Service
metadata:
name: %s
namespace: default
spec:
ports:
- port: 80
targetPort: 8080
selector:
name: test-pod
type: LoadBalancer
`, svcName)
)
exutil.By("install hypershift operator")
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: Public,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create a hosted cluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withReleaseImage(release)
hcpNS := createCluster.Namespace + "-" + hcName
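// The deferred function below destroys the hosted cluster while the ingress operator is scaled
// down, verifies that HCCO deletes the test LB service and keeps waiting on load balancer
// removal (router-default still present, "Ensuring load balancers are removed" in its logs),
// then restores the ingress operator so the deletion can complete.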
defer func() {
exutil.By("destroy hosted cluster in one goroutine")
go func() {
defer g.GinkgoRecover()
installHelper.destroyAWSHostedClusters(createCluster)
}()
if oc.GetGuestKubeconf() != "" {
exutil.By("check LB test SVC is deleted")
o.Eventually(func() bool {
testSVC, err := oc.AsGuestKubeconf().Run(OcpGet).Args("svc", svcName, "--ignore-not-found", `-o=jsonpath={.metadata.name}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if testSVC == "" {
return true
}
e2e.Logf("check if the test svc is deleted by hcco")
return false
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Timed out waiting for the the ingress-operator pods scaling down to zero")
exutil.By("check HCCO logs that deletion is stuck by LB SVC resources")
routerDefaultSVC, err := oc.AsGuestKubeconf().Run(OcpGet).Args("-n", "openshift-ingress", "svc", "router-default", "--ignore-not-found", `-o=jsonpath={.metadata.name}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(routerDefaultSVC).Should(o.Equal("router-default"))
hccoPodName := doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp=hosted-cluster-config-operator", "--ignore-not-found", `-o=jsonpath={.items[].metadata.name}`)
_, err = exutil.WaitAndGetSpecificPodLogs(oc, hcpNS, "", hccoPodName, "'Ensuring load balancers are removed'")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("remove ingress-operator debug annotation and scale up ingress-operator")
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hcName, "-n", createCluster.Namespace, "hypershift.openshift.io/debug-deployments-")
doOcpReq(oc, OcpScale, true, "deployment", "ingress-operator", "-n", hcpNS, "--replicas=1")
}
exutil.By("wait until the hosted cluster is deleted successfully")
o.Eventually(pollGetHostedClusters(oc, createCluster.Namespace), LongTimeout, LongTimeout/10).ShouldNot(o.ContainSubstring(hcName), "destroy AWS HostedClusters error")
}()
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
oc.SetGuestKubeconf(hostedCluster.getHostedClusterKubeconfigFile())
exutil.By("annotate the hosted cluster to debug ingress operator")
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, "hypershift.openshift.io/debug-deployments=ingress-operator")
o.Eventually(func() bool {
names := doOcpReq(oc, OcpGet, false, "pod", "-n", hcpNS, "--ignore-not-found", "-lapp=ingress-operator", "-o=jsonpath={.items[*].metadata.name}")
if names == "" {
return true
}
e2e.Logf("Still waiting for the ingress-operator pods scaling down to zero")
return false
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Timed out waiting for the the ingress-operator pods scaling down to zero")
o.Expect(doOcpReq(oc, OcpGet, true, "deploy", "ingress-operator", "-n", hcpNS, "--ignore-not-found", "-o=jsonpath={.spec.replicas}")).Should(o.Equal("0"))
exutil.By("create LB SVC on the hosted cluster")
err = os.WriteFile(svcTempFile, []byte(testSVC), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsGuestKubeconf().WithoutNamespace().Run(OcpCreate).Args("-f", svcTempFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
/*
In the test case below there is no need to verify:
- the correct scheduling of a hosted cluster's serving components.
- that relevant labels and taints are added to the request serving nodes by controller.
- that relevant annotations are added to the HostedCluster by controller.
- that relevant labels, node affinity and tolerations are added to the request serving components by controller.
- that request serving nodes are removed by controller once a HC is gone.
as these are covered by OCP-68221.
Timeout: 1h15min (test run took ~50min)
*/
g.It("NonPreRelease-Longduration-Author:fxie-Critical-69771-[HyperShiftINSTALL] When initial non-serving nodes fill up new pods prefer to go to untainted default nodes instead of scaling non-serving ones [Disruptive]", func() {
// Variables
var (
testCaseId = "69771"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
mhcTemplate = filepath.Join(fixturePath, "mhc.yaml")
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hc1Name = fmt.Sprintf("%s-hc-1", resourceNamePrefix)
hc2Name = fmt.Sprintf("%s-hc-2", resourceNamePrefix)
mhcNamePrefix = fmt.Sprintf("%s-mhc", resourceNamePrefix)
adminKubeClient = oc.AdminKubeClient()
numWorkersExpected = 3
numMasters = 3
numMsetsExpected = 3
errList []error
clusterAutoscaler = `apiVersion: "autoscaling.openshift.io/v1"
kind: "ClusterAutoscaler"
metadata:
name: "default"
spec:
scaleDown:
enabled: true
delayAfterAdd: 10s
delayAfterDelete: 10s
delayAfterFailure: 10s
unneededTime: 10s`
clusterAutoscalerFileName = fmt.Sprintf("%s-clusterautoscaler.yaml", resourceNamePrefix)
machineAutoscalerTemplate = `apiVersion: "autoscaling.openshift.io/v1beta1"
kind: "MachineAutoscaler"
metadata:
name: %[1]s
namespace: "openshift-machine-api"
spec:
minReplicas: 1
maxReplicas: 3
scaleTargetRef:
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
name: %[1]s`
machineAutoscalerFileName = fmt.Sprintf("%s-machineautoscaler.yaml", resourceNamePrefix)
)
// Aggregated error handling
defer func() {
o.Expect(errors2.NewAggregate(errList)).NotTo(o.HaveOccurred())
}()
exutil.By("Inspecting platform")
exutil.SkipNoCapabilities(oc, "MachineAPI")
exutil.SkipIfPlatformTypeNot(oc, "aws")
msetNames := clusterinfra.ListWorkerMachineSetNames(oc)
// In theory the number of MachineSets does not have to be exactly 3 but should be at least 3.
// The following enforcement is for alignment with the test case only.
if numMset := len(msetNames); numMset != numMsetsExpected {
g.Skip("Expect %v worker machinesets but found %v, skipping", numMsetsExpected, numMset)
}
e2e.Logf("Found worker machinesets %v on the management cluster", msetNames)
nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), adminKubeClient)
o.Expect(err).NotTo(o.HaveOccurred())
// In theory the number of ready schedulable Nodes does not have to be exactly 3 but should be at least 3.
// The following is enforced for alignment with the test case only.
numReadySchedulableNodes := len(nodeList.Items)
if numReadySchedulableNodes != numWorkersExpected {
g.Skip("Expect %v ready schedulable nodes but found %v, skipping", numWorkersExpected, numReadySchedulableNodes)
}
numNode := numReadySchedulableNodes + numMasters
e2e.Logf("Found %v nodes on the management cluster", numNode)
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Found management cluster region = %s", region)
defer func() {
e2e.Logf("Making sure we ends up with the correct number of nodes and all of them are ready and schedulable")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, LongTimeout, true, func(_ context.Context) (bool, error) {
nodeList, err := adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{"node-role.kubernetes.io/worker": ""}).String(),
})
if err != nil {
return false, err
}
if numWorker := len(nodeList.Items); numWorker != numWorkersExpected {
e2e.Logf("Expect %v worker nodes but found %v, keep polling", numWorkersExpected, numWorker)
return false, nil
}
for _, node := range nodeList.Items {
if !e2enode.IsNodeReady(&node) {
e2e.Logf("Worker node %v not ready, keep polling", node.Name)
return false, nil
}
if len(node.Spec.Taints) > 0 {
e2e.Logf("Worker node tainted, keep polling", node.Name)
return false, nil
}
if _, ok := node.Labels[hypershiftClusterLabelKey]; ok {
e2e.Logf("Worker node still has the %v label, keep polling", hypershiftClusterLabelKey)
return false, nil
}
}
return true, nil
})
errList = append(errList, err)
}()
exutil.By("Creating autoscalers")
e2e.Logf("Creating ClusterAutoscaler")
err = os.WriteFile(clusterAutoscalerFileName, []byte(clusterAutoscaler), os.ModePerm)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", clusterAutoscalerFileName).Execute()
errList = append(errList, err)
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", clusterAutoscalerFileName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Creating MachineAutoscaler")
err = os.WriteFile(machineAutoscalerFileName, []byte(fmt.Sprintf(machineAutoscalerTemplate, msetNames[2])), os.ModePerm)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", machineAutoscalerFileName).Execute()
errList = append(errList, err)
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineAutoscalerFileName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating extra worker nodes")
var extraMsetNames []string
for _, msetName := range msetNames {
extraMsetName := fmt.Sprintf("%s-%s-1", msetName, testCaseId)
extraMset := clusterinfra.MachineSetNonSpotDescription{
Name: extraMsetName,
Replicas: 1,
}
defer func() {
errList = append(errList, extraMset.DeleteMachineSet(oc))
}()
extraMset.CreateMachineSetBasedOnExisting(oc, msetName, false)
extraMsetNames = append(extraMsetNames, extraMsetName)
}
e2e.Logf("Waiting until all nodes are ready")
_, err = e2enode.CheckReady(context.Background(), adminKubeClient, numNode+len(extraMsetNames), LongTimeout)
o.Expect(err).ShouldNot(o.HaveOccurred())
/*
Worker nodes at this point:
Worker 1 <-> machineset 1 <-> AZ1
Worker 2 <-> machineset 2 <-> AZ2
Worker 3 <-> machineset 3 <-> AZ3 <-> non-serving node <-> autoscaling enabled
Extra worker 1 <-> extra machineset 1 (based on machineset 1) <-> AZ1
Extra worker 2 <-> extra machineset 2 (based on machineset 2) <-> AZ2
Extra worker 3 <-> extra machineset 3 (based on machineset 3) <-> AZ3 <-> default worker node
Serving node pairs to define:
Serving pair 1 <-> dedicated for serving components of HostedCluster 1 <-> worker 1 + worker 2
Serving pair 2 <-> dedicated for serving components of HostedCluster 2 <-> extra worker 1 + extra worker 2
*/
exutil.By("Defining serving pairs")
e2e.Logf("Getting node name for each machineset")
var workerNodeNames []string
msetNames = append(msetNames, extraMsetNames...)
for _, msetName := range msetNames {
workerNodeNames = append(workerNodeNames, exutil.GetNodeNameByMachineset(oc, msetName))
}
e2e.Logf("Found worker nodes %s on the management cluster", workerNodeNames)
servingPair1Indices := []int{0, 1}
var servingPair1NodesNames, servingPair1MsetNames []string
for _, idx := range servingPair1Indices {
servingPair1NodesNames = append(servingPair1NodesNames, workerNodeNames[idx])
servingPair1MsetNames = append(servingPair1MsetNames, msetNames[idx])
}
e2e.Logf("Serving pair 1 nodes = %v, machinesets = %v", servingPair1NodesNames, servingPair1MsetNames)
nonServingIndex := 2
nonServingMsetName := msetNames[nonServingIndex]
nonServingNodeName := workerNodeNames[nonServingIndex]
e2e.Logf("Non serving node = %v, machineset = %v", nonServingNodeName, nonServingMsetName)
servingPair2Indices := []int{3, 4}
var servingPair2NodeNames, servingPair2MsetNames []string
for _, idx := range servingPair2Indices {
servingPair2NodeNames = append(servingPair2NodeNames, workerNodeNames[idx])
servingPair2MsetNames = append(servingPair2MsetNames, msetNames[idx])
}
e2e.Logf("Serving pair 2 nodes = %v, machinesets = %v", servingPair2NodeNames, servingPair2MsetNames)
defaultWorkerIndex := 5
defaultWorkerNodeName := workerNodeNames[defaultWorkerIndex]
defaultWorkerMsetName := msetNames[defaultWorkerIndex]
e2e.Logf("Default worker node = %v, machineset = %v", defaultWorkerNodeName, defaultWorkerMsetName)
exutil.By("Creating a MachineHealthCheck for each serving machineset")
infraId := doOcpReq(oc, OcpGet, true, "infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}")
e2e.Logf("Found infra ID = %s", infraId)
for _, msetName := range append(servingPair1MsetNames, servingPair2MsetNames...) {
mhcName := fmt.Sprintf("%s-%s", mhcNamePrefix, msetName)
parsedTemplate := fmt.Sprintf("%s.template", mhcName)
mhc := mhcDescription{
Clusterid: infraId,
Maxunhealthy: "100%",
MachinesetName: msetName,
Name: mhcName,
Namespace: machineAPINamespace,
template: mhcTemplate,
}
defer mhc.deleteMhc(oc, parsedTemplate)
mhc.createMhc(oc, parsedTemplate)
}
exutil.By("Adding labels and taints to serving pair 1 nodes and the non serving node")
// The osd-fleet-manager.openshift.io/paired-nodes label is not a must for request serving nodes
e2e.Logf("Adding labels and taints to serving pair 1 nodes")
defer func() {
for _, node := range servingPair1NodesNames {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", node, servingComponentNodesTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, servingComponentNodesLabelKey+"-").Execute()
}
}()
for _, node := range servingPair1NodesNames {
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", node, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", node, servingComponentNodesLabel)
}
e2e.Logf("Adding labels and taints to the non serving node")
defer func() {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", nonServingNodeName, nonServingComponentTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nonServingNodeName, nonServingComponentLabelKey+"-").Execute()
}()
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", nonServingNodeName, nonServingComponentTaint)
doOcpReq(oc, OcpLabel, true, "node", nonServingNodeName, nonServingComponentLabel)
exutil.By("Installing the Hypershift Operator")
defer func() {
errList = append(errList, os.RemoveAll(tempDir))
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer func() {
// This is required otherwise the tainted serving nodes will not be removed
exutil.By("Waiting for the serving nodes to be removed before uninstalling the Hypershift Operator")
for _, node := range append(servingPair1NodesNames, servingPair2NodeNames...) {
exutil.WaitForNodeToDisappear(oc, node, LongTimeout, DefaultTimeout/10)
}
installHelper.hyperShiftUninstall()
}()
installHelper.hyperShiftInstall()
exutil.By("Creating hosted cluster 1 with request serving annotation")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster1 := installHelper.createClusterAWSCommonBuilder().
withName(hc1Name).
withNodePoolReplicas(1).
withAnnotation(hcTopologyAnnotationKey, "dedicated-request-serving-components").
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster1)
_ = installHelper.createAWSHostedClusters(createCluster1)
exutil.By("Adding labels and taints to serving pair 2 nodes")
// The osd-fleet-manager.openshift.io/paired-nodes label is not a must for request serving nodes
defer func() {
for _, node := range servingPair2NodeNames {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", node, servingComponentNodesTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, servingComponentNodesLabelKey+"-").Execute()
}
}()
for _, node := range servingPair2NodeNames {
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", node, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", node, servingComponentNodesLabel)
}
exutil.By("Creating hosted cluster 2 with request serving annotation")
createCluster2 := installHelper.createClusterAWSCommonBuilder().
withName(hc2Name).
withNodePoolReplicas(1).
withAnnotation(hcTopologyAnnotationKey, "dedicated-request-serving-components").
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
hostedCluster2Identifier := fmt.Sprintf("%s-%s", hostedCluster2.namespace, hostedCluster2.name)
e2e.Logf("Hosted cluster 2 created with identifier = %s", hostedCluster2Identifier)
exutil.By("Making sure that non-serving components are scheduled on a default worker node after filling up the non serving node")
podList, err := adminKubeClient.CoreV1().Pods(hostedCluster2Identifier).List(context.Background(), metav1.ListOptions{})
o.Expect(err).ShouldNot(o.HaveOccurred())
var podScheduledOnDefaultWorkerNode bool
for _, pod := range podList.Items {
podName := pod.Name
if isRequestServingComponent(podName) {
e2e.Logf("Pod %v belongs to a request serving component, skipping", podName)
continue
}
e2e.Logf("Pod %v belongs to a non-serving component", podName)
switch nodeName := pod.Spec.NodeName; nodeName {
case nonServingNodeName:
e2e.Logf("Pod scheduled on the non-serving node, expected")
case defaultWorkerNodeName:
e2e.Logf("Pod scheduled on the default worker node, expected")
podScheduledOnDefaultWorkerNode = true
default:
e2e.Failf("Pod scheduled on an unexpected node %v", nodeName)
}
}
o.Expect(podScheduledOnDefaultWorkerNode).To(o.BeTrue(), "Nothing scheduled on the default worker node")
})
/*
Marked as disruptive as we'll create an ICSP on the management cluster.
Test run duration: 33min
*/
g.It("NonPreRelease-Longduration-Author:fxie-Critical-67783-[HyperShiftINSTALL] The environment variable OPENSHIFT_IMG_OVERRIDES in CPO deployment should retain mirroring order under a source compared to the original mirror/source listing in the ICSP/IDMSs in the management cluster [Disruptive]", func() {
exutil.SkipIfPlatformTypeNot(oc, "aws")
type nodesSchedulabilityStatus bool
// Variables
var (
testCaseId = "67783"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
icspName = fmt.Sprintf("%s-icsp", resourceNamePrefix)
icspSource = "quay.io/openshift-release-dev/ocp-release"
icspMirrors = []string{
"quay.io/openshift-release-dev/ocp-release",
"pull.q1w2.quay.rhcloud.com/openshift-release-dev/ocp-release",
}
icspTemplate = template.Must(template.New("icspTemplate").Parse(`apiVersion: operator.openshift.io/v1alpha1
kind: ImageContentSourcePolicy
metadata:
name: {{ .Name }}
spec:
repositoryDigestMirrors:
- mirrors:
{{- range .Mirrors }}
- {{ . }}
{{- end }}
source: {{ .Source }}`))
adminKubeClient = oc.AdminKubeClient()
errList []error
allNodesSchedulable nodesSchedulabilityStatus = true
atLeastOneNodeUnschedulable nodesSchedulabilityStatus = false
)
// Utilities
var (
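// checkNodesSchedulability returns a poll condition matching the expectation passed in:
// with allNodesSchedulable it succeeds only when every node is schedulable, with
// atLeastOneNodeUnschedulable it succeeds as soon as one cordoned node is observed.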
checkNodesSchedulability = func(expectedNodeSchedulability nodesSchedulabilityStatus) func(_ context.Context) (bool, error) {
return func(_ context.Context) (bool, error) {
nodeList, err := adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return false, err
}
for _, node := range nodeList.Items {
if !e2enode.IsNodeSchedulable(&node) {
e2e.Logf("Node %s unschedulable", node.Name)
return bool(!expectedNodeSchedulability), nil
}
}
// All nodes are schedulable if we reach here
return bool(expectedNodeSchedulability), nil
}
}
)
// Aggregated error handling
defer func() {
o.Expect(errors2.NewAggregate(errList)).NotTo(o.HaveOccurred())
}()
exutil.By("Checking if there's a need to skip the test case")
// ICSPs are not taken into account if IDMSs are found on the management cluster.
// It's ok to proceed even if the IDMS type is not registered to the API server, so no need to handle the error here.
idmsList, _ := oc.AdminConfigClient().ConfigV1().ImageDigestMirrorSets().List(context.Background(), metav1.ListOptions{})
if len(idmsList.Items) > 0 {
g.Skip("Found IDMSs, skipping")
}
// Also make sure the source (for which we'll declare mirrors) is only used by the ICSP we create.
// The ICSP type is still under v1alpha1, so avoid using a strongly-typed client here for future-proofing.
existingICSPSources, _, err := oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("ImageContentSourcePolicy", "-o=jsonpath={.items[*].spec.repositoryDigestMirrors[*].source}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(existingICSPSources, icspSource) {
g.Skip("An existing ICSP declares the source we'll be using, skipping")
}
exutil.By("Creating an ICSP on the management cluster")
e2e.Logf("Creating temporary directory")
defer func() {
errList = append(errList, os.RemoveAll(tempDir))
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var icspFile *os.File
icspFile, err = os.CreateTemp(tempDir, resourceNamePrefix)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
errList = append(errList, icspFile.Close())
}()
e2e.Logf("Parsed template: ")
err = icspTemplate.Execute(io.MultiWriter(g.GinkgoWriter, icspFile), &struct {
Name string
Source string
Mirrors []string
}{Name: icspName, Source: icspSource, Mirrors: icspMirrors})
o.Expect(err).NotTo(o.HaveOccurred(), "Error executing ICSP template")
e2e.Logf("Creating the parsed template")
defer func() {
// After the deletion of an ICSP, the MCO updates CRI-O configurations, cordoning the nodes in turn.
exutil.By("Restoring the management cluster")
e2e.Logf("Deleting the ICSP")
err = oc.AsAdmin().WithoutNamespace().Run(OcpDelete).Args("-f", icspFile.Name()).Execute()
errList = append(errList, err)
e2e.Logf("Waiting for the first node to be cordoned")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, DefaultTimeout, true, checkNodesSchedulability(atLeastOneNodeUnschedulable))
errList = append(errList, err)
e2e.Logf("Waiting for all nodes to be un-cordoned")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, LongTimeout, true, checkNodesSchedulability(allNodesSchedulable))
errList = append(errList, err)
}()
err = oc.AsAdmin().WithoutNamespace().Run(OcpCreate).Args("-f", icspFile.Name()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// After the creation of an ICSP, the MCO updates CRI-O configurations in a way
// that should not make the nodes un-schedulable. Make sure it is the case here.
e2e.Logf("Making sure that management cluster is stable")
// Simulate o.Consistently
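// The poll below can only succeed if some node becomes unschedulable, so expecting
// context.DeadlineExceeded asserts that every node stayed schedulable for the whole
// ShortTimeout window.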
err = wait.PollUntilContextTimeout(context.Background(), ShortTimeout/10, ShortTimeout, true, checkNodesSchedulability(atLeastOneNodeUnschedulable))
o.Expect(err).To(o.BeAssignableToTypeOf(context.DeadlineExceeded))
exutil.By("Installing the Hypershift Operator")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Found management cluster region = %s", region)
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a hosted cluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster)
hc := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Making sure that OPENSHIFT_IMG_OVERRIDES retains mirroring order from ICSP")
// The ICSP created has one and only one source.
// We expect parts like source=mirrorX to be adjacent to each other within OPENSHIFT_IMG_OVERRIDES
var parts []string
for _, mirror := range icspMirrors {
parts = append(parts, fmt.Sprintf("%s=%s", icspSource, mirror))
}
expectedSubstr := strings.Join(parts, ",")
e2e.Logf("Expect to find substring %s within OPENSHIFT_IMG_OVERRIDES", expectedSubstr)
cpoDeploy, err := adminKubeClient.AppsV1().Deployments(hc.getHostedComponentNamespace()).Get(context.Background(), "control-plane-operator", metav1.GetOptions{})
o.Expect(err).ShouldNot(o.HaveOccurred())
for _, container := range cpoDeploy.Spec.Template.Spec.Containers {
if container.Name != "control-plane-operator" {
continue
}
for _, env := range container.Env {
if env.Name != "OPENSHIFT_IMG_OVERRIDES" {
continue
}
e2e.Logf("Found OPENSHIFT_IMG_OVERRIDES=%s", env.Value)
o.Expect(env.Value).To(o.ContainSubstring(expectedSubstr))
}
}
})
/*
This test case requires a PublicAndPrivate hosted cluster.
External DNS is enabled by necessity, as it is required for PublicAndPrivate hosted clusters.
Test run duration: ~35min
*/
g.It("Longduration-NonPreRelease-Author:fxie-Critical-65606-[HyperShiftINSTALL] The cluster can be deleted successfully when hosted zone for private link is missing [Serial]", func() {
var (
testCaseId = "65606"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
ctx = context.Background()
)
exutil.By("Skipping incompatible platforms")
exutil.SkipIfPlatformTypeNot(oc, "aws")
exutil.By("Installing the Hypershift Operator")
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := os.RemoveAll(tempDir)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a PublicAndPrivate hosted cluster with external DNS enabled")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withEndpointAccess(PublicAndPrivate).
withReleaseImage(release).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
// Pause reconciliation so the awsprivatelink controller does not re-create the DNS records that we will delete
exutil.By("Pausing reconciliation")
defer func() {
exutil.By("Un-pausing reconciliation")
doOcpReq(oc, OcpPatch, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, "--type=merge", `--patch={"spec":{"pausedUntil":null}}`)
// Avoid intricate dependency violations that could occur during the deletion of the HC
e2e.Logf("Waiting until the un-pause signal propagates to the HCP")
o.Eventually(func() bool {
res := doOcpReq(oc, OcpGet, false, "hcp", "-n", hostedCluster.getHostedComponentNamespace(), hostedCluster.name, "-o=jsonpath={.spec.pausedUntil}")
return len(res) == 0
}).WithTimeout(DefaultTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
}()
doOcpReq(oc, OcpPatch, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, "--type=merge", `--patch={"spec":{"pausedUntil":"true"}}`)
exutil.By("Waiting until the awsprivatelink controller is actually paused")
// A hack for simplicity: grep the control-plane-operator logs for the awsendpointservice controller's 'Reconciliation paused' message instead of inspecting the controller's state directly
_, err = exutil.WaitAndGetSpecificPodLogs(oc, hostedCluster.getHostedComponentNamespace(), "control-plane-operator", "deploy/control-plane-operator", "awsendpointservice | grep -i 'Reconciliation paused'")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get Route53 hosted zone for privatelink")
hzId := doOcpReq(oc, OcpGet, true, "awsendpointservice/private-router", "-n", hostedCluster.getHostedComponentNamespace(), "-o=jsonpath={.status.dnsZoneID}")
e2e.Logf("Found hosted zone ID = %s", hzId)
clusterinfra.GetAwsCredentialFromCluster(oc)
route53Client := exutil.NewRoute53Client()
// Get the hosted zone name for logging purposes only
var getHzOut *route53.GetHostedZoneOutput
getHzOut, err = route53Client.GetHostedZoneWithContext(ctx, &route53.GetHostedZoneInput{
Id: aws.String(hzId),
})
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Found hosted zone name = %s", aws.StringValue(getHzOut.HostedZone.Name))
exutil.By("Delete Route53 hosted zone for privatelink")
e2e.Logf("Emptying Route53 hosted zone")
if _, err = route53Client.EmptyHostedZoneWithContext(ctx, hzId); err != nil {
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Failed to empty hosted zone (%s): %v", aerr.Code(), aerr.Message())
}
e2e.Failf("Failed to empty hosted zone %v", err)
}
e2e.Logf("Deleting Route53 hosted zone")
if _, err = route53Client.DeleteHostedZoneWithContextAndCheck(ctx, &route53.DeleteHostedZoneInput{
Id: aws.String(hzId),
}); err != nil {
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Failed to delete hosted zone (%s): %v", aerr.Code(), aerr.Message())
}
e2e.Failf("Failed to delete hosted zone %v", err)
}
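// With the private link hosted zone now gone, the deferred destroyAWSHostedClusters call
// verifies that the hosted cluster can still be deleted successfully.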
})
/*
For the sake of this test, it is sufficient to create an un-deletable PV in the hosted cluster
which is much simpler than installing the AWS EFS operator.
Test run duration: ~40min
*/
g.It("Longduration-NonPreRelease-Author:fxie-Critical-67225-[HyperShiftINSTALL] Test annotation 'hypershift.openshift.io/destroy-grace-period' in the HostedCluster [Serial]", func() {
exutil.SkipIfPlatformTypeNot(oc, "aws")
var (
testCaseId = getTestCaseIDs()[0]
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
pvName = fmt.Sprintf("%s-pv", resourceNamePrefix)
pvYamlStr = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolume
metadata:
name: %s
finalizers:
- what/ever
spec:
capacity:
storage: 1Mi
accessModes:
- ReadWriteOnce
nfs:
path: /what/ever
server: 127.0.0.1`, pvName)
)
exutil.By("Installing the Hypershift Operator")
defer func() {
_ = os.RemoveAll(tempDir)
}()
err := os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var region string
region, err = getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a public HostedCluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withAnnotation(cleanupCloudResAnnotationKey, "true").
withAnnotation(destroyGracePeriodAnnotationKey, "120s").
withReleaseImage(release)
// Delete the HC manually in the deferred call, as it could already be gone at that point
defer installHelper.deleteHostedClustersManual(createCluster)
hc := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Creating an un-deletable PV in the hosted cluster")
var pvFile *os.File
pvFile, err = os.CreateTemp(tempDir, resourceNamePrefix)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_ = pvFile.Close()
}()
_, err = io.MultiWriter(g.GinkgoWriter, pvFile).Write([]byte(pvYamlStr))
o.Expect(err).ShouldNot(o.HaveOccurred())
installHelper.createHostedClusterKubeconfig(createCluster, hc)
oc.SetGuestKubeconf(hc.hostedClustersKubeconfigFile)
doOcpReq(oc.AsGuestKubeconf(), OcpCreate, true, "-f", pvFile.Name())
exutil.By("Deleting the hosted cluster in a non blocking fashion")
doOcpReq(oc, OcpDelete, true, "hc", hc.name, "-n", hc.namespace, "--wait=false")
exutil.By("Waiting for the CloudResourcesDestroyed condition to be set")
o.Eventually(func() bool {
msg := doOcpReq(oc, OcpGet, false, "hc", hc.name, "-n", hc.namespace, `-o=jsonpath={.status.conditions[?(@.type=="CloudResourcesDestroyed")].message}`)
return strings.Contains(msg, "Remaining resources: persistent-volumes")
}).WithTimeout(LongTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting for the HostedClusterDestroyed condition to be set")
o.Eventually(func() bool {
reason := doOcpReq(oc, OcpGet, false, "hc", hc.name, "-n", hc.namespace, `-o=jsonpath={.status.conditions[?(@.type=="HostedClusterDestroyed")].reason}`)
return reason == "WaitingForGracePeriod"
}).WithTimeout(DoubleLongTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting for the HostedCluster to be deleted")
o.Eventually(func() bool {
_, stderr, err := oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("hc", hc.name, "-n", hc.namespace).Outputs()
return err != nil && strings.Contains(stderr, "NotFound")
}).WithTimeout(DefaultTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
})
/*
Day-1 creation is covered by CI. This test case focuses on day-2 key rotation.
Test run duration: ~55min
*/
g.It("Author:fxie-Longduration-NonPreRelease-Critical-73944-[HyperShiftINSTALL] AZURE Etcd Encryption [Serial]", func() {
exutil.SkipIfPlatformTypeNot(oc, "azure")
if exutil.IsWorkloadIdentityCluster(oc) {
g.Skip("This test case requires root credentials, skipping")
}
var (
resourceNamePrefix = getResourceNamePrefix()
activeKeyName = fmt.Sprintf("%s-active-key", resourceNamePrefix)
backupKeyName = fmt.Sprintf("%s-backup-key", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
kvName = fmt.Sprintf("%s-kv", resourceNamePrefix)
rgName = fmt.Sprintf("%s-rg", resourceNamePrefix)
tmpDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
)
e2e.Logf("Getting Azure root credentials from MC")
azCreds := exutil.NewEmptyAzureCredentials()
err := azCreds.GetFromClusterAndDecode(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get Azure root credentials from MC")
exutil.By("Creating a resource group to hold the keyvault")
azClientSet := exutil.NewAzureClientSetWithRootCreds(oc)
_, err = azClientSet.GetResourceGroupClient(nil).CreateOrUpdate(context.Background(), rgName,
armresources.ResourceGroup{Location: to.Ptr(azCreds.AzureRegion)}, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create resource group %s", rgName))
defer func() {
err = azClientSet.DeleteResourceGroup(context.Background(), rgName)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to delete resource group")
}()
e2e.Logf("Getting object ID of the service principal")
var spObjectId string
spObjectId, err = azClientSet.GetServicePrincipalObjectId(context.Background(), azCreds.AzureClientID)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get object ID of service principal")
exutil.By("Creating a keyvault to hold the keys")
accessPolicies := []*armkeyvault.AccessPolicyEntry{
{
TenantID: to.Ptr(azCreds.AzureTenantID),
ObjectID: to.Ptr(spObjectId),
Permissions: &armkeyvault.Permissions{
Keys: []*armkeyvault.KeyPermissions{
to.Ptr(armkeyvault.KeyPermissionsDecrypt),
to.Ptr(armkeyvault.KeyPermissionsEncrypt),
to.Ptr(armkeyvault.KeyPermissionsCreate),
to.Ptr(armkeyvault.KeyPermissionsGet),
},
},
},
}
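// The access policy grants the management cluster's service principal the key permissions
// (encrypt/decrypt/create/get) it needs to use the vault keys for etcd KMS encryption.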
kvParams := armkeyvault.VaultCreateOrUpdateParameters{
Location: to.Ptr(azCreds.AzureRegion),
Properties: &armkeyvault.VaultProperties{
SKU: &armkeyvault.SKU{
Name: to.Ptr(armkeyvault.SKUNameStandard),
Family: to.Ptr(armkeyvault.SKUFamilyA),
},
TenantID: to.Ptr(azCreds.AzureTenantID),
AccessPolicies: accessPolicies,
EnablePurgeProtection: to.Ptr(true),
// Minimize this for a minimal chance of keyvault name collision
SoftDeleteRetentionInDays: to.Ptr[int32](7),
},
}
poller, err := azClientSet.GetVaultsClient(nil).BeginCreateOrUpdate(context.Background(), rgName,
kvName, kvParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create keyvalut %s", kvName))
_, err = poller.PollUntilDone(context.Background(), nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to poll for the termination of keyvault creation")
exutil.By("Creating keys within the keyvault")
keyParams := armkeyvault.KeyCreateParameters{
Properties: &armkeyvault.KeyProperties{
// RSA or EC: software-protected
// RSA-HSM or EC-HSM: hardware-protected
Kty: to.Ptr(armkeyvault.JSONWebKeyTypeRSA),
},
}
createActiveKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(context.Background(), rgName,
kvName, activeKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create active key")
createBackupKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(context.Background(), rgName,
kvName, backupKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create backup key")
e2e.Logf("Parsing key URIs")
var activeKey, backupKey azureKMSKey
activeKey, err = parseAzureVaultKeyURI(*createActiveKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse active key URI")
backupKey, err = parseAzureVaultKeyURI(*createBackupKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse backup key URI")
e2e.Logf("Create temporary directory")
o.Expect(os.MkdirAll(tmpDir, 0755)).NotTo(o.HaveOccurred(), "failed to create temporary directory")
defer func() {
o.Expect(os.RemoveAll(tmpDir)).NotTo(o.HaveOccurred(), "failed to remove temporary directory")
}()
exutil.By("Installing Hypershift Operator")
installHelper := installHelper{oc: oc, dir: tmpDir, iaasPlatform: iaasPlatform}
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating hosted cluster")
var release string
release, err = exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get release image")
createCluster := installHelper.createClusterAzureCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withReleaseImage(release).
withEncryptionKeyId(*createActiveKeyResp.Properties.KeyURIWithVersion)
defer installHelper.destroyAzureHostedClusters(createCluster)
hc := installHelper.createAzureHostedClusters(createCluster)
e2e.Logf("Extracting kubeconfig of the hosted cluster")
installHelper.createHostedClusterKubeconfig(createCluster, hc)
hc.oc.SetGuestKubeconf(hc.hostedClustersKubeconfigFile)
exutil.By("Specifying a backup key on the HC")
kasResourceVersion := doOcpReq(oc, OcpGet, true, "deploy/kube-apiserver",
"-n", hc.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
hc.patchAzureKMS(nil, &backupKey)
hc.waitForKASDeployUpdate(context.Background(), kasResourceVersion)
hc.waitForKASDeployReady(context.Background())
hc.checkAzureEtcdEncryption(activeKey, &backupKey)
exutil.By("Swapping active & backup key")
kasResourceVersion = doOcpReq(oc, OcpGet, true, "deploy/kube-apiserver",
"-n", hc.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
hc.patchAzureKMS(&backupKey, &activeKey)
hc.waitForKASDeployUpdate(context.Background(), kasResourceVersion)
hc.waitForKASDeployReady(context.Background())
hc.checkAzureEtcdEncryption(backupKey, &activeKey)
exutil.By("Re-encoding all Secrets & ConfigMaps using the current active key")
hc.encodeSecrets(context.Background())
hc.encodeConfigmaps(context.Background())
exutil.By("Remove the backup key from HC")
kasResourceVersion = doOcpReq(oc, OcpGet, true, "deploy/kube-apiserver",
"-n", hc.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
hc.removeAzureKMSBackupKey()
hc.waitForKASDeployUpdate(context.Background(), kasResourceVersion)
hc.waitForKASDeployReady(context.Background())
hc.checkAzureEtcdEncryption(backupKey, nil)
})
})
| package hypershift | ||||
test case | openshift/openshift-tests-private | 7f881e01-1a1c-400c-b54e-62a623651baf | Longduration-NonPreRelease-Author:liangli-Critical-42718-[HyperShiftINSTALL] Create a hosted cluster on aws using hypershift tool [Serial] | ['"fmt"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-42718-[HyperShiftINSTALL] Create a hosted cluster on aws using hypershift tool [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42718 is for AWS - skipping test ...")
}
caseID := "42718"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(2)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
}) | |||||
test case | openshift/openshift-tests-private | 38f58fd2-9e3f-4ae8-99a4-4475b23bd509 | Longduration-NonPreRelease-Author:liangli-Critical-42866-[HyperShiftINSTALL] Create HostedCluster infrastructure on AWS by using Hypershift CLI [Serial] | ['"encoding/json"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-42866-[HyperShiftINSTALL] Create HostedCluster infrastructure on AWS by using Hypershift CLI [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42866 is for AWS - skipping test ...")
}
caseID := "42866"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Create the AWS infrastructure")
infraFile := installHelper.dir + "/" + clusterName + "-infra.json"
infra := installHelper.createInfraCommonBuilder().
withInfraID(clusterName + exutil.RandStrCustomize("123456789", 4)).
withOutputFile(infraFile)
defer installHelper.destroyAWSInfra(infra)
installHelper.createAWSInfra(infra)
exutil.By("Create AWS IAM resources")
iamFile := installHelper.dir + "/" + clusterName + "-iam.json"
iam := installHelper.createIamCommonBuilder(infraFile).
withInfraID(infra.InfraID).
withOutputFile(iamFile)
defer installHelper.destroyAWSIam(iam)
installHelper.createAWSIam(iam)
exutil.By("create aws HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withInfraJSON(infraFile).
withIamJSON(iamFile)
defer installHelper.destroyAWSHostedClusters(createCluster)
cluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("check vpc is as expected")
vpcID, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("awsclusters", "-n", cluster.namespace+"-"+cluster.name, cluster.name, `-ojsonpath={.spec.network.vpc.id}`).Output()
o.Expect(vpcID).NotTo(o.BeEmpty())
vpc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", "-n", cluster.namespace, cluster.name, `-ojsonpath={.spec.platform.aws.cloudProviderConfig.vpc}`).Output()
o.Expect(strings.Compare(vpcID, vpc) == 0).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 38a3f12b-3d5d-47a6-a048-ca5141bc6157 | Longduration-NonPreRelease-Author:liangli-Critical-42867-[HyperShiftINSTALL] Create iam and infrastructure repeatedly with the same infra-id on aws [Serial] | ['"encoding/json"', '"os"', '"reflect"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-42867-[HyperShiftINSTALL] Create iam and infrastructure repeatedly with the same infra-id on aws [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42867 is for AWS - skipping test ...")
}
caseID := "42867"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Create the AWS infrastructure 1")
infraFile := installHelper.dir + "/" + clusterName + "-infra.json"
infra := installHelper.createInfraCommonBuilder().
withName(clusterName + "infra1").
withInfraID(clusterName + exutil.RandStrCustomize("123456789", 4)).
withOutputFile(infraFile)
defer installHelper.destroyAWSInfra(infra)
installHelper.createAWSInfra(infra)
exutil.By("Create AWS IAM resources 1")
iamFile := installHelper.dir + "/" + clusterName + "-iam.json"
iam := installHelper.createIamCommonBuilder(infraFile).
withInfraID(infra.InfraID).
withOutputFile(iamFile)
defer installHelper.destroyAWSIam(iam)
installHelper.createAWSIam(iam)
exutil.By("Create the AWS infrastructure 2")
infraFile2 := installHelper.dir + "/" + clusterName + "-infra2.json"
infra2 := installHelper.createInfraCommonBuilder().
withName(clusterName + "infra2").
withInfraID(infra.InfraID).
withOutputFile(infraFile2)
defer installHelper.destroyAWSInfra(infra2)
installHelper.createAWSInfra(infra2)
exutil.By("Create AWS IAM resources 2")
iamFile2 := installHelper.dir + "/" + clusterName + "-iam2.json"
iam2 := installHelper.createIamCommonBuilder(infraFile2).
withInfraID(infra2.InfraID).
withOutputFile(iamFile2)
defer installHelper.destroyAWSIam(iam2)
installHelper.createAWSIam(iam2)
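// Re-running infra/IAM creation with the same infra-id is expected to be idempotent: the second run
// should describe the same zones and produce a byte-identical IAM file, which the comparisons below assert.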
exutil.By("Compare two infra file")
o.Expect(reflect.DeepEqual(getJSONByFile(infraFile, "zones"), getJSONByFile(infraFile2, "zones"))).Should(o.BeTrue())
exutil.By("Compare two iam file")
o.Expect(strings.Compare(getSha256ByFile(iamFile), getSha256ByFile(iamFile2)) == 0).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 26633830-8636-48fb-ade5-5c05dece3175 | NonPreRelease-Longduration-Author:liangli-Critical-42952-[HyperShiftINSTALL] create multiple clusters without manifest crash and delete them asynchronously [Serial] | ['"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("NonPreRelease-Longduration-Author:liangli-Critical-42952-[HyperShiftINSTALL] create multiple clusters without manifest crash and delete them asynchronously [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 42952 is for AWS - skipping test ...")
}
caseID := "42952"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create aws HostedClusters 1")
createCluster1 := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID + "-1").
withNodePoolReplicas(1)
defer installHelper.deleteHostedClustersManual(createCluster1)
hostedCluster1 := installHelper.createAWSHostedClusters(createCluster1)
exutil.By("create aws HostedClusters 2")
createCluster2 := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID + "-2").
withNodePoolReplicas(1)
defer installHelper.deleteHostedClustersManual(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
exutil.By("delete HostedClusters CR background")
installHelper.deleteHostedClustersCRAllBackground()
exutil.By("check delete AWS HostedClusters asynchronously")
o.Eventually(func() int {
deletionTimestamp1, _ := hostedCluster1.getClustersDeletionTimestamp()
deletionTimestamp2, _ := hostedCluster2.getClustersDeletionTimestamp()
if len(deletionTimestamp1) == 0 || len(deletionTimestamp2) == 0 {
return -1
}
e2e.Logf("deletionTimestamp1:%s, deletionTimestamp2:%s", deletionTimestamp1, deletionTimestamp2)
return strings.Compare(deletionTimestamp1, deletionTimestamp2)
}, ShortTimeout, ShortTimeout/10).Should(o.Equal(0), "destroy AWS HostedClusters asynchronously error")
}) | |||||
test case | openshift/openshift-tests-private | 7db9bfcb-1769-4bef-9796-8680947aba88 | Longduration-NonPreRelease-Author:liangli-Critical-44924-[HyperShiftINSTALL] Test multi-zonal control plane components spread with HA mode enabled [Serial] | ['"fmt"', '"io"', '"os"', '"strings"', '"text/template"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-44924-[HyperShiftINSTALL] Test multi-zonal control plane components spread with HA mode enabled [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44924 is for AWS - skipping test ...")
}
caseID := "44924"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(2)
defer installHelper.deleteHostedClustersManual(createCluster)
hostedCluster := installHelper.createAWSHostedClustersRender(createCluster, func(filename string) error {
exutil.By("Set HighlyAvailable mode")
return replaceInFile(filename, "SingleReplica", "HighlyAvailable")
})
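// In HighlyAvailable mode every HA control-plane workload is expected to declare required pod
// anti-affinity on topology.kubernetes.io/zone, which is what the loops below verify.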
exutil.By("Check if pods of multi-zonal control plane components spread across multi-zone")
deploymentNames, err := hostedCluster.getHostedClustersHACPWorkloadNames("deployment")
o.Expect(err).NotTo(o.HaveOccurred())
for _, name := range deploymentNames {
value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", hostedCluster.namespace+"-"+hostedCluster.name, name, `-ojsonpath={.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[*].topologyKey}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("deployment: %s: %s", name, value))
o.Expect(value).Should(o.ContainSubstring("topology.kubernetes.io/zone"), fmt.Sprintf("deployment: %s lack of anti-affinity of zone", name))
}
statefulSetNames, err := hostedCluster.getHostedClustersHACPWorkloadNames("statefulset")
o.Expect(err).NotTo(o.HaveOccurred())
for _, name := range statefulSetNames {
value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("statefulset", "-n", hostedCluster.namespace+"-"+hostedCluster.name, name, `-ojsonpath={.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[*].topologyKey}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("statefulSetNames: %s: %s", name, value))
o.Expect(value).Should(o.ContainSubstring("topology.kubernetes.io/zone"), fmt.Sprintf("statefulset: %s lack of anti-affinity of zone", name))
}
}) | |||||
test case | openshift/openshift-tests-private | c4748113-a372-454a-8bd8-56bf99c24142 | Longduration-NonPreRelease-Author:liangli-Critical-44981-[HyperShiftINSTALL] Test built-in control plane pod tolerations [Serial] [Disruptive] | ['"io"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-44981-[HyperShiftINSTALL] Test built-in control plane pod tolerations [Serial] [Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44981 is for AWS - skipping test ...")
}
nodeAction := newNodeAction(oc)
nodes, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodes) < 2 {
g.Skip("work node should >= 2 - skipping test ...")
}
caseID := "44981"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err = os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("update taint and label, taint and label use key 'hypershift.openshift.io/cluster'")
defer nodeAction.taintNode(nodes[0], "hypershift.openshift.io/cluster="+oc.Namespace()+"-"+clusterName+":NoSchedule-")
nodeAction.taintNode(nodes[0], "hypershift.openshift.io/cluster="+oc.Namespace()+"-"+clusterName+":NoSchedule")
defer nodeAction.labelNode(nodes[0], "hypershift.openshift.io/cluster-")
nodeAction.labelNode(nodes[0], "hypershift.openshift.io/cluster="+oc.Namespace()+"-"+clusterName)
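// A node tainted and labelled with hypershift.openshift.io/cluster=<hc-namespace> is dedicated to that
// particular hosted cluster's control plane, so all of its control-plane pods should land on nodes[0].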
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().withName(clusterName).withNodePoolReplicas(0)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Check if control plane pods in HostedClusters are on " + nodes[0])
o.Eventually(hostedCluster.pollIsCPPodOnlyRunningOnOneNode(nodes[0]), DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Check if control plane pods in HostedClusters error")
exutil.By("update taint and label, taint and label use key 'hypershift.openshift.io/control-plane'")
defer nodeAction.taintNode(nodes[1], "hypershift.openshift.io/control-plane=true:NoSchedule-")
nodeAction.taintNode(nodes[1], "hypershift.openshift.io/control-plane=true:NoSchedule")
defer nodeAction.labelNode(nodes[1], "hypershift.openshift.io/control-plane-")
nodeAction.labelNode(nodes[1], "hypershift.openshift.io/control-plane=true")
exutil.By("create HostedClusters 2")
createCluster2 := installHelper.createClusterAWSCommonBuilder().withName(clusterName + "-2").withNodePoolReplicas(0)
defer installHelper.destroyAWSHostedClusters(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
exutil.By("Check if control plane pods in HostedClusters are on " + nodes[1])
o.Eventually(hostedCluster2.pollIsCPPodOnlyRunningOnOneNode(nodes[1]), DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Check if control plane pods in HostedClusters error")
}) | |||||
test case | openshift/openshift-tests-private | c1365183-159a-4b2d-b697-2975ddbc4602 | Longduration-NonPreRelease-Author:liangli-Critical-45341-[HyperShiftINSTALL] Test NodePort Publishing Strategy [Serial] [Disruptive] | ['"fmt"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-45341-[HyperShiftINSTALL] Test NodePort Publishing Strategy [Serial] [Disruptive]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 44981 is for AWS - skipping test ...")
}
caseID := "45341"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Create a nodeport ip bastion")
preStartJobSetup := newPreStartJob(clusterName+"-setup", oc.Namespace(), caseID, "setup", dir)
preStartJobTeardown := newPreStartJob(clusterName+"-teardown", oc.Namespace(), caseID, "teardown", dir)
defer preStartJobSetup.delete(oc)
preStartJobSetup.create(oc)
defer preStartJobTeardown.delete(oc)
defer preStartJobTeardown.create(oc)
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(1)
defer installHelper.deleteHostedClustersManual(createCluster)
hostedCluster := installHelper.createAWSHostedClustersRender(createCluster, func(filename string) error {
exutil.By("Test NodePort Publishing Strategy")
ip := preStartJobSetup.preStartJobIP(oc)
e2e.Logf("ip:" + ip)
return replaceInFile(filename, "type: LoadBalancer", "type: NodePort\n nodePort:\n address: "+ip)
})
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(1), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
}) | |||||
test case | openshift/openshift-tests-private | 3cfc006c-3082-4cfd-bc58-b8c17a20e23a | Longduration-NonPreRelease-Author:liangli-Critical-47053-[HyperShiftINSTALL] Test InfrastructureTopology configuration [Serial] | ['"fmt"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-47053-[HyperShiftINSTALL] Test InfrastructureTopology configuration [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 47053 is for AWS - skipping test ...")
}
caseID := "47053"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters-1")
createCluster1 := installHelper.createClusterAWSCommonBuilder().
withName(clusterName + "-1").
withNodePoolReplicas(1)
defer installHelper.destroyAWSHostedClusters(createCluster1)
hostedCluster1 := installHelper.createAWSHostedClusters(createCluster1)
exutil.By("check HostedClusters-1 HostedClusterInfrastructureTopology")
installHelper.createHostedClusterKubeconfig(createCluster1, hostedCluster1)
o.Eventually(hostedCluster1.pollGetHostedClusterInfrastructureTopology(), LongTimeout, LongTimeout/10).Should(o.ContainSubstring("SingleReplica"), "--infra-availability-policy (default SingleReplica) error")
exutil.By("create HostedClusters-2 infra-availability-policy: HighlyAvailable")
createCluster2 := installHelper.createClusterAWSCommonBuilder().
withName(clusterName + "-2").
withNodePoolReplicas(2).
withInfraAvailabilityPolicy("HighlyAvailable")
defer installHelper.destroyAWSHostedClusters(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
exutil.By("check HostedClusters-2 HostedClusterInfrastructureTopology")
installHelper.createHostedClusterKubeconfig(createCluster2, hostedCluster2)
o.Eventually(hostedCluster2.pollGetHostedClusterInfrastructureTopology(), LongTimeout, LongTimeout/10).Should(o.ContainSubstring("HighlyAvailable"), "--infra-availability-policy HighlyAvailable error")
exutil.By("Check if pods of multi-zonal components spread across multi-zone")
o.Eventually(func() string {
value, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+hostedCluster2.hostedClustersKubeconfigFile, "deployment", "-A", "-ojsonpath={.items[*].spec.replicas}").Output()
return strings.ReplaceAll(strings.ReplaceAll(value, "1", ""), " ", "")
}, DefaultTimeout, DefaultTimeout/10).ShouldNot(o.BeEmpty())
}) | |||||
test case | openshift/openshift-tests-private | aa5ad271-4c1c-45b4-8767-0b27875407f0 | Longduration-NonPreRelease-Author:liangli-Critical-48133-[HyperShiftINSTALL] Apply user defined tags to all AWS resources [Serial] | ['"io"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-48133-[HyperShiftINSTALL] Apply user defined tags to all AWS resources [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48133 is for AWS - skipping test ...")
}
caseID := "48133"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(2).
withAdditionalTags("adminContact=HyperShiftInstall,customTag=test")
defer installHelper.destroyAWSHostedClusters(createCluster)
cluster := installHelper.createAWSHostedClusters(createCluster)
installHelper.createHostedClusterKubeconfig(createCluster, cluster)
exutil.By("Confirm user defined tags")
checkSubstring(doOcpReq(oc, OcpGet, false, "hostedcluster", "-n", cluster.namespace, cluster.name, `-ojsonpath={.spec.platform.aws.resourceTags}`), []string{`{"key":"adminContact","value":"HyperShiftInstall"}`, `{"key":"customTag","value":"test"}`})
o.Expect(strings.Count(doOcpReq(oc, OcpGet, false, "awsmachines", "-n", cluster.namespace+"-"+cluster.name, `-ojsonpath={.items[*].spec.additionalTags}`), "HyperShiftInstall")).Should(o.Equal(2))
checkSubstring(doOcpReq(oc, OcpGet, false, "--kubeconfig="+cluster.hostedClustersKubeconfigFile, "infrastructure", "cluster", `-ojsonpath={.status.platformStatus.aws.resourceTags}`), []string{`{"key":"adminContact","value":"HyperShiftInstall"}`, `{"key":"customTag","value":"test"}`})
checkSubstring(doOcpReq(oc, OcpGet, false, "--kubeconfig="+cluster.hostedClustersKubeconfigFile, "-n", "openshift-ingress", "svc/router-default", `-ojsonpath={.metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-additional-resource-tags}`), []string{"adminContact=HyperShiftInstall", "customTag=test"})
}) | |||||
test case | openshift/openshift-tests-private | 7f96db9f-42ba-4d34-bdf9-6645465b07b4 | Longduration-NonPreRelease-Author:liangli-Critical-48672-[HyperShiftINSTALL] Create multi-zone AWS infrastructure and NodePools via CLI [Serial] | ['"fmt"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-48672-[HyperShiftINSTALL] Create multi-zone AWS infrastructure and NodePools via CLI [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 48672 is for AWS - skipping test ...")
}
// this case needs 3 zones
zones := getAWSMgmtClusterRegionAvailableZones(oc)
if len(zones) < 3 {
g.Skip("mgmt cluster has less than 3 zones: " + strings.Join(zones, " ") + " - skipping test ...")
}
caseID := "48672"
dir := "/tmp/hypershift" + caseID
clusterName := "hypershift-" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(clusterName).
withNodePoolReplicas(1).
withZones(strings.Join(zones[:3], ",")).
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
exutil.By("Check the hostedcluster and nodepool")
checkSubstring(doOcpReq(oc, OcpGet, false, "awsmachines", "-n", hostedCluster.namespace+"-"+hostedCluster.name, `-ojsonpath={.items[*].spec.providerID}`), zones[:3])
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(3), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
}) | |||||
test case | openshift/openshift-tests-private | 9f72f9ce-ac96-4510-b6c2-049c57cedf6b | Longduration-NonPreRelease-Author:liangli-Critical-49129-[HyperShiftINSTALL] Create multi-zone Azure infrastructure and nodepools via CLI [Serial] | ['"fmt"', '"os"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-49129-[HyperShiftINSTALL] Create multi-zone Azure infrastructure and nodepools via CLI [Serial]", func() {
if iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49129 is for azure - skipping test ...")
}
caseID := "49129"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{oc: oc, dir: dir, iaasPlatform: iaasPlatform}
exutil.By("install HyperShift operator")
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAzureCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(2).
withReleaseImage(release)
defer installHelper.destroyAzureHostedClusters(createCluster)
hostedCluster := installHelper.createAzureHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
}) | |||||
test case | openshift/openshift-tests-private | 8d60b92c-6669-4cb8-894f-92ccce6864e4 | Longduration-NonPreRelease-Author:liangli-Critical-49173-[HyperShiftINSTALL] Test Azure node root disk size [Serial] | ['"fmt"', '"os"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', '"k8s.io/utils/ptr"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-49173-[HyperShiftINSTALL] Test Azure node root disk size [Serial]", func() {
if iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49173 is for azure - skipping test ...")
}
caseID := "49173"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{oc: oc, dir: dir, iaasPlatform: iaasPlatform}
exutil.By("install HyperShift operator")
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAzureCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(1).
withRootDiskSize(64).
withReleaseImage(release)
defer installHelper.destroyAzureHostedClusters(createCluster)
hostedCluster := installHelper.createAzureHostedClusters(createCluster)
exutil.By("Check the disk size for the nodepool '" + hostedCluster.name + "'")
o.Expect(hostedCluster.getAzureDiskSizeGBByNodePool(hostedCluster.name)).Should(o.ContainSubstring("64"))
exutil.By("Get subnet ID of the hosted cluster")
subnetId := hostedCluster.getAzureSubnetId()
e2e.Logf("Found subnet ID = %s", subnetId)
exutil.By("create nodepool and check root-disk-size (default 120)")
np1Name := hostedCluster.name + "-1"
NewAzureNodePool(np1Name, hostedCluster.name, oc.Namespace()).
WithNodeCount(ptr.To(1)).
WithSubnetId(subnetId).
CreateAzureNodePool()
o.Expect(hostedCluster.getAzureDiskSizeGBByNodePool(np1Name)).Should(o.ContainSubstring("120"))
exutil.By("create nodepool and check root-disk-size (256)")
np2Name := hostedCluster.name + "-2"
NewAzureNodePool(np2Name, hostedCluster.name, oc.Namespace()).
WithNodeCount(ptr.To(1)).
WithRootDiskSize(ptr.To(256)).
WithSubnetId(subnetId).
CreateAzureNodePool()
o.Expect(hostedCluster.getAzureDiskSizeGBByNodePool(np2Name)).Should(o.ContainSubstring("256"))
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(3), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
}) | |||||
test case | openshift/openshift-tests-private | 9fe1fa5b-f4c2-4cd5-93dc-8503bc33f1c2 | Longduration-NonPreRelease-Author:liangli-Critical-49174-[HyperShiftINSTALL] Create Azure infrastructure and nodepools via CLI [Serial] | ['"context"', '"fmt"', '"os"', '"k8s.io/apimachinery/pkg/api/errors"', 'errors2 "k8s.io/apimachinery/pkg/util/errors"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:liangli-Critical-49174-[HyperShiftINSTALL] Create Azure infrastructure and nodepools via CLI [Serial]", func() {
if iaasPlatform != "azure" {
g.Skip("IAAS platform is " + iaasPlatform + " while 49174 is for azure - skipping test ...")
}
caseID := "49174"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{oc: oc, dir: dir, iaasPlatform: iaasPlatform}
exutil.By("install HyperShift operator")
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAzureCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(1).
withResourceGroupTags("foo=bar,baz=quux").
withReleaseImage(release)
defer installHelper.destroyAzureHostedClusters(createCluster)
hostedCluster := installHelper.createAzureHostedClusters(createCluster)
exutil.By("Scale up nodepool")
doOcpReq(oc, OcpScale, false, "nodepool", hostedCluster.name, "--namespace", hostedCluster.namespace, "--replicas=2")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), DoubleLongTimeout, DoubleLongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
// A part of https://issues.redhat.com/browse/HOSTEDCP-1411
_, err = oc.AdminKubeClient().CoreV1().Secrets("kube-system").Get(context.Background(), "azure-credentials", metav1.GetOptions{})
if errors.IsNotFound(err) {
e2e.Logf("Root creds not found on the management cluster, skip the Azure resource group check")
return
}
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Checking tags on the Azure resource group")
rgName, err := hostedCluster.getResourceGroupName()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Found resource group name = %s", rgName)
azClientSet := exutil.NewAzureClientSetWithRootCreds(oc)
resourceGroupsClientGetResponse, err := azClientSet.GetResourceGroupClient(nil).Get(context.Background(), rgName, nil)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(*resourceGroupsClientGetResponse.Tags["foo"]).To(o.Equal("bar"))
o.Expect(*resourceGroupsClientGetResponse.Tags["baz"]).To(o.Equal("quux"))
}) | |||||
test case | openshift/openshift-tests-private | 73d98ee0-4b09-40c5-80d7-5024d8b575cd | Longduration-NonPreRelease-Author:heli-Critical-64405-[HyperShiftINSTALL] Create a cluster in the AWS Region ap-southeast-3 [Serial] | ['"fmt"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-Critical-64405-[HyperShiftINSTALL] Create a cluster in the AWS Region ap-southeast-3 [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 64405 is for AWS - skipping test ...")
}
region, err := getClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
if region != "ap-southeast-3" {
g.Skip("region is " + region + " while 64405 is for ap-southeast-3 - skipping test ...")
}
caseID := "64405"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err = os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID).
withNodePoolReplicas(2)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
}) | |||||
test case | openshift/openshift-tests-private | 93dd6cd0-7242-4f41-a8d8-8f4262a96a2d | Longduration-NonPreRelease-Author:heli-Critical-62085-Critical-60483-Critical-64808-[HyperShiftINSTALL] The cluster should be deleted successfully when there is no identity provider [Serial] | ['"fmt"', '"io"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/aws/aws-sdk-go/service/route53"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-Critical-62085-Critical-60483-Critical-64808-[HyperShiftINSTALL] The cluster should be deleted successfully when there is no identity provider [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 62085,60483,64808 is for AWS - skipping test ...")
}
caseID := "62085-60483-64808"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(2).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
// For OCPBUGS-19674 and OCPBUGS-20163 (clone of the former)
{
exutil.By("Make sure the API server is exposed via Route")
o.Expect(hostedCluster.getSvcPublishingStrategyType(hcServiceAPIServer)).To(o.Equal(hcServiceTypeRoute))
exutil.By("Make sure the hosted cluster reports correct control plane endpoint port")
o.Expect(hostedCluster.getControlPlaneEndpointPort()).To(o.Equal("443"))
}
exutil.By("delete OpenID connect from aws IAM Identity providers")
infraID := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, `-ojsonpath={.spec.infraID}`)
provider := fmt.Sprintf("%s.s3.%s.amazonaws.com/%s", bucketName, region, infraID)
e2e.Logf("trying to delete OpenIDConnectProvider: %s", provider)
clusterinfra.GetAwsCredentialFromCluster(oc)
iamClient := exutil.NewIAMClient()
o.Expect(iamClient.DeleteOpenIDConnectProviderByProviderName(provider)).ShouldNot(o.HaveOccurred())
exutil.By("update control plane policy to remove security operations")
roleAndPolicyName := infraID + "-control-plane-operator"
var policyDocument = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:CreateVpcEndpoint",
"ec2:ModifyVpcEndpoint",
"ec2:DeleteVpcEndpoints",
"ec2:CreateTags",
"route53:ListHostedZones",
"ec2:DescribeVpcs"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets"
],
"Resource": "arn:aws:route53:::hostedzone/Z08584472H531BKOV71X7"
}
]
}`
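// The replacement policy above deliberately drops the ec2 SecurityGroup permissions; together with the
// deleted OIDC provider this should degrade the ValidAWSIdentityProvider condition checked further below.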
policy, err := iamClient.GetRolePolicy(roleAndPolicyName, roleAndPolicyName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("original role policy is %s", policy)
o.Expect(iamClient.UpdateRolePolicy(roleAndPolicyName, roleAndPolicyName, policyDocument)).NotTo(o.HaveOccurred())
policy, err = iamClient.GetRolePolicy(roleAndPolicyName, roleAndPolicyName)
e2e.Logf("updated role policy is %s", policy)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(policy).ShouldNot(o.ContainSubstring("SecurityGroup"))
exutil.By("ocp-64808 check hosted condition ValidAWSIdentityProvider should be unknown")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, `-ojsonpath={.status.conditions[?(@.type=="ValidAWSIdentityProvider")].status}`)
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("False"), fmt.Sprintf("%s expected condition ValidAWSIdentityProvider False status not found error", hostedCluster.name))
}) | |||||
test case | openshift/openshift-tests-private | d9a96548-018f-475b-8a6b-0e8ca3679f7f | Longduration-NonPreRelease-Author:heli-Critical-60484-[HyperShiftINSTALL] HostedCluster deletion shouldn't hang when OIDC provider/STS is configured incorrectly [Serial] | ['"fmt"', '"io"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-Critical-60484-[HyperShiftINSTALL] HostedCluster deletion shouldn't hang when OIDC provider/STS is configured incorrectly [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 60484 is for AWS - skipping test ...")
}
caseID := "60484"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
installHelper.newAWSS3Client()
installHelper.createAWSS3Bucket()
exutil.By("install HO without s3 credentials")
var installCMD = fmt.Sprintf("hypershift install "+
"--oidc-storage-provider-s3-bucket-name %s "+
"--oidc-storage-provider-s3-region %s "+
"--private-platform AWS "+
"--aws-private-creds %s "+
"--aws-private-region=%s",
bucketName, region, getAWSPrivateCredentials(), region)
var cmdClient = NewCmdClient().WithShowInfo(true)
defer installHelper.hyperShiftUninstall()
_, err = cmdClient.Run(installCMD).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
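// The install command above omits the S3 credentials for the OIDC storage provider, so the hosted
// cluster's OIDC documents cannot be published; ValidOIDCConfiguration is expected to report False
// below, and deletion of the cluster must still not hang.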
exutil.By("create HostedClusters")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(0).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate)
defer installHelper.destroyAWSHostedClusters(createCluster)
hc := installHelper.createAWSHostedClusterWithoutCheck(createCluster)
exutil.By("check hosted cluster condition ValidOIDCConfiguration")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", "-o", `jsonpath={.status.conditions[?(@.type=="ValidOIDCConfiguration")].status}`)
}, DefaultTimeout, DefaultTimeout/10).Should(o.ContainSubstring("False"))
msg := doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", "-o", `jsonpath={.status.conditions[?(@.type=="ValidOIDCConfiguration")].message}`)
e2e.Logf("error msg of condition ValidOIDCConfiguration is %s", msg)
}) | |||||
test case | openshift/openshift-tests-private | a8151cb0-0121-42ae-aa08-321a2544f8fb | Longduration-NonPreRelease-Author:heli-Critical-67828-[HyperShiftINSTALL] non-serving components land on non-serving nodes versus default workers [Serial] | ['"encoding/json"', '"fmt"', '"io"', '"os"', '"path"', '"path/filepath"', '"strings"', '"text/template"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"k8s.io/apimachinery/pkg/labels"', '"k8s.io/kubernetes/pkg/util/taints"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-Critical-67828-[HyperShiftINSTALL] non-serving components land on non-serving nodes versus default workers [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 67828 is for AWS - skipping test ...")
}
if !exutil.IsInfrastructuresHighlyAvailable(oc) {
g.Skip("ocp-67828 is for Infra HA OCP - skipping test ...")
}
msNames := strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachineset, "--ignore-not-found", `-o=jsonpath={.items[*].metadata.name}`), " ")
if len(msNames) < 3 {
g.Skip("ocp-67828 is for Infra HA OCP and expects for 3 machinesets - skipping test ... ")
}
caseID := "67828"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("config mgmt cluster: scale a machineseet to repicas==2")
oriDeletePolicy := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, msNames[2], `-o=jsonpath={.spec.deletePolicy}`)
defer func() {
if oriDeletePolicy == "" {
doOcpReq(oc, OcpPatch, false, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--type=json", "-p", `[{"op": "remove", "path": "/spec/deletePolicy"}]`)
} else {
doOcpReq(oc, OcpPatch, false, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--type=merge", fmt.Sprintf(`--patch={"spec": {"deletePolicy": "%s"}}`, oriDeletePolicy))
}
}()
doOcpReq(oc, OcpPatch, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--type=merge", `--patch={"spec": {"deletePolicy": "Newest"}}`)
oriReplicas := doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], `-o=jsonpath={.spec.replicas}`)
defer doOcpReq(oc, OcpScale, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--replicas="+oriReplicas)
doOcpReq(oc, OcpScale, true, "-n", machineAPINamespace, mapiMachineset, msNames[2], "--replicas=2")
o.Eventually(func() bool {
return checkMachinesetReplicaStatus(oc, msNames[2])
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("machineset %s are ready", msNames[2]))
// choose msNames[0], msNames[1] as serving component nodes, msNames[2] as non-serving component nodes
var nonServingComponentNodes = strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachine, "-l", fmt.Sprintf("machine.openshift.io/cluster-api-machineset=%s", msNames[2]), `-o=jsonpath={.items[*].status.nodeRef.name}`), " ")
var servingComponentNodes []string
for i := 0; i < 2; i++ {
servingComponentNodes = append(servingComponentNodes, strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachine, "-l", fmt.Sprintf("machine.openshift.io/cluster-api-machineset=%s", msNames[i]), `-o=jsonpath={.items[*].status.nodeRef.name}`), " ")...)
}
exutil.By("install hypershift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
externalDNS: true,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("add label/taint to servingComponentNodes")
defer func() {
removeNodesTaint(oc, servingComponentNodes, servingComponentNodesTaintKey)
removeNodesLabel(oc, servingComponentNodes, servingComponentNodesLabelKey)
}()
for _, no := range servingComponentNodes {
doOcpReq(oc, OcpAdm, true, "taint", "node", no, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", no, servingComponentNodesLabel)
}
exutil.By("add label/taint to nonServingComponentNodes")
defer func() {
removeNodesTaint(oc, nonServingComponentNodes, nonServingComponentTaintKey)
removeNodesLabel(oc, nonServingComponentNodes, nonServingComponentLabelKey)
}()
for _, no := range nonServingComponentNodes {
doOcpReq(oc, OcpAdm, true, "taint", "node", no, nonServingComponentTaint)
doOcpReq(oc, OcpLabel, true, "node", no, nonServingComponentLabel)
}
exutil.By("create MachineHealthCheck for serving component machinesets")
clusterID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
mhcBaseDir := exutil.FixturePath("testdata", "hypershift")
mhcTemplate := filepath.Join(mhcBaseDir, "mhc.yaml")
mhc := make([]mhcDescription, 2)
for i := 0; i < 2; i++ {
mhc[i] = mhcDescription{
Clusterid: clusterID,
Maxunhealthy: "100%",
MachinesetName: msNames[i],
Name: "mhc-67828-" + msNames[i],
Namespace: machineAPINamespace,
template: mhcTemplate,
}
}
defer mhc[0].deleteMhc(oc, "mhc-67828-"+msNames[0]+".template")
mhc[0].createMhc(oc, "mhc-67828-"+msNames[0]+".template")
defer mhc[1].deleteMhc(oc, "mhc-67828-"+msNames[1]+".template")
mhc[1].createMhc(oc, "mhc-67828-"+msNames[1]+".template")
exutil.By("create a hosted cluster")
release, er := exutil.GetReleaseImage(oc)
o.Expect(er).NotTo(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID+"-"+strings.ToLower(exutil.RandStr(5))).
withNodePoolReplicas(2).
withAnnotation("hypershift.openshift.io/topology", "dedicated-request-serving-components").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS).
withReleaseImage(release)
defer func() {
exutil.By("in defer function, destroy the hosted cluster")
installHelper.destroyAWSHostedClusters(createCluster)
exutil.By("check the previous serving nodes are deleted and new serving nodes are created (machinesets are still in ready status)")
o.Eventually(func() bool {
for _, no := range servingComponentNodes {
noinfo := doOcpReq(oc, OcpGet, false, "no", "--ignore-not-found", no)
if strings.TrimSpace(noinfo) != "" {
return false
}
}
for i := 0; i < 2; i++ {
if !checkMachinesetReplicaStatus(oc, msNames[i]) {
return false
}
}
return true
}, 2*DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), fmt.Sprintf("serving nodes are not deleted %+v", servingComponentNodes))
exutil.By("no cluster label annotation in the new serving nodes")
for i := 0; i < 2; i++ {
for _, no := range strings.Split(doOcpReq(oc, OcpGet, true, "-n", machineAPINamespace, mapiMachine, "-l", fmt.Sprintf("machine.openshift.io/cluster-api-machineset=%s", msNames[i]), `-o=jsonpath={.items[*].status.nodeRef.name}`), " ") {
o.Expect(doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-ojsonpath={.metadata.labels.hypershift\.openshift\.io/cluster}`)).Should(o.BeEmpty())
o.Expect(doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-ojsonpath={.metadata.labels.hypershift\.openshift\.io/cluster-name}`)).Should(o.BeEmpty())
o.Expect(doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-ojsonpath={.spec.taints[?(@.key=="hypershift.openshift.io/cluster")].value}`)).Should(o.BeEmpty())
}
}
}()
hc := installHelper.createAWSHostedClusters(createCluster)
hcpNS := hc.namespace + "-" + hc.name
exutil.By("check hostedcluster annotation")
clusterSchValue := doOcpReq(oc, OcpGet, true, "-n", hc.namespace, "hostedcluster", hc.name, "--ignore-not-found", `-ojsonpath={.metadata.annotations.hypershift\.openshift\.io/cluster-scheduled}`)
o.Expect(clusterSchValue).Should(o.Equal("true"))
clusterTopology := doOcpReq(oc, OcpGet, true, "-n", hc.namespace, "hostedcluster", hc.name, "--ignore-not-found", `-ojsonpath={.metadata.annotations.hypershift\.openshift\.io/topology}`)
o.Expect(clusterTopology).Should(o.Equal("dedicated-request-serving-components"))
exutil.By("check hosted cluster hcp serving components' node allocation")
var servingComponentsNodeLocation = make(map[string]struct{})
hcpServingComponents := []string{"kube-apiserver", "ignition-server-proxy", "oauth-openshift", "private-router"}
for _, r := range hcpServingComponents {
nodes := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp="+r, `-ojsonpath={.items[*].spec.nodeName}`), " ")
for _, n := range nodes {
o.Expect(n).Should(o.BeElementOf(servingComponentNodes))
servingComponentsNodeLocation[n] = struct{}{}
}
}
o.Expect(servingComponentsNodeLocation).ShouldNot(o.BeEmpty())
exutil.By("check serving nodes hcp labels and taints are generated automatically on the serving nodes")
for no := range servingComponentsNodeLocation {
cluster := doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-o=jsonpath={.metadata.labels.hypershift\.openshift\.io/cluster}`)
o.Expect(cluster).Should(o.Equal(hcpNS))
clusterName := doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-o=jsonpath={.metadata.labels.hypershift\.openshift\.io/cluster-name}`)
o.Expect(clusterName).Should(o.Equal(hc.name))
hcpTaint := doOcpReq(oc, OcpGet, false, "node", no, "--ignore-not-found", `-o=jsonpath={.spec.taints[?(@.key=="hypershift.openshift.io/cluster")].value}`)
o.Expect(hcpTaint).Should(o.Equal(hcpNS))
}
hcpNonServingComponents := []string{
"cloud-controller-manager",
"aws-ebs-csi-driver-controller",
"capi-provider-controller-manager",
"catalog-operator",
"certified-operators-catalog",
"cloud-network-config-controller",
"cluster-api",
"cluster-autoscaler",
"cluster-network-operator",
"cluster-node-tuning-operator",
"cluster-policy-controller",
"cluster-version-operator",
"community-operators-catalog",
"control-plane-operator",
"csi-snapshot-controller",
"csi-snapshot-controller-operator",
"csi-snapshot-webhook",
"dns-operator",
"etcd",
"hosted-cluster-config-operator",
"ignition-server",
"ingress-operator",
"konnectivity-agent",
"kube-controller-manager",
"kube-scheduler",
"machine-approver",
"multus-admission-controller",
"network-node-identity",
"olm-operator",
"openshift-apiserver",
"openshift-controller-manager",
"openshift-oauth-apiserver",
"openshift-route-controller-manager",
"ovnkube-control-plane",
"packageserver",
"redhat-marketplace-catalog",
"redhat-operators-catalog",
}
for _, r := range hcpNonServingComponents {
nodes := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp="+r, `-o=jsonpath={.items[*].spec.nodeName}`), " ")
for _, n := range nodes {
o.Expect(n).Should(o.BeElementOf(nonServingComponentNodes))
}
}
// components that have no app label; select them by their name label instead
hcpNonServingComponentsWithoutAppLabels := []string{
"aws-ebs-csi-driver-operator",
"cluster-image-registry-operator",
"cluster-storage-operator",
}
for _, r := range hcpNonServingComponentsWithoutAppLabels {
nodes := strings.Split(doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lname="+r, `-o=jsonpath={.items[*].spec.nodeName}`), " ")
for _, n := range nodes {
o.Expect(n).Should(o.BeElementOf(nonServingComponentNodes))
}
}
}) | |||||
test case | openshift/openshift-tests-private | 6e3c5ad4-7161-4c21-952d-07eae77e9ad0 | Longduration-NonPreRelease-Author:heli-Critical-67721-[HyperShiftINSTALL] Hypershift Operator version validation is not skipping version checks for node pools [Serial] | ['"fmt"', '"io"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-Critical-67721-[HyperShiftINSTALL] Hypershift Operator version validation is not skipping version checks for node pools [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 67721 is for AWS - skipping test ...")
}
caseID := "67721"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
installHelper := installHelper{oc: oc, bucketName: "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault()), dir: dir, iaasPlatform: iaasPlatform}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("check hosted cluster supported version")
supportedVersion := doOcpReq(oc, OcpGet, true, "configmap", "-n", "hypershift", "supported-versions", `-ojsonpath={.data.supported-versions}`)
e2e.Logf("supported version is: " + supportedVersion)
minSupportedVersion, err := getVersionWithMajorAndMinor(getMinSupportedOCPVersion())
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(supportedVersion).Should(o.ContainSubstring(minSupportedVersion))
exutil.By("get max unsupported HostedClusters version nightly release")
maxUnsupportedVersion, err := getVersionWithMajorAndMinor(getLatestUnsupportedOCPVersion())
o.Expect(err).ShouldNot(o.HaveOccurred())
release, err := exutil.GetLatestNightlyImage(maxUnsupportedVersion)
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("create HostedClusters with unsupported version")
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-" + caseID).
withReleaseImage(release).
withNodePoolReplicas(1)
defer installHelper.destroyAWSHostedClusters(createCluster)
hc := installHelper.createAWSHostedClusterWithoutCheck(createCluster)
exutil.By("check hc condition & nodepool condition")
o.Eventually(func() bool {
hcStatus := doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", `-o=jsonpath={.status.conditions[?(@.type=="ValidReleaseImage")].status}`)
if hcStatus != "False" {
return false
}
npStatus := doOcpReq(oc, OcpGet, false, "nodepool", "-n", hc.namespace, fmt.Sprintf(`-o=jsonpath={.items[?(@.spec.clusterName=="%s")].status.conditions[?(@.type=="ValidReleaseImage")].status}`, hc.name))
for _, st := range strings.Split(npStatus, " ") {
if st != "False" {
return false
}
}
return true
}, LongTimeout, LongTimeout/30).Should(o.BeTrue())
exutil.By("add annotation to skip release check")
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hc.name, "-n", hc.namespace, "hypershift.openshift.io/skip-release-image-validation=true")
skipReleaseImage := doOcpReq(oc, OcpGet, true, "hostedcluster", hc.name, "-n", hc.namespace, `-o=jsonpath={.metadata.annotations.hypershift\.openshift\.io/skip-release-image-validation}`)
o.Expect(skipReleaseImage).Should(o.ContainSubstring("true"))
exutil.By("check nodepool and hc to be recovered")
o.Eventually(func() bool {
hcStatus := doOcpReq(oc, OcpGet, false, "hostedcluster", hc.name, "-n", hc.namespace, "--ignore-not-found", `-o=jsonpath={.status.conditions[?(@.type=="ValidReleaseImage")].status}`)
if hcStatus != "True" {
return false
}
return true
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "hostedcluster ValidReleaseImage could not be recovered back error")
o.Eventually(func() bool {
npStatus := doOcpReq(oc, OcpGet, false, "nodepool", "-n", hc.namespace, fmt.Sprintf(`-o=jsonpath={.items[?(@.spec.clusterName=="%s")].status.conditions[?(@.type=="ValidReleaseImage")].status}`, hc.name))
for _, st := range strings.Split(npStatus, " ") {
if st != "True" {
return false
}
}
return true
}, LongTimeout, LongTimeout/10).Should(o.BeTrue(), "nodepool ValidReleaseImage could not be recovered back error")
o.Eventually(hc.pollHostedClustersReady(), ClusterInstallTimeout, ClusterInstallTimeout/10).Should(o.BeTrue(), "AWS HostedClusters install error")
exutil.By("create a new nodepool")
replica := 1
npName := caseID + strings.ToLower(exutil.RandStrDefault())
NewAWSNodePool(npName, hc.name, hc.namespace).
WithNodeCount(&replica).
WithReleaseImage(release).
CreateAWSNodePool()
o.Eventually(hc.pollCheckHostedClustersNodePoolReady(npName), LongTimeout, LongTimeout/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
}) | |||||
test case | openshift/openshift-tests-private | f2d55f24-64ae-4c96-83aa-59b0e5964725 | Longduration-NonPreRelease-Author:heli-Critical-67278-Critical-69222-[HyperShiftINSTALL] Test embargoed cluster upgrades imperceptibly [Serial] | ['"fmt"', '"io"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-Critical-67278-Critical-69222-[HyperShiftINSTALL] Test embargoed cluster upgrades imperceptibly [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 67278 and 69222 are for AWS - skipping test ...")
}
caseID := "67278-69222"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(2).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS).
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
hcpNS := hostedCluster.namespace + "-" + hostedCluster.name
exutil.By("check hostedcluster nodes ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(2), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
exutil.By("ocp-69222 check hosted cluster only expost port 443")
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.status.controlPlaneEndpoint.port}`)).Should(o.Equal("443"))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hcpNS, "service", "private-router", `-o=jsonpath={.spec.ports[?(@.targetPort=="https")].port}`)).Should(o.Equal("443"))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hcpNS, "service", "router", `-o=jsonpath={.spec.ports[?(@.targetPort=="https")].port}`)).Should(o.Equal("443"))
exutil.By("get management cluster cluster version and find the latest CI image")
hcpRelease := doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-ojsonpath={.spec.release.image}`)
mgmtVersion, mgmtBuild, err := exutil.GetClusterVersion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("hcp image is %s and mgmt cluster image is %s", hcpRelease, mgmtBuild)
ciImage, err := exutil.GetLatestImage(architecture.ClusterArchitecture(oc).String(), "ocp", mgmtVersion+".0-0.ci")
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("upgrade hcp to latest ci image by controlPlaneRelease")
doOcpReq(oc, OcpPatch, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, "--type=merge", fmt.Sprintf(`--patch={"spec": {"controlPlaneRelease": {"image":"%s"}}}`, ciImage))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.spec.controlPlaneRelease.image}`)).Should(o.ContainSubstring(ciImage))
exutil.By("check clusterversion operator in hcp is updated to ci image")
o.Eventually(func() bool {
images := doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp=cluster-version-operator", "--ignore-not-found", `-o=jsonpath={.items[*].spec.containers[*].image}`)
for _, image := range strings.Split(images, " ") {
if !strings.Contains(image, ciImage) {
return false
}
}
return true
}, LongTimeout, LongTimeout/20).Should(o.BeTrue(), "cluster version operator in hcp image not updated error")
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.spec.release.image}`)).Should(o.ContainSubstring(hcpRelease))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "hc", hostedCluster.name, `-o=jsonpath={.status.version.history[?(@.state=="Completed")].version}`)).Should(o.ContainSubstring(mgmtBuild))
o.Expect(doOcpReq(oc, OcpGet, true, "--kubeconfig="+hostedCluster.hostedClustersKubeconfigFile, "clusterversion", "version", `-o=jsonpath={.status.history[?(@.state=="Completed")].version}`)).Should(o.ContainSubstring(mgmtBuild))
o.Expect(doOcpReq(oc, OcpGet, true, "--kubeconfig="+hostedCluster.hostedClustersKubeconfigFile, "featuregate", "cluster", "--ignore-not-found", `-o=jsonpath={.status.featureGates[0].version}`)).Should(o.ContainSubstring(mgmtBuild))
exutil.By("create a new nodepool and check its version is still the old one")
npName := fmt.Sprintf("np-67278-%s", exutil.GetRandomString())
nodeCount := 1
defer hostedCluster.deleteNodePool(npName)
NewAWSNodePool(npName, hostedCluster.name, hostedCluster.namespace).WithNodeCount(&nodeCount).CreateAWSNodePool()
o.Eventually(hostedCluster.pollCheckHostedClustersNodePoolReady(npName), LongTimeout+DefaultTimeout, (LongTimeout+DefaultTimeout)/10).Should(o.BeTrue(), fmt.Sprintf("nodepool %s ready error", npName))
o.Expect(doOcpReq(oc, OcpGet, true, "-n", hostedCluster.namespace, "nodepool", npName, "--ignore-not-found", `-o=jsonpath={.spec.release.image}`)).Should(o.ContainSubstring(hcpRelease))
}) | |||||
test case | openshift/openshift-tests-private | 90344dcc-01bc-4deb-bd7d-55f8e64da85f | Longduration-NonPreRelease-Author:heli-High-62972-[HyperShiftINSTALL] Check conditional updates on HyperShift Hosted Control Plane [Serial] | ['"fmt"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-High-62972-[HyperShiftINSTALL] Check conditional updates on HyperShift Hosted Control Plane [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 62972 is for AWS - skipping test ...")
}
caseID := "62972"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
region: region,
}
installHelper.newAWSS3Client()
defer installHelper.deleteAWSS3Bucket()
installHelper.createAWSS3Bucket()
var bashClient = NewCmdClient().WithShowInfo(true)
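// --rhobs-monitoring and --enable-cvo-management-cluster-metrics-access are mutually exclusive, so this
// install invocation is expected to fail with the error message asserted below.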
cmd := fmt.Sprintf("hypershift install "+
"--oidc-storage-provider-s3-bucket-name %s "+
"--oidc-storage-provider-s3-credentials %s "+
"--oidc-storage-provider-s3-region %s "+
"--enable-cvo-management-cluster-metrics-access=true "+
"--rhobs-monitoring=true ",
installHelper.bucketName, installHelper.dir+"/credentials", installHelper.region)
output, err := bashClient.Run(cmd).Output()
o.Expect(err).Should(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("when invoking this command with the --rhobs-monitoring flag, the --enable-cvo-management-cluster-metrics-access flag is not supported"))
}) | |||||
test case | openshift/openshift-tests-private | 2c228ffd-9aa9-46f4-9c60-09be8019e0bf | NonPreRelease-Longduration-Author:fxie-Critical-70614-[HyperShiftINSTALL] Test HostedCluster condition type AWSDefaultSecurityGroupDeleted [Serial] | ['"context"', '"encoding/json"', '"fmt"', '"io"', '"os"', '"path"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/aws/aws-sdk-go/aws/awserr"', '"github.com/aws/aws-sdk-go/aws/session"', '"github.com/aws/aws-sdk-go/service/elb"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"k8s.io/apimachinery/pkg/api/errors"', '"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"', '"k8s.io/apimachinery/pkg/fields"', '"k8s.io/apimachinery/pkg/runtime/schema"', 'errors2 "k8s.io/apimachinery/pkg/util/errors"', '"k8s.io/apimachinery/pkg/util/wait"', '"k8s.io/apimachinery/pkg/watch"', '"k8s.io/utils/ptr"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("NonPreRelease-Longduration-Author:fxie-Critical-70614-[HyperShiftINSTALL] Test HostedCluster condition type AWSDefaultSecurityGroupDeleted [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Running on %s while the test case is AWS-only, skipping", iaasPlatform))
}
var (
namePrefix = fmt.Sprintf("70614-%s", strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", namePrefix)
bucketName = fmt.Sprintf("%s-bucket", namePrefix)
hcName = fmt.Sprintf("%s-hc", namePrefix)
lbName = fmt.Sprintf("%s-lb", namePrefix)
targetConditionType = "AWSDefaultSecurityGroupDeleted"
watchTimeoutSec = 900
)
var (
unstructured2TypedCondition = func(condition any, typedCondition *metav1.Condition) {
g.GinkgoHelper()
conditionMap, ok := condition.(map[string]any)
o.Expect(ok).To(o.BeTrue(), "Failed to cast condition to map[string]any")
conditionJson, err := json.Marshal(conditionMap)
o.Expect(err).ShouldNot(o.HaveOccurred())
err = json.Unmarshal(conditionJson, typedCondition)
o.Expect(err).ShouldNot(o.HaveOccurred())
}
)
exutil.By("Installing the Hypershift Operator")
defer func() {
err := os.RemoveAll(tempDir)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err := os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a HostedCluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
// The number of worker nodes (of the hosted cluster) is irrelevant, so we only create one.
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withReleaseImage(release)
defer installHelper.deleteHostedClustersManual(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Getting default worker SG of the hosted cluster")
defaultWorkerSGID := doOcpReq(oc, OcpGet, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, `-o=jsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}`)
e2e.Logf("Found defaultWorkerSecurityGroupID = %s", defaultWorkerSGID)
exutil.By("Creating a dummy load balancer which has the default worker SG attached")
subnet := doOcpReq(oc, OcpGet, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, `-o=jsonpath={.spec.platform.aws.cloudProviderConfig.subnet.id}`)
e2e.Logf("Found subnet of the hosted cluster = %s", subnet)
clusterinfra.GetAwsCredentialFromCluster(oc)
elbClient := elb.New(session.Must(session.NewSession()), aws.NewConfig().WithRegion(region))
defer func() {
_, err = elbClient.DeleteLoadBalancer(&elb.DeleteLoadBalancerInput{
LoadBalancerName: aws.String(lbName),
})
// If the load balancer does not exist or has already been deleted, the call to DeleteLoadBalancer still succeeds.
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = elbClient.CreateLoadBalancer(&elb.CreateLoadBalancerInput{
Listeners: []*elb.Listener{
{
InstancePort: aws.Int64(80),
InstanceProtocol: aws.String("HTTP"),
LoadBalancerPort: aws.Int64(80),
Protocol: aws.String("HTTP"),
},
},
LoadBalancerName: aws.String(lbName),
Subnets: aws.StringSlice([]string{subnet}),
SecurityGroups: aws.StringSlice([]string{defaultWorkerSGID}),
})
if err != nil {
// Log a more granular error message if possible
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Error creating AWS load balancer (%s): %v", aerr.Code(), aerr)
}
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error creating AWS load balancer")
}
exutil.By("Delete the HostedCluster without waiting for the finalizers (non-blocking)")
doOcpReq(oc, OcpDelete, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, "--wait=false")
exutil.By("Polling until the AWSDefaultSecurityGroupDeleted condition is in false status")
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, false, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, fmt.Sprintf(`-o=jsonpath={.status.conditions[?(@.type=="%s")].status}`, targetConditionType))
}, LongTimeout, LongTimeout/10).Should(o.Equal("False"), "Timeout waiting for the AWSDefaultSecurityGroupDeleted condition to be in false status")
targetConditionMessage := doOcpReq(oc, OcpGet, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, fmt.Sprintf(`-o=jsonpath={.status.conditions[?(@.type=="%s")].message}`, targetConditionType))
e2e.Logf("Found message of the AWSDefaultSecurityGroupDeleted condition = %s", targetConditionMessage)
exutil.By("Start watching the HostedCluster with a timeout")
hcRestMapping, err := oc.RESTMapper().RESTMapping(schema.GroupKind{
Group: "hypershift.openshift.io",
Kind: "HostedCluster",
})
o.Expect(err).ShouldNot(o.HaveOccurred())
w, err := oc.AdminDynamicClient().Resource(hcRestMapping.Resource).Namespace(hostedCluster.namespace).Watch(context.Background(), metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("metadata.name", hostedCluster.name).String(),
TimeoutSeconds: ptr.To(int64(watchTimeoutSec)),
})
o.Expect(err).ShouldNot(o.HaveOccurred())
defer w.Stop()
exutil.By("Now delete the load balancer created above")
_, err = elbClient.DeleteLoadBalancer(&elb.DeleteLoadBalancerInput{
LoadBalancerName: aws.String(lbName),
})
if err != nil {
// Log a more granular error message if possible
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Error deleting AWS load balancer (%s): %v", aerr.Code(), aerr)
}
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error deleting AWS load balancer")
}
exutil.By("Examining MODIFIED events that occurs on the HostedCluster")
var typedCondition metav1.Condition
var targetConditionExpected bool
resultChan := w.ResultChan()
outerForLoop:
for event := range resultChan {
if event.Type != watch.Modified {
continue
}
e2e.Logf("MODIFIED event captured")
// Avoid conversion to typed object as it'd bring in quite a few dependencies to the repo
hcUnstructured, ok := event.Object.(*unstructured.Unstructured)
o.Expect(ok).To(o.BeTrue(), "Failed to cast event.Object into *unstructured.Unstructured")
conditions, found, err := unstructured.NestedSlice(hcUnstructured.Object, "status", "conditions")
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(found).To(o.BeTrue())
for _, condition := range conditions {
unstructured2TypedCondition(condition, &typedCondition)
if typedCondition.Type != targetConditionType {
continue
}
if typedCondition.Status == metav1.ConditionTrue {
e2e.Logf("Found AWSDefaultSecurityGroupDeleted condition = %s", typedCondition)
targetConditionExpected = true
break outerForLoop
}
e2e.Logf("The AWSDefaultSecurityGroupDeleted condition is found to be in %s status, keep waiting", typedCondition.Status)
}
}
// The result channel could be closed since the beginning, e.g. when an inappropriate ListOptions is passed to Watch
// We need to ensure this is not the case
o.Expect(targetConditionExpected).To(o.BeTrue(), "Result channel closed unexpectedly before the AWSDefaultSecurityGroupDeleted condition becomes true in status")
exutil.By("Polling until the HostedCluster is gone")
o.Eventually(func() bool {
_, err := oc.AdminDynamicClient().Resource(hcRestMapping.Resource).Namespace(hostedCluster.namespace).Get(context.Background(), hostedCluster.name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return true
}
o.Expect(err).ShouldNot(o.HaveOccurred(), fmt.Sprintf("Unexpected error: %s", errors.ReasonForError(err)))
e2e.Logf("Still waiting for the HostedCluster to disappear")
return false
}, LongTimeout, LongTimeout/10).Should(o.BeTrue(), "Timed out waiting for the HostedCluster to disappear")
}) | |||||
test case | openshift/openshift-tests-private | f60b41c2-b5b4-499e-8cc1-24f3bbd5301f | Longduration-NonPreRelease-Author:heli-Critical-64409-[HyperShiftINSTALL] Ensure ingress controllers are removed before load balancers [Serial] | ['"context"', '"encoding/json"', '"fmt"', '"io"', '"os"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', 'appsv1 "k8s.io/api/apps/v1"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/watch"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'operatorv1 "github.com/openshift/api/operator/v1"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-Critical-64409-[HyperShiftINSTALL] Ensure ingress controllers are removed before load balancers [Serial]", func() {
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 64409 is for AWS - skipping test ...")
}
caseID := "64409"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
// files used to store the observed deletion timestamps
var svcDeleteTimeStampFile = dir + "/svc-deletion-time-stamp-result.txt"
var ingressControllerDeleteTimeStampFile = dir + "/ingress-controller-deletion-time-stamp-result.txt"
exutil.By("Config AWS Bucket And install HyperShift operator")
bucketName := "hypershift-" + caseID + "-" + strings.ToLower(exutil.RandStrDefault())
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create HostedClusters config")
nodeReplicas := 1
createCluster := installHelper.createClusterAWSCommonBuilder().
withName("hypershift-"+caseID).
withNodePoolReplicas(nodeReplicas).
withAnnotation("hypershift.openshift.io/cleanup-cloud-resources", "true").
withEndpointAccess(PublicAndPrivate).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS)
exutil.By("add watcher to catch the resource deletion info")
svcCtx, svcCancel := context.WithTimeout(context.Background(), ClusterInstallTimeout+LongTimeout)
defer svcCancel()
operatorCtx, operatorCancel := context.WithTimeout(context.Background(), ClusterInstallTimeout+LongTimeout)
defer operatorCancel()
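// The deferred block below destroys the hosted cluster and then compares the deletion timestamps recorded by
// the two watchers: the default ingresscontroller must be marked for deletion no later than its
// router-default LoadBalancer Service.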
defer func() {
// destroy hosted cluster
installHelper.destroyAWSHostedClusters(createCluster)
e2e.Logf("check destroy AWS HostedClusters")
o.Eventually(pollGetHostedClusters(oc, createCluster.Namespace), ShortTimeout, ShortTimeout/10).ShouldNot(o.ContainSubstring(createCluster.Name), "destroy AWS HostedClusters error")
exutil.By("check the ingress controllers are removed before load balancers")
// get resource deletion time
svcDelTimeStr, err := os.ReadFile(svcDeleteTimeStampFile)
o.Expect(err).NotTo(o.HaveOccurred())
ingressDelTimeStr, err := os.ReadFile(ingressControllerDeleteTimeStampFile)
o.Expect(err).NotTo(o.HaveOccurred())
ingressDelTime, err := time.Parse(time.RFC3339, string(ingressDelTimeStr))
o.Expect(err).NotTo(o.HaveOccurred())
routeSVCTime, err := time.Parse(time.RFC3339, string(svcDelTimeStr))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check the ingress controllers are removed before load balancers")
e2e.Logf("parsed deletion time ingresscontroller: %s, route svc: %s", ingressDelTime, routeSVCTime)
o.Expect(ingressDelTime.After(routeSVCTime)).Should(o.BeFalse())
}()
exutil.By("create a hosted cluster")
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
exutil.By("create HostedClusters node ready")
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
o.Eventually(hostedCluster.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/10).Should(o.Equal(nodeReplicas), fmt.Sprintf("not all nodes in hostedcluster %s are in ready state", hostedCluster.name))
hostedCluster.oc.SetGuestKubeconf(hostedCluster.hostedClustersKubeconfigFile)
exutil.By("start a goroutine to watch delete time for the hosted cluster svc router-default")
svcName := "router-default"
svcNamespace := "openshift-ingress"
startWatch(svcCtx, hostedCluster.hostedClustersKubeconfigFile, watchInfo{
resourceType: Service,
name: svcName,
namespace: svcNamespace,
deleteFunc: func(obj interface{}) {
svcObj, ok := obj.(*corev1.Service)
if !ok {
return
}
if svcObj.Name == svcName && !svcObj.DeletionTimestamp.IsZero() {
e2e.Logf("[deleteFunc] catched the deletion time of service %s in %s, deletionTimestamp is %s", svcObj.Name, svcObj.Namespace, svcObj.DeletionTimestamp.String())
err = os.WriteFile(svcDeleteTimeStampFile, []byte(svcObj.DeletionTimestamp.Format(time.RFC3339)), 0644)
if err != nil {
e2e.Logf("[deleteFunc] fail to write service %s in %s deletion time [%s] into local file %s, error %s", svcObj.Name, svcObj.Namespace, svcObj.DeletionTimestamp.String(), svcDeleteTimeStampFile, err.Error())
}
svcCancel()
}
},
})
exutil.By("start a goroutine to watch delete time for the hosted cluster ingresscontroller default")
icName := "default"
icNamespace := "openshift-ingress-operator"
startWatchOperator(operatorCtx, hostedCluster.hostedClustersKubeconfigFile, operatorWatchInfo{
group: "operator.openshift.io",
version: "v1",
resources: "ingresscontrollers",
name: icName,
namespace: icNamespace,
deleteFunc: func(obj []byte) {
ingressObj := operatorv1.IngressController{}
if unmarshalErr := json.Unmarshal(obj, &ingressObj); unmarshalErr != nil {
e2e.Logf("[deleteFunc] unmarshal ingresscontrollers %s in %s error %s", icName, icNamespace, unmarshalErr.Error())
return
}
if ingressObj.Name == icName && !ingressObj.DeletionTimestamp.IsZero() {
e2e.Logf("[deleteFunc] catched deletion time of ingresscontroller %s in %s, deletionTimestamp is %s", ingressObj.Name, ingressObj.Namespace, ingressObj.DeletionTimestamp.String())
err = os.WriteFile(ingressControllerDeleteTimeStampFile, []byte(ingressObj.DeletionTimestamp.Format(time.RFC3339)), 0644)
if err != nil {
e2e.Logf("[deleteFunc] fail to write ingresscontroller %s in %s deletion time [%s] into local file %s, error %s", ingressObj.Name, ingressObj.Namespace, ingressObj.DeletionTimestamp.String(), ingressControllerDeleteTimeStampFile, err.Error())
}
operatorCancel()
}
},
})
}) | |||||
test case | openshift/openshift-tests-private | a0bdd173-92a8-41d9-a190-074d7f2e1470 | NonPreRelease-Longduration-Author:fxie-Critical-68221-[HyperShiftINSTALL] Test the scheduler to only accept paired Nodes and check scheduler HCs has two Nodes [Disruptive] | ['"context"', '"fmt"', '"io"', '"os"', '"path"', '"path/filepath"', '"reflect"', '"sort"', '"strings"', '"text/template"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"k8s.io/apimachinery/pkg/api/errors"', '"k8s.io/apimachinery/pkg/fields"', '"k8s.io/apimachinery/pkg/labels"', '"k8s.io/apimachinery/pkg/util/diff"', 'errors2 "k8s.io/apimachinery/pkg/util/errors"', '"k8s.io/apimachinery/pkg/util/wait"', '"k8s.io/kubernetes/pkg/util/taints"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', '"k8s.io/utils/strings/slices"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("NonPreRelease-Longduration-Author:fxie-Critical-68221-[HyperShiftINSTALL] Test the scheduler to only accept paired Nodes and check scheduler HCs has two Nodes [Disruptive]", func() {
// Variables
var (
testCaseId = "68221"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
mhcTemplate = filepath.Join(fixturePath, "mhc.yaml")
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
mhcNamePrefix = fmt.Sprintf("%s-mhc", resourceNamePrefix)
adminKubeClient = oc.AdminKubeClient()
numWorkersExpected = 3
numMasters = 3
numMsetsExpected = 3
aggregatedErr []error
)
// Utilities
var (
findServingPairIdx = func(servingPairsNodeNames [][]string, podNodeName string) (int, bool) {
e2e.Logf("Finding serving pair index")
for idx, servingPairNodeNames := range servingPairsNodeNames {
if slices.Contains(servingPairNodeNames, podNodeName) {
return idx, true
}
}
return -1, false
}
checkPodNodeAffinity = func(pod *corev1.Pod, hostedClusterIdentifier string) {
nodeSelectorRequirements := pod.Spec.Affinity.NodeAffinity.
RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions
expectedNodeSelectorRequirements := []corev1.NodeSelectorRequirement{
{
Key: servingComponentNodesLabelKey,
Operator: corev1.NodeSelectorOpIn,
Values: []string{"true"},
},
{
Key: hypershiftClusterLabelKey,
Operator: corev1.NodeSelectorOpIn,
Values: []string{hostedClusterIdentifier},
},
}
// Assume the key to be unique across NodeSelectorRequirements
sort.Slice(nodeSelectorRequirements, func(i, j int) bool {
return nodeSelectorRequirements[i].Key < nodeSelectorRequirements[j].Key
})
sort.Slice(expectedNodeSelectorRequirements, func(i, j int) bool {
return expectedNodeSelectorRequirements[i].Key < expectedNodeSelectorRequirements[j].Key
})
// Pretty-print actual and expected NodeSelectorRequirements side-by-side for comparison in case they do not match
if !reflect.DeepEqual(nodeSelectorRequirements, expectedNodeSelectorRequirements) {
e2e.Logf(diff.ObjectGoPrintSideBySide(nodeSelectorRequirements, expectedNodeSelectorRequirements))
e2e.Failf("Unexpected node affinity for pod")
}
e2e.Logf("Node affinity expected")
}
// Delete serving node by scaling down the corresponding serving MachineSet
// Return the name of the MachineSet scaled down, so it can be scaled back up later
deleteServingNode = func(allNodeNames, allMsetNames []string, servingNodeName string) string {
g.GinkgoHelper()
servingNodeIdx := slices.Index(allNodeNames, servingNodeName)
o.Expect(servingNodeIdx).To(o.BeNumerically(">=", 0), fmt.Sprintf("Serving node %s not found in %v", servingNodeName, allNodeNames))
msetName := allMsetNames[servingNodeIdx]
doOcpReq(oc, OcpScale, true, "--replicas=0", fmt.Sprintf("%s/%s", mapiMachineset, msetName), "-n", machineAPINamespace)
exutil.WaitForNodeToDisappear(oc, servingNodeName, LongTimeout, DefaultTimeout/10)
return msetName
}
checkServingNodePairLabelsAndTaints = func(hostedClusterIdentifier string, servingPairIdx int) {
// Get serving nodes
nodeList, err := adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{
hypershiftClusterLabelKey: hostedClusterIdentifier,
osdfmPairedNodeLabelKey: fmt.Sprintf("serving-%v", servingPairIdx),
}).String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCount := len(nodeList.Items); nodeCount != 2 {
var nodeNames []string
for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name)
}
e2e.Failf("Expect 2 serving nodes but found %v (%v)", nodeCount, nodeNames)
}
for _, node := range nodeList.Items {
o.Expect(taints.TaintExists(node.Spec.Taints, &corev1.Taint{
Effect: "NoSchedule",
Key: hypershiftClusterLabelKey,
Value: hostedClusterIdentifier,
})).To(o.BeTrue())
}
}
// Not all fields of a resource are supported as field selectors.
// Here we list all deployments in the namespace for simplicity.
waitForHostedClusterDeploymentsReady = func(ns string) {
exutil.WaitForDeploymentsReady(context.Background(), func(ctx context.Context) (*appsv1.DeploymentList, error) {
return adminKubeClient.AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{})
}, exutil.IsDeploymentReady, LongTimeout, DefaultTimeout/10, false)
}
)
// Report all non-nil errors occurred in deferred functions
defer func() {
o.Expect(errors2.NewAggregate(aggregatedErr)).NotTo(o.HaveOccurred())
}()
// Needs MAPI for MachineSets
exutil.SkipNoCapabilities(oc, "MachineAPI")
if iaasPlatform != "aws" {
g.Skip(fmt.Sprintf("Running on %s while the test case is AWS-only, skipping", iaasPlatform))
}
exutil.By("Getting info about the management cluster")
msetNames := clusterinfra.ListWorkerMachineSetNames(oc)
// In theory the number of MachineSets does not have to be exactly 3 but should be at least 3.
// The following is enforced for alignment with the test case.
if numMset := len(msetNames); numMset != numMsetsExpected {
g.Skip("Expect %v worker MachineSets but found %v, skipping", numMsetsExpected, numMset)
}
mset1Name := msetNames[0]
mset2Name := msetNames[1]
mset3Name := msetNames[2]
e2e.Logf("Found worker MachineSets %v on the management cluster", msetNames)
nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), adminKubeClient)
o.Expect(err).NotTo(o.HaveOccurred())
// In theory the number of ready schedulable Nodes does not have to be exactly 3 but should be at least 3.
// The following is enforced for alignment with the test case.
numReadySchedulableNodes := len(nodeList.Items)
if numReadySchedulableNodes != numWorkersExpected {
g.Skip("Expect %v ready schedulable nodes but found %v, skipping", numWorkersExpected, numReadySchedulableNodes)
}
defer func() {
e2e.Logf("Making sure we ends up with the correct number of nodes and all of them are ready and schedulable")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, DefaultTimeout, true, func(_ context.Context) (bool, error) {
nodeList, err = adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{"node-role.kubernetes.io/worker": ""}).String(),
})
if err != nil {
return false, err
}
if numWorker := len(nodeList.Items); numWorker != numWorkersExpected {
e2e.Logf("Expect %v worker nodes but found %v, keep polling", numWorkersExpected, numWorker)
return false, nil
}
for _, node := range nodeList.Items {
if !e2enode.IsNodeReady(&node) {
e2e.Logf("Worker node %v not ready, keep polling", node.Name)
return false, nil
}
if len(node.Spec.Taints) > 0 {
e2e.Logf("Worker node tainted, keep polling", node.Name)
return false, nil
}
}
return true, nil
})
aggregatedErr = append(aggregatedErr, err)
}()
numNode := numReadySchedulableNodes + numMasters
e2e.Logf("Found %v nodes on the management cluster", numNode)
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Found management cluster region = %s", region)
// Create (non-spot) MachineSets based on existing ones for simplicity
exutil.By("Creating additional worker nodes through MachineSets on the management cluster")
e2e.Logf("Creating 2 MachineSets in the first AZ")
extraMset1Az1Name := mset1Name + fmt.Sprintf("-%s-1", testCaseId)
extraMset1Az1 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset1Az1Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset1Az1.DeleteMachineSet(oc))
}()
extraMset1Az1.CreateMachineSetBasedOnExisting(oc, mset1Name, false)
extraMset2Az1Name := mset1Name + fmt.Sprintf("-%s-2", testCaseId)
extraMset2Az1 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset2Az1Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset2Az1.DeleteMachineSet(oc))
}()
extraMset2Az1.CreateMachineSetBasedOnExisting(oc, mset1Name, false)
e2e.Logf("Creating a MachineSet in the second AZ")
extraMset1Az2Name := mset2Name + fmt.Sprintf("-%s-1", testCaseId)
extraMset1Az2 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset1Az2Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset1Az2.DeleteMachineSet(oc))
}()
extraMset1Az2.CreateMachineSetBasedOnExisting(oc, mset2Name, false)
e2e.Logf("Creating a MachineSet in the third AZ")
extraMset1Az3Name := mset3Name + fmt.Sprintf("-%s-1", testCaseId)
extraMset1Az3 := clusterinfra.MachineSetNonSpotDescription{
Name: extraMset1Az3Name,
Replicas: 1,
}
defer func() {
aggregatedErr = append(aggregatedErr, extraMset1Az3.DeleteMachineSet(oc))
}()
extraMset1Az3.CreateMachineSetBasedOnExisting(oc, mset3Name, false)
e2e.Logf("Waiting until the desired number of Nodes are ready")
_, err = e2enode.CheckReady(context.Background(), adminKubeClient, numNode+4, LongTimeout)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Getting Node name for each MachineSet and define node grouping")
allMsetNames := []string{mset1Name, mset2Name, mset3Name, extraMset1Az1Name, extraMset2Az1Name, extraMset1Az2Name, extraMset1Az3Name}
e2e.Logf("All MachineSets = %v", allMsetNames)
servingMsetNames := []string{mset1Name, mset2Name, extraMset1Az1Name, extraMset1Az2Name}
e2e.Logf("Serving MachineSets = %v", servingMsetNames)
var allWorkerNodeNames []string
for _, msetName := range allMsetNames {
allWorkerNodeNames = append(allWorkerNodeNames, exutil.GetNodeNameByMachineset(oc, msetName))
}
e2e.Logf("All worker nodes = %v", allWorkerNodeNames)
servingPair1NodeNames := []string{allWorkerNodeNames[0], allWorkerNodeNames[1]}
e2e.Logf("Serving pair 1 nodes = %v", servingPair1NodeNames)
nonServingNode := allWorkerNodeNames[2]
e2e.Logf("Non serving node = %v", nonServingNode)
servingPair2NodeNames := []string{allWorkerNodeNames[3], allWorkerNodeNames[5]}
e2e.Logf("Serving pair 2 nodes = %v", servingPair1NodeNames)
hoPodNodeNames := []string{allWorkerNodeNames[4], allWorkerNodeNames[6]}
e2e.Logf("Nodes for Hypershift Operator Pods = %v", hoPodNodeNames)
servingPairs := [][]string{servingPair1NodeNames, servingPair2NodeNames}
servingPairNodeNames := append(servingPair1NodeNames, servingPair2NodeNames...)
exutil.By("Creating a MachineHealthCheck for each serving MachineSet")
infraId := doOcpReq(oc, OcpGet, true, "infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}")
e2e.Logf("Found infra ID = %s", infraId)
for _, msetName := range servingMsetNames {
mhcName := fmt.Sprintf("%s-%s", mhcNamePrefix, msetName)
parsedTemplate := fmt.Sprintf("%s.template", mhcName)
mhc := mhcDescription{
Clusterid: infraId,
Maxunhealthy: "100%",
MachinesetName: msetName,
Name: mhcName,
Namespace: machineAPINamespace,
template: mhcTemplate,
}
defer mhc.deleteMhc(oc, parsedTemplate)
mhc.createMhc(oc, parsedTemplate)
}
exutil.By("Adding labels and taints on the serving node pairs and a non serving node")
e2e.Logf("Adding labels and taints on the serving node pairs")
defer func() {
for _, servingPairNodeNames := range servingPairs {
for _, nodeName := range servingPairNodeNames {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", nodeName, servingComponentNodesTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, servingComponentNodesLabelKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, osdfmPairedNodeLabelKey+"-").Execute()
}
}
}()
for idx, servingPairNodeNames := range servingPairs {
for _, nodeName := range servingPairNodeNames {
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", nodeName, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", nodeName, servingComponentNodesLabel)
doOcpReq(oc, OcpLabel, true, "node", nodeName, fmt.Sprintf("%s=serving-%v", osdfmPairedNodeLabelKey, idx))
}
}
e2e.Logf("Adding labels and taints on the non serving node")
defer func() {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", nonServingNode, nonServingComponentTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nonServingNode, nonServingComponentLabelKey+"-").Execute()
}()
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", nonServingNode, nonServingComponentTaint)
doOcpReq(oc, OcpLabel, true, "node", nonServingNode, nonServingComponentLabel)
exutil.By("Installing the Hypershift Operator")
defer func() {
aggregatedErr = append(aggregatedErr, os.RemoveAll(tempDir))
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
// At this point HO Pods are ready so no need to poll
e2e.Logf("Making sure HO Pods are scheduled on the nodes without taints")
podList, err := adminKubeClient.CoreV1().Pods(hypershiftOperatorNamespace).List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{"app": "operator"}).String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podList.Items).To(o.HaveLen(2))
var actualHoPodNodeNames []string
for _, pod := range podList.Items {
actualHoPodNodeNames = append(actualHoPodNodeNames, pod.Spec.NodeName)
}
sort.Strings(hoPodNodeNames)
sort.Strings(actualHoPodNodeNames)
o.Expect(hoPodNodeNames).To(o.Equal(actualHoPodNodeNames))
exutil.By("Creating a hosted cluster with request serving annotation")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
// The number of worker nodes (of the hosted cluster) is irrelevant, so we will only create one.
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withAnnotation(hcTopologyAnnotationKey, "dedicated-request-serving-components").
withReleaseImage(release)
defer installHelper.deleteHostedClustersManual(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
hostedClusterIdentifier := fmt.Sprintf("%s-%s", hostedCluster.namespace, hostedCluster.name)
e2e.Logf("Hosted cluster created with identifier = %s", hostedClusterIdentifier)
// At this point (minutes after the installation of the Hypershift operator)
// we expect all labels and taints to be set by controller so no need for polling.
exutil.By("Making sure all hosted cluster components are correctly scheduled")
// No need to check tolerations as the correct scheduling of Pods implies correct toleration settings
exutil.By("Making sure the correct labels and nodeAffinities are set on the request serving components")
requestServingComponentLabelSelector := labels.SelectorFromSet(map[string]string{servingComponentPodLabelKey: "true"})
podList, err = adminKubeClient.CoreV1().Pods(hostedClusterIdentifier).List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items)).NotTo(o.BeZero(), "Empty pod list")
var servingNodeName string
for _, pod := range podList.Items {
podNodeName := pod.Spec.NodeName
if requestServingComponentLabelSelector.Matches(labels.Set(pod.Labels)) {
e2e.Logf("Pod %s belongs to a request serving component", pod.Name)
// Make sure the request serving Pod is correctly scheduled
if len(servingNodeName) == 0 {
servingNodeName = podNodeName
o.Expect(servingPairNodeNames).To(o.ContainElements(servingNodeName), "Pod scheduled on a non serving node")
e2e.Logf("Found serving node = %v", servingNodeName)
} else {
o.Expect(servingNodeName).To(o.Equal(podNodeName), fmt.Sprintf("Expect Pod to be scheduled on serving node %s but scheduled on %s", servingNodeName, podNodeName))
}
// Make sure the request serving Pod has the correct nodeAffinities
checkPodNodeAffinity(&pod, hostedClusterIdentifier)
continue
}
e2e.Logf("Pod %s belongs to a non request serving component", pod.Name)
// Make sure the non request serving Pod is correctly scheduled
o.Expect(nonServingNode).To(o.Equal(podNodeName), fmt.Sprintf("Expect Pod to be scheduled on non serving node %s but scheduled on %s", nonServingNode, podNodeName))
}
o.Expect(servingNodeName).NotTo(o.BeEmpty(), "Serving node not found")
exutil.By("Making sure that labels and taints are correctly set on the serving nodes pair")
servingPairIdx, idxFound := findServingPairIdx(servingPairs, servingNodeName)
o.Expect(idxFound).To(o.BeTrue())
e2e.Logf("Found serving pair index = %v; serving nodes = %v", servingPairIdx, servingPairs[servingPairIdx])
checkServingNodePairLabelsAndTaints(hostedClusterIdentifier, servingPairIdx)
exutil.By("Making sure the cluster-scheduled annotation is set on the HostedCluster")
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostedCluster", hostedCluster.name, "-n", hostedCluster.namespace, `-o=jsonpath={.metadata.annotations.hypershift\.openshift\.io/cluster-scheduled}`).Outputs()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(stdout).To(o.ContainSubstring("true"))
exutil.By("Delete the serving node by scaling down the corresponding MachineSet")
var msetName1 string
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run(OcpScale).Args("--replicas=1", fmt.Sprintf("%s/%s", mapiMachineset, msetName1), "-n", machineAPINamespace).Execute()
aggregatedErr = append(aggregatedErr, err)
}()
msetName1 = deleteServingNode(allWorkerNodeNames, allMsetNames, servingNodeName)
exutil.By("Making sure serving components are moved to the other node in the serving node pair")
e2e.Logf("Finding the new (expected) serving node")
var servingNodeName2 string
for _, nodeName := range servingPairs[servingPairIdx] {
if servingNodeName != nodeName {
servingNodeName2 = nodeName
break
}
}
o.Expect(servingNodeName2).NotTo(o.Equal(servingNodeName))
e2e.Logf("Making sure serving component Pods are moved to the new serving node")
waitForHostedClusterDeploymentsReady(hostedClusterIdentifier)
podList, err = adminKubeClient.CoreV1().Pods(hostedClusterIdentifier).List(context.Background(), metav1.ListOptions{
LabelSelector: requestServingComponentLabelSelector.String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items)).NotTo(o.BeZero(), "Empty pod list")
for _, pod := range podList.Items {
e2e.Logf("Pod %s belongs to a request serving component", pod.Name)
o.Expect(servingNodeName2).To(o.Equal(pod.Spec.NodeName), fmt.Sprintf("Expect Pod to be scheduled on serving node %s but scheduled on %s", servingNodeName2, pod.Spec.NodeName))
}
exutil.By("Delete the new serving node by scaling down the corresponding MachineSet")
var msetName2 string
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run(OcpScale).Args("--replicas=1", fmt.Sprintf("%s/%s", mapiMachineset, msetName2), "-n", machineAPINamespace).Execute()
aggregatedErr = append(aggregatedErr, err)
}()
msetName2 = deleteServingNode(allWorkerNodeNames, allMsetNames, servingNodeName2)
exutil.By("Making sure that serving components are moved to a node belonging to the other serving node pair")
waitForHostedClusterDeploymentsReady(hostedClusterIdentifier)
// servingPairIdx = 0 or 1
servingPairIdx2 := 1 - servingPairIdx
e2e.Logf("New serving pair index = %v; serving nodes = %v", servingPairIdx2, servingPairs[servingPairIdx2])
podList, err = adminKubeClient.CoreV1().Pods(hostedClusterIdentifier).List(context.Background(), metav1.ListOptions{
LabelSelector: requestServingComponentLabelSelector.String(),
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items)).NotTo(o.BeZero(), "Empty pod list")
var servingNodeName3 string
for _, pod := range podList.Items {
e2e.Logf("Pod %s belongs to a request serving component", pod.Name)
podNodeName := pod.Spec.NodeName
if len(servingNodeName3) == 0 {
servingNodeName3 = podNodeName
o.Expect(servingPairs[servingPairIdx2]).To(o.ContainElements(servingNodeName3))
e2e.Logf("Found serving node = %v", servingNodeName3)
} else {
o.Expect(servingNodeName3).To(o.Equal(podNodeName))
}
}
o.Expect(servingNodeName3).NotTo(o.BeEmpty(), "Serving node not found")
exutil.By("Making sure that labels and taints are correctly set on the serving node pair")
checkServingNodePairLabelsAndTaints(hostedClusterIdentifier, servingPairIdx2)
exutil.By("Destroying the hosted cluster")
installHelper.destroyAWSHostedClusters(createCluster)
exutil.By("Making sure serving nodes are deleted")
for _, node := range servingPairs[servingPairIdx2] {
exutil.WaitForNodeToDisappear(oc, node, LongTimeout, DefaultTimeout/10)
}
exutil.By("Making sure two new nodes are created by MAPI")
// 4 new MachineSets, 2 scaled down, 2 deleted and then re-created => 2 additional nodes
nodeListFinal, err := e2enode.CheckReady(context.Background(), adminKubeClient, numNode+2, LongTimeout)
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("Making sure that the two new nodes does not contain specific label and taint")
var newNodeCount int
for _, node := range nodeListFinal {
nodeName := node.Name
if slices.Contains(allWorkerNodeNames, nodeName) {
e2e.Logf("Skip old worker node %s", nodeName)
continue
}
if _, ok := node.Labels["node-role.kubernetes.io/master"]; ok {
e2e.Logf("Skip master node %s", nodeName)
continue
}
e2e.Logf("Inspecting labels and taints on new worker node/%s", nodeName)
newNodeCount++
_, ok := node.Labels[hypershiftClusterLabelKey]
o.Expect(ok).To(o.BeFalse())
o.Expect(taints.TaintExists(node.Spec.Taints, &corev1.Taint{
Effect: "NoSchedule",
Key: hypershiftClusterLabelKey,
Value: hostedClusterIdentifier,
})).To(o.BeFalse())
}
o.Expect(newNodeCount).To(o.Equal(2))
}) | |||||
test case | openshift/openshift-tests-private | 1cadb188-0a04-4a69-bcf7-8c75a3a62e5e | Longduration-NonPreRelease-Author:heli-High-64847-[HyperShiftINSTALL] Ensure service type of loadBalancer associated with ingress controller is deleted by ingress-controller role [Serial] | ['"fmt"', '"io"', '"os"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', 'appsv1 "k8s.io/api/apps/v1"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/util/wait"', 'operatorv1 "github.com/openshift/api/operator/v1"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:heli-High-64847-[HyperShiftINSTALL] Ensure service type of loadBalancer associated with ingress controller is deleted by ingress-controller role [Serial]", func() {
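		// Test flow (summary of the steps below): install the HyperShift operator, create a public AWS
		// hosted cluster, pause the ingress-operator via the debug-deployments annotation, create a
		// LoadBalancer Service in the guest cluster, then verify during teardown that hosted cluster
		// deletion stays blocked on load balancer cleanup until the ingress-operator is restored.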
if iaasPlatform != "aws" {
g.Skip("IAAS platform is " + iaasPlatform + " while 64847 is for AWS - skipping test ...")
}
caseID := "64847"
dir := "/tmp/hypershift" + caseID
defer os.RemoveAll(dir)
err := os.MkdirAll(dir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
namePrefix = fmt.Sprintf("64847-%s", strings.ToLower(exutil.RandStrDefault()))
hcName = "hc-" + strings.ToLower(namePrefix)
bucketName = "hc-" + strings.ToLower(namePrefix)
svcTempFile = dir + "/svc.yaml"
svcName = "test-lb-svc-64847"
testSVC = fmt.Sprintf(`
apiVersion: v1
kind: Service
metadata:
name: %s
namespace: default
spec:
ports:
- port: 80
targetPort: 8080
selector:
name: test-pod
type: LoadBalancer
`, svcName)
)
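		// The Service manifest above only needs to provision a cloud load balancer: its selector does not
		// have to match any running pod, since the test only verifies that the LB resource blocks (and is
		// cleaned up during) hosted cluster deletion.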
exutil.By("install hypershift operator")
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: dir,
iaasPlatform: iaasPlatform,
installType: Public,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("create a hosted cluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withReleaseImage(release)
hcpNS := createCluster.Namespace + "-" + hcName
defer func() {
exutil.By("destroy hosted cluster in one goroutine")
go func() {
defer g.GinkgoRecover()
installHelper.destroyAWSHostedClusters(createCluster)
}()
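			// The guest kubeconfig is only set once the hosted cluster has been created and its kubeconfig
			// extracted (further below), so the in-guest checks are skipped if creation never got that far.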
if oc.GetGuestKubeconf() != "" {
exutil.By("check LB test SVC is deleted")
				o.Eventually(func() bool {
					svcOutput, err := oc.AsGuestKubeconf().Run(OcpGet).Args("svc", svcName, "--ignore-not-found", `-o=jsonpath={.metadata.name}`).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					if svcOutput == "" {
						return true
					}
					e2e.Logf("Still waiting for the LB test SVC to be deleted by the HCCO")
					return false
				}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Timed out waiting for the LB test SVC to be deleted")
exutil.By("check HCCO logs that deletion is stuck by LB SVC resources")
routerDefaultSVC, err := oc.AsGuestKubeconf().Run(OcpGet).Args("-n", "openshift-ingress", "svc", "router-default", "--ignore-not-found", `-o=jsonpath={.metadata.name}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(routerDefaultSVC).Should(o.Equal("router-default"))
hccoPodName := doOcpReq(oc, OcpGet, true, "pod", "-n", hcpNS, "-lapp=hosted-cluster-config-operator", "--ignore-not-found", `-o=jsonpath={.items[].metadata.name}`)
_, err = exutil.WaitAndGetSpecificPodLogs(oc, hcpNS, "", hccoPodName, "'Ensuring load balancers are removed'")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("remove ingress-operator debug annotation and scale up ingress-operator")
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hcName, "-n", createCluster.Namespace, "hypershift.openshift.io/debug-deployments-")
doOcpReq(oc, OcpScale, true, "deployment", "ingress-operator", "-n", hcpNS, "--replicas=1")
}
exutil.By("wait until the hosted cluster is deleted successfully")
o.Eventually(pollGetHostedClusters(oc, createCluster.Namespace), LongTimeout, LongTimeout/10).ShouldNot(o.ContainSubstring(hcName), "destroy AWS HostedClusters error")
}()
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
installHelper.createHostedClusterKubeconfig(createCluster, hostedCluster)
oc.SetGuestKubeconf(hostedCluster.getHostedClusterKubeconfigFile())
exutil.By("annotate the hosted cluster to debug ingress operator")
doOcpReq(oc, OcpAnnotate, true, "hostedcluster", hostedCluster.name, "-n", hostedCluster.namespace, "hypershift.openshift.io/debug-deployments=ingress-operator")
o.Eventually(func() bool {
names := doOcpReq(oc, OcpGet, false, "pod", "-n", hcpNS, "--ignore-not-found", "-lapp=ingress-operator", "-o=jsonpath={.items[*].metadata.name}")
if names == "" {
return true
}
e2e.Logf("Still waiting for the ingress-operator pods scaling down to zero")
return false
}, DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue(), "Timed out waiting for the the ingress-operator pods scaling down to zero")
o.Expect(doOcpReq(oc, OcpGet, true, "deploy", "ingress-operator", "-n", hcpNS, "--ignore-not-found", "-o=jsonpath={.spec.replicas}")).Should(o.Equal("0"))
exutil.By("create LB SVC on the hosted cluster")
err = os.WriteFile(svcTempFile, []byte(testSVC), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsGuestKubeconf().WithoutNamespace().Run(OcpCreate).Args("-f", svcTempFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}) |