element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
test case | openshift/openshift-tests-private | caaf5357-20c4-4bba-9db6-6aa51bc12ece | NonPreRelease-Longduration-Author:fxie-Critical-69771-[HyperShiftINSTALL] When initial non-serving nodes fill up new pods prefer to go to untainted default nodes instead of scaling non-serving ones [Disruptive] | ['"context"', '"fmt"', '"io"', '"os"', '"path"', '"path/filepath"', '"strings"', '"text/template"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', 'appsv1 "k8s.io/api/apps/v1"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/labels"', '"k8s.io/apimachinery/pkg/util/wait"', '"k8s.io/kubernetes/pkg/util/taints"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'operatorv1 "github.com/openshift/api/operator/v1"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("NonPreRelease-Longduration-Author:fxie-Critical-69771-[HyperShiftINSTALL] When initial non-serving nodes fill up new pods prefer to go to untainted default nodes instead of scaling non-serving ones [Disruptive]", func() {
// Variables
var (
testCaseId = "69771"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
mhcTemplate = filepath.Join(fixturePath, "mhc.yaml")
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hc1Name = fmt.Sprintf("%s-hc-1", resourceNamePrefix)
hc2Name = fmt.Sprintf("%s-hc-2", resourceNamePrefix)
mhcNamePrefix = fmt.Sprintf("%s-mhc", resourceNamePrefix)
adminKubeClient = oc.AdminKubeClient()
numWorkersExpected = 3
numMasters = 3
numMsetsExpected = 3
errList []error
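// ClusterAutoscaler manifest with aggressive (10s) scale-down delays so that unneeded nodes are reclaimed quickly during the test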
clusterAutoscaler = `apiVersion: "autoscaling.openshift.io/v1"
kind: "ClusterAutoscaler"
metadata:
name: "default"
spec:
scaleDown:
enabled: true
delayAfterAdd: 10s
delayAfterDelete: 10s
delayAfterFailure: 10s
unneededTime: 10s`
clusterAutoscalerFileName = fmt.Sprintf("%s-clusterautoscaler.yaml", resourceNamePrefix)
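// MachineAutoscaler template: %[1]s is substituted with the target MachineSet name; autoscaling is bounded to 1-3 replicas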
machineAutoscalerTemplate = `apiVersion: "autoscaling.openshift.io/v1beta1"
kind: "MachineAutoscaler"
metadata:
name: %[1]s
namespace: "openshift-machine-api"
spec:
minReplicas: 1
maxReplicas: 3
scaleTargetRef:
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
name: %[1]s`
machineAutoscalerFileName = fmt.Sprintf("%s-machineautoscaler.yaml", resourceNamePrefix)
)
// Aggregated error handling
defer func() {
o.Expect(errors2.NewAggregate(errList)).NotTo(o.HaveOccurred())
}()
exutil.By("Inspecting platform")
exutil.SkipNoCapabilities(oc, "MachineAPI")
exutil.SkipIfPlatformTypeNot(oc, "aws")
msetNames := clusterinfra.ListWorkerMachineSetNames(oc)
// In theory the number of MachineSets does not have to be exactly 3 but should be at least 3.
// The following enforcement is for alignment with the test case only.
if numMset := len(msetNames); numMset != numMsetsExpected {
g.Skip("Expect %v worker machinesets but found %v, skipping", numMsetsExpected, numMset)
}
e2e.Logf("Found worker machinesets %v on the management cluster", msetNames)
nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), adminKubeClient)
o.Expect(err).NotTo(o.HaveOccurred())
// In theory the number of ready schedulable Nodes does not have to be exactly 3 but should be at least 3.
// The following is enforced for alignment with the test case only.
numReadySchedulableNodes := len(nodeList.Items)
if numReadySchedulableNodes != numWorkersExpected {
g.Skip("Expect %v ready schedulable nodes but found %v, skipping", numWorkersExpected, numReadySchedulableNodes)
}
numNode := numReadySchedulableNodes + numMasters
e2e.Logf("Found %v nodes on the management cluster", numNode)
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Found management cluster region = %s", region)
defer func() {
e2e.Logf("Making sure we ends up with the correct number of nodes and all of them are ready and schedulable")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, LongTimeout, true, func(_ context.Context) (bool, error) {
nodeList, err := adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: labels.Set(map[string]string{"node-role.kubernetes.io/worker": ""}).String(),
})
if err != nil {
return false, err
}
if numWorker := len(nodeList.Items); numWorker != numWorkersExpected {
e2e.Logf("Expect %v worker nodes but found %v, keep polling", numWorkersExpected, numWorker)
return false, nil
}
for _, node := range nodeList.Items {
if !e2enode.IsNodeReady(&node) {
e2e.Logf("Worker node %v not ready, keep polling", node.Name)
return false, nil
}
if len(node.Spec.Taints) > 0 {
e2e.Logf("Worker node tainted, keep polling", node.Name)
return false, nil
}
if _, ok := node.Labels[hypershiftClusterLabelKey]; ok {
e2e.Logf("Worker node still has the %v label, keep polling", hypershiftClusterLabelKey)
return false, nil
}
}
return true, nil
})
errList = append(errList, err)
}()
exutil.By("Creating autoscalers")
e2e.Logf("Creating ClusterAutoscaler")
err = os.WriteFile(clusterAutoscalerFileName, []byte(clusterAutoscaler), os.ModePerm)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", clusterAutoscalerFileName).Execute()
errList = append(errList, err)
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", clusterAutoscalerFileName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Creating MachineAutoscaler")
err = os.WriteFile(machineAutoscalerFileName, []byte(fmt.Sprintf(machineAutoscalerTemplate, msetNames[2])), os.ModePerm)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", machineAutoscalerFileName).Execute()
errList = append(errList, err)
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineAutoscalerFileName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating extra worker nodes")
var extraMsetNames []string
for _, msetName := range msetNames {
extraMsetName := fmt.Sprintf("%s-%s-1", msetName, testCaseId)
extraMset := clusterinfra.MachineSetNonSpotDescription{
Name: extraMsetName,
Replicas: 1,
}
defer func() {
errList = append(errList, extraMset.DeleteMachineSet(oc))
}()
extraMset.CreateMachineSetBasedOnExisting(oc, msetName, false)
extraMsetNames = append(extraMsetNames, extraMsetName)
}
e2e.Logf("Waiting until all nodes are ready")
_, err = e2enode.CheckReady(context.Background(), adminKubeClient, numNode+len(extraMsetNames), LongTimeout)
o.Expect(err).ShouldNot(o.HaveOccurred())
/*
Worker nodes at this point:
Worker 1 <-> machineset 1 <-> AZ1
Worker 2 <-> machineset 2 <-> AZ2
Worker 3 <-> machineset 3 <-> AZ3 <-> non-serving node <-> autoscaling enabled
Extra worker 1 <-> extra machineset 1 (based on machineset 1) <-> AZ1
Extra worker 2 <-> extra machineset 2 (based on machineset 2) <-> AZ2
Extra worker 3 <-> extra machineset 3 (based on machineset 3) <-> AZ3 <-> default worker node
Serving node pairs to define:
Serving pair 1 <-> dedicated for serving components of HostedCluster 1 <-> worker 1 + worker 2
Serving pair 2 <-> dedicated for serving components of HostedCluster 2 <-> extra worker 1 + extra worker 2
*/
exutil.By("Defining serving pairs")
e2e.Logf("Getting node name for each machineset")
var workerNodeNames []string
msetNames = append(msetNames, extraMsetNames...)
for _, msetName := range msetNames {
workerNodeNames = append(workerNodeNames, exutil.GetNodeNameByMachineset(oc, msetName))
}
e2e.Logf("Found worker nodes %s on the management cluster", workerNodeNames)
servingPair1Indices := []int{0, 1}
var servingPair1NodesNames, servingPair1MsetNames []string
for _, idx := range servingPair1Indices {
servingPair1NodesNames = append(servingPair1NodesNames, workerNodeNames[idx])
servingPair1MsetNames = append(servingPair1MsetNames, msetNames[idx])
}
e2e.Logf("Serving pair 1 nodes = %v, machinesets = %v", servingPair1NodesNames, servingPair1MsetNames)
nonServingIndex := 2
nonServingMsetName := msetNames[nonServingIndex]
nonServingNodeName := workerNodeNames[nonServingIndex]
e2e.Logf("Non serving node = %v, machineset = %v", nonServingNodeName, nonServingMsetName)
servingPair2Indices := []int{3, 4}
var servingPair2NodeNames, servingPair2MsetNames []string
for _, idx := range servingPair2Indices {
servingPair2NodeNames = append(servingPair2NodeNames, workerNodeNames[idx])
servingPair2MsetNames = append(servingPair2MsetNames, msetNames[idx])
}
e2e.Logf("Serving pair 2 nodes = %v, machinesets = %v", servingPair2NodeNames, servingPair2MsetNames)
defaultWorkerIndex := 5
defaultWorkerNodeName := workerNodeNames[defaultWorkerIndex]
defaultWorkerMsetName := msetNames[defaultWorkerIndex]
e2e.Logf("Default worker node = %v, machineset = %v", defaultWorkerNodeName, defaultWorkerMsetName)
exutil.By("Creating a MachineHealthCheck for each serving machineset")
infraId := doOcpReq(oc, OcpGet, true, "infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}")
e2e.Logf("Found infra ID = %s", infraId)
for _, msetName := range append(servingPair1MsetNames, servingPair2MsetNames...) {
mhcName := fmt.Sprintf("%s-%s", mhcNamePrefix, msetName)
parsedTemplate := fmt.Sprintf("%s.template", mhcName)
mhc := mhcDescription{
Clusterid: infraId,
Maxunhealthy: "100%",
MachinesetName: msetName,
Name: mhcName,
Namespace: machineAPINamespace,
template: mhcTemplate,
}
defer mhc.deleteMhc(oc, parsedTemplate)
mhc.createMhc(oc, parsedTemplate)
}
exutil.By("Adding labels and taints to serving pair 1 nodes and the non serving node")
// The osd-fleet-manager.openshift.io/paired-nodes label is not a must for request serving nodes
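// The taint keeps ordinary workloads off the serving nodes, while the label marks them as dedicated capacity for request-serving components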
e2e.Logf("Adding labels and taints to serving pair 1 nodes")
defer func() {
for _, node := range servingPair1NodesNames {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", node, servingComponentNodesTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, servingComponentNodesLabelKey+"-").Execute()
}
}()
for _, node := range servingPair1NodesNames {
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", node, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", node, servingComponentNodesLabel)
}
e2e.Logf("Adding labels and taints to the non serving node")
defer func() {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", nonServingNodeName, nonServingComponentTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nonServingNodeName, nonServingComponentLabelKey+"-").Execute()
}()
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", nonServingNodeName, nonServingComponentTaint)
doOcpReq(oc, OcpLabel, true, "node", nonServingNodeName, nonServingComponentLabel)
exutil.By("Installing the Hypershift Operator")
defer func() {
errList = append(errList, os.RemoveAll(tempDir))
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer func() {
// This is required otherwise the tainted serving nodes will not be removed
exutil.By("Waiting for the serving nodes to be removed before uninstalling the Hypershift Operator")
for _, node := range append(servingPair1NodesNames, servingPair2NodeNames...) {
exutil.WaitForNodeToDisappear(oc, node, LongTimeout, DefaultTimeout/10)
}
installHelper.hyperShiftUninstall()
}()
installHelper.hyperShiftInstall()
exutil.By("Creating hosted cluster 1 with request serving annotation")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster1 := installHelper.createClusterAWSCommonBuilder().
withName(hc1Name).
withNodePoolReplicas(1).
withAnnotation(hcTopologyAnnotationKey, "dedicated-request-serving-components").
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster1)
_ = installHelper.createAWSHostedClusters(createCluster1)
exutil.By("Adding labels and taints to serving pair 2 nodes")
// The osd-fleet-manager.openshift.io/paired-nodes label is not a must for request serving nodes
defer func() {
for _, node := range servingPair2NodeNames {
_ = oc.AsAdmin().WithoutNamespace().Run("adm", "taint").Args("node", node, servingComponentNodesTaintKey+"-").Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, servingComponentNodesLabelKey+"-").Execute()
}
}()
for _, node := range servingPair2NodeNames {
doOcpReq(oc, OcpAdm, true, OcpTaint, "node", node, servingComponentNodesTaint)
doOcpReq(oc, OcpLabel, true, "node", node, servingComponentNodesLabel)
}
exutil.By("Creating hosted cluster 2 with request serving annotation")
createCluster2 := installHelper.createClusterAWSCommonBuilder().
withName(hc2Name).
withNodePoolReplicas(1).
withAnnotation(hcTopologyAnnotationKey, "dedicated-request-serving-components").
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster2)
hostedCluster2 := installHelper.createAWSHostedClusters(createCluster2)
hostedCluster2Identifier := fmt.Sprintf("%s-%s", hostedCluster2.namespace, hostedCluster2.name)
e2e.Logf("Hosted cluster 2 created with identifier = %s", hostedCluster2Identifier)
exutil.By("Making sure that non-serving components are scheduled on a default worker node after filling up the non serving node")
podList, err := adminKubeClient.CoreV1().Pods(hostedCluster2Identifier).List(context.Background(), metav1.ListOptions{})
o.Expect(err).ShouldNot(o.HaveOccurred())
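// Non-serving pods may land on the (already full) non-serving node or on the untainted default worker node; at least one must reach the default worker, and any other placement fails the test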
var podScheduledOnDefaultWorkerNode bool
for _, pod := range podList.Items {
podName := pod.Name
if isRequestServingComponent(podName) {
e2e.Logf("Pod %v belongs to a request serving component, skipping", podName)
continue
}
e2e.Logf("Pod %v belongs to a non-serving component", podName)
switch nodeName := pod.Spec.NodeName; nodeName {
case nonServingNodeName:
e2e.Logf("Pod scheduled on the non-serving node, expected")
case defaultWorkerNodeName:
e2e.Logf("Pod scheduled on the default worker node, expected")
podScheduledOnDefaultWorkerNode = true
default:
e2e.Failf("Pod scheduled on an unexpected node %v", nodeName)
}
}
o.Expect(podScheduledOnDefaultWorkerNode).To(o.BeTrue(), "Nothing scheduled on the default worker node")
}) | |||||
test case | openshift/openshift-tests-private | db22b597-eb72-4388-a0da-cc851c155d94 | NonPreRelease-Longduration-Author:fxie-Critical-67783-[HyperShiftINSTALL] The environment variable OPENSHIFT_IMG_OVERRIDES in CPO deployment should retain mirroring order under a source compared to the original mirror/source listing in the ICSP/IDMSs in the management cluster [Disruptive] | ['"context"', '"fmt"', '"io"', '"os"', '"path"', '"strings"', '"text/template"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("NonPreRelease-Longduration-Author:fxie-Critical-67783-[HyperShiftINSTALL] The environment variable OPENSHIFT_IMG_OVERRIDES in CPO deployment should retain mirroring order under a source compared to the original mirror/source listing in the ICSP/IDMSs in the management cluster [Disruptive]", func() {
exutil.SkipIfPlatformTypeNot(oc, "aws")
type nodesSchedulabilityStatus bool
// Variables
var (
testCaseId = "67783"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
icspName = fmt.Sprintf("%s-icsp", resourceNamePrefix)
icspSource = "quay.io/openshift-release-dev/ocp-release"
icspMirrors = []string{
"quay.io/openshift-release-dev/ocp-release",
"pull.q1w2.quay.rhcloud.com/openshift-release-dev/ocp-release",
}
icspTemplate = template.Must(template.New("icspTemplate").Parse(`apiVersion: operator.openshift.io/v1alpha1
kind: ImageContentSourcePolicy
metadata:
name: {{ .Name }}
spec:
repositoryDigestMirrors:
- mirrors:
{{- range .Mirrors }}
- {{ . }}
{{- end }}
source: {{ .Source }}`))
adminKubeClient = oc.AdminKubeClient()
errList []error
allNodesSchedulable nodesSchedulabilityStatus = true
atLeastOneNodeUnschedulable nodesSchedulabilityStatus = false
)
// Utilities
var (
checkNodesSchedulability = func(expectedNodeSchedulability nodesSchedulabilityStatus) func(_ context.Context) (bool, error) {
return func(_ context.Context) (bool, error) {
nodeList, err := adminKubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return false, err
}
for _, node := range nodeList.Items {
if !e2enode.IsNodeSchedulable(&node) {
e2e.Logf("Node %s unschedulable", node.Name)
return bool(!expectedNodeSchedulability), nil
}
}
// All nodes are schedulable if we reach here
return bool(expectedNodeSchedulability), nil
}
}
)
// Aggregated error handling
defer func() {
o.Expect(errors2.NewAggregate(errList)).NotTo(o.HaveOccurred())
}()
exutil.By("Checking if there's a need to skip the test case")
// ICSPs are not taken into account if IDMSs are found on the management cluster.
// It's ok to proceed even if the IDMS type is not registered to the API server, so no need to handle the error here.
idmsList, _ := oc.AdminConfigClient().ConfigV1().ImageDigestMirrorSets().List(context.Background(), metav1.ListOptions{})
if len(idmsList.Items) > 0 {
g.Skip("Found IDMSs, skipping")
}
// Also make sure the source (for which we'll declare mirrors) is only used by the ICSP we create.
// The ICSP type is still v1alpha1, so avoid using a strongly-typed client here for future-proofing.
existingICSPSources, _, err := oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("ImageContentSourcePolicy", "-o=jsonpath={.items[*].spec.repositoryDigestMirrors[*].source}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(existingICSPSources, icspSource) {
g.Skip("An existing ICSP declares the source we'll be using, skipping")
}
exutil.By("Creating an ICSP on the management cluster")
e2e.Logf("Creating temporary directory")
defer func() {
errList = append(errList, os.RemoveAll(tempDir))
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var icspFile *os.File
icspFile, err = os.CreateTemp(tempDir, resourceNamePrefix)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
errList = append(errList, icspFile.Close())
}()
e2e.Logf("Parsed template: ")
err = icspTemplate.Execute(io.MultiWriter(g.GinkgoWriter, icspFile), &struct {
Name string
Source string
Mirrors []string
}{Name: icspName, Source: icspSource, Mirrors: icspMirrors})
o.Expect(err).NotTo(o.HaveOccurred(), "Error executing ICSP template")
e2e.Logf("Creating the parsed template")
defer func() {
// After the deletion of an ICSP, the MCO updates CRI-O configurations, cordoning the nodes in turn.
exutil.By("Restoring the management cluster")
e2e.Logf("Deleting the ICSP")
err = oc.AsAdmin().WithoutNamespace().Run(OcpDelete).Args("-f", icspFile.Name()).Execute()
errList = append(errList, err)
e2e.Logf("Waiting for the first node to be cordoned")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, DefaultTimeout, true, checkNodesSchedulability(atLeastOneNodeUnschedulable))
errList = append(errList, err)
e2e.Logf("Waiting for all nodes to be un-cordoned")
err = wait.PollUntilContextTimeout(context.Background(), DefaultTimeout/10, LongTimeout, true, checkNodesSchedulability(allNodesSchedulable))
errList = append(errList, err)
}()
err = oc.AsAdmin().WithoutNamespace().Run(OcpCreate).Args("-f", icspFile.Name()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// After the creation of an ICSP, the MCO updates CRI-O configurations in a way
// that should not make the nodes un-schedulable. Make sure it is the case here.
e2e.Logf("Making sure that management cluster is stable")
// Simulate o.Consistently
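// checkNodesSchedulability(atLeastOneNodeUnschedulable) only returns true once a node turns unschedulable, so the poll is expected to run into context.DeadlineExceeded if every node stays schedulable for the whole ShortTimeout window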
err = wait.PollUntilContextTimeout(context.Background(), ShortTimeout/10, ShortTimeout, true, checkNodesSchedulability(atLeastOneNodeUnschedulable))
o.Expect(err).To(o.BeAssignableToTypeOf(context.DeadlineExceeded))
exutil.By("Installing the Hypershift Operator")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Found management cluster region = %s", region)
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a hosted cluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withReleaseImage(release)
defer installHelper.destroyAWSHostedClusters(createCluster)
hc := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Making sure that OPENSHIFT_IMG_OVERRIDES retains mirroring order from ICSP")
// The ICSP created has one and only one source.
// We expect parts like source=mirrorX to be adjacent to each other within OPENSHIFT_IMG_OVERRIDES
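// e.g. "quay.io/openshift-release-dev/ocp-release=quay.io/openshift-release-dev/ocp-release,quay.io/openshift-release-dev/ocp-release=pull.q1w2.quay.rhcloud.com/openshift-release-dev/ocp-release"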
var parts []string
for _, mirror := range icspMirrors {
parts = append(parts, fmt.Sprintf("%s=%s", icspSource, mirror))
}
expectedSubstr := strings.Join(parts, ",")
e2e.Logf("Expect to find substring %s within OPENSHIFT_IMG_OVERRIDES", expectedSubstr)
cpoDeploy, err := adminKubeClient.AppsV1().Deployments(hc.getHostedComponentNamespace()).Get(context.Background(), "control-plane-operator", metav1.GetOptions{})
o.Expect(err).ShouldNot(o.HaveOccurred())
for _, container := range cpoDeploy.Spec.Template.Spec.Containers {
if container.Name != "control-plane-operator" {
continue
}
for _, env := range container.Env {
if env.Name != "OPENSHIFT_IMG_OVERRIDES" {
continue
}
e2e.Logf("Found OPENSHIFT_IMG_OVERRIDES=%s", env.Value)
o.Expect(env.Value).To(o.ContainSubstring(expectedSubstr))
}
}
}) | |||||
test case | openshift/openshift-tests-private | 5e0e1977-d2b4-4991-89f6-065a11155909 | Longduration-NonPreRelease-Author:fxie-Critical-65606-[HyperShiftINSTALL] The cluster can be deleted successfully when hosted zone for private link is missing [Serial] | ['"context"', '"fmt"', '"os"', '"path"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/aws/aws-sdk-go/aws/awserr"', '"github.com/aws/aws-sdk-go/service/route53"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:fxie-Critical-65606-[HyperShiftINSTALL] The cluster can be deleted successfully when hosted zone for private link is missing [Serial]", func() {
var (
testCaseId = "65606"
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
ctx = context.Background()
)
exutil.By("Skipping incompatible platforms")
exutil.SkipIfPlatformTypeNot(oc, "aws")
exutil.By("Installing the Hypershift Operator")
region, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := os.RemoveAll(tempDir)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
installType: PublicAndPrivate,
region: region,
externalDNS: true,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a PublicAndPrivate hosted cluster with external DNS enabled")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withEndpointAccess(PublicAndPrivate).
withReleaseImage(release).
withExternalDnsDomain(hypershiftExternalDNSDomainAWS).
withBaseDomain(hypershiftExternalDNSBaseDomainAWS)
defer installHelper.destroyAWSHostedClusters(createCluster)
hostedCluster := installHelper.createAWSHostedClusters(createCluster)
// Pause reconciliation so the awsprivatelink controller does not re-create the DNS records that we will delete
exutil.By("Pausing reconciliation")
defer func() {
exutil.By("Un-pausing reconciliation")
doOcpReq(oc, OcpPatch, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, "--type=merge", `--patch={"spec":{"pausedUntil":null}}`)
// Avoid intricate dependency violations that could occur during the deletion of the HC
e2e.Logf("Waiting until the un-pause signal propagates to the HCP")
o.Eventually(func() bool {
res := doOcpReq(oc, OcpGet, false, "hcp", "-n", hostedCluster.getHostedComponentNamespace(), hostedCluster.name, "-o=jsonpath={.spec.pausedUntil}")
return len(res) == 0
}).WithTimeout(DefaultTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
}()
doOcpReq(oc, OcpPatch, true, "hc", hostedCluster.name, "-n", hostedCluster.namespace, "--type=merge", `--patch={"spec":{"pausedUntil":"true"}}`)
exutil.By("Waiting until the awsprivatelink controller is actually paused")
// A hack for simplicity
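// Instead of inspecting the controller state directly, grep the control-plane-operator logs for the awsendpointservice reconciler reporting "Reconciliation paused"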
_, err = exutil.WaitAndGetSpecificPodLogs(oc, hostedCluster.getHostedComponentNamespace(), "control-plane-operator", "deploy/control-plane-operator", "awsendpointservice | grep -i 'Reconciliation paused'")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get Route53 hosted zone for privatelink")
hzId := doOcpReq(oc, OcpGet, true, "awsendpointservice/private-router", "-n", hostedCluster.getHostedComponentNamespace(), "-o=jsonpath={.status.dnsZoneID}")
e2e.Logf("Found hosted zone ID = %s", hzId)
clusterinfra.GetAwsCredentialFromCluster(oc)
route53Client := exutil.NewRoute53Client()
// Get hosted zone name for logging purposes only
var getHzOut *route53.GetHostedZoneOutput
getHzOut, err = route53Client.GetHostedZoneWithContext(ctx, &route53.GetHostedZoneInput{
Id: aws.String(hzId),
})
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Found hosted zone name = %s", aws.StringValue(getHzOut.HostedZone.Name))
exutil.By("Delete Route53 hosted zone for privatelink")
e2e.Logf("Emptying Route53 hosted zone")
if _, err = route53Client.EmptyHostedZoneWithContext(ctx, hzId); err != nil {
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Failed to empty hosted zone (%s): %v", aerr.Code(), aerr.Message())
}
e2e.Failf("Failed to empty hosted zone %v", err)
}
e2e.Logf("Deleting Route53 hosted zone")
if _, err = route53Client.DeleteHostedZoneWithContextAndCheck(ctx, &route53.DeleteHostedZoneInput{
Id: aws.String(hzId),
}); err != nil {
if aerr, ok := err.(awserr.Error); ok {
e2e.Failf("Failed to delete hosted zone (%s): %v", aerr.Code(), aerr.Message())
}
e2e.Failf("Failed to delete hosted zone %v", err)
}
}) | |||||
test case | openshift/openshift-tests-private | 95a2bc54-9af2-46b5-bdcc-b1b0edeadbf0 | Longduration-NonPreRelease-Author:fxie-Critical-67225-[HyperShiftINSTALL] Test annotation 'hypershift.openshift.io/destroy-grace-period' in the HostedCluster [Serial] | ['"fmt"', '"io"', '"os"', '"path"', '"strings"', '"github.com/aws/aws-sdk-go/aws"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', 'appsv1 "k8s.io/api/apps/v1"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/util/wait"', 'operatorv1 "github.com/openshift/api/operator/v1"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Longduration-NonPreRelease-Author:fxie-Critical-67225-[HyperShiftINSTALL] Test annotation 'hypershift.openshift.io/destroy-grace-period' in the HostedCluster [Serial]", func() {
exutil.SkipIfPlatformTypeNot(oc, "aws")
var (
testCaseId = getTestCaseIDs()[0]
resourceNamePrefix = fmt.Sprintf("%s-%s", testCaseId, strings.ToLower(exutil.RandStrDefault()))
tempDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
bucketName = fmt.Sprintf("%s-bucket", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
pvName = fmt.Sprintf("%s-pv", resourceNamePrefix)
pvYamlStr = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolume
metadata:
name: %s
finalizers:
- what/ever
spec:
capacity:
storage: 1Mi
accessModes:
- ReadWriteOnce
nfs:
path: /what/ever
server: 127.0.0.1`, pvName)
)
exutil.By("Installing the Hypershift Operator")
defer func() {
_ = os.RemoveAll(tempDir)
}()
err := os.MkdirAll(tempDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var region string
region, err = getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
installHelper := installHelper{
oc: oc,
bucketName: bucketName,
dir: tempDir,
iaasPlatform: iaasPlatform,
region: region,
}
defer installHelper.deleteAWSS3Bucket()
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating a public HostedCluster")
release, err := exutil.GetReleaseImage(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster := installHelper.createClusterAWSCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withAnnotation(cleanupCloudResAnnotationKey, "true").
withAnnotation(destroyGracePeriodAnnotationKey, "120s").
withReleaseImage(release)
// Delete HC manually as it could be gone at this point
defer installHelper.deleteHostedClustersManual(createCluster)
hc := installHelper.createAWSHostedClusters(createCluster)
exutil.By("Creating an un-deletable PV in the hosted cluster")
var pvFile *os.File
pvFile, err = os.CreateTemp(tempDir, resourceNamePrefix)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_ = pvFile.Close()
}()
_, err = io.MultiWriter(g.GinkgoWriter, pvFile).Write([]byte(pvYamlStr))
o.Expect(err).ShouldNot(o.HaveOccurred())
installHelper.createHostedClusterKubeconfig(createCluster, hc)
oc.SetGuestKubeconf(hc.hostedClustersKubeconfigFile)
doOcpReq(oc.AsGuestKubeconf(), OcpCreate, true, "-f", pvFile.Name())
exutil.By("Deleting the hosted cluster in a non blocking fashion")
doOcpReq(oc, OcpDelete, true, "hc", hc.name, "-n", hc.namespace, "--wait=false")
exutil.By("Waiting for the CloudResourcesDestroyed condition to be set")
o.Eventually(func() bool {
msg := doOcpReq(oc, OcpGet, false, "hc", hc.name, "-n", hc.namespace, `-o=jsonpath={.status.conditions[?(@.type=="CloudResourcesDestroyed")].message}`)
return strings.Contains(msg, "Remaining resources: persistent-volumes")
}).WithTimeout(LongTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting for the HostedClusterDestroyed condition to be set")
o.Eventually(func() bool {
reason := doOcpReq(oc, OcpGet, false, "hc", hc.name, "-n", hc.namespace, `-o=jsonpath={.status.conditions[?(@.type=="HostedClusterDestroyed")].reason}`)
return reason == "WaitingForGracePeriod"
}).WithTimeout(DoubleLongTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
exutil.By("Waiting for the HostedCluster to be deleted")
o.Eventually(func() bool {
_, stderr, err := oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("hc", hc.name, "-n", hc.namespace).Outputs()
return err != nil && strings.Contains(stderr, "NotFound")
}).WithTimeout(DefaultTimeout).WithPolling(DefaultTimeout / 10).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | f850feeb-38d3-4f8e-9155-a08e3a5ae4a1 | Author:fxie-Longduration-NonPreRelease-Critical-73944-[HyperShiftINSTALL] AZURE Etcd Encryption [Serial] | ['"context"', '"fmt"', '"os"', '"path"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install.go | g.It("Author:fxie-Longduration-NonPreRelease-Critical-73944-[HyperShiftINSTALL] AZURE Etcd Encryption [Serial]", func() {
exutil.SkipIfPlatformTypeNot(oc, "azure")
if exutil.IsWorkloadIdentityCluster(oc) {
g.Skip("This test case requires root credentials, skipping")
}
var (
resourceNamePrefix = getResourceNamePrefix()
activeKeyName = fmt.Sprintf("%s-active-key", resourceNamePrefix)
backupKeyName = fmt.Sprintf("%s-backup-key", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
kvName = fmt.Sprintf("%s-kv", resourceNamePrefix)
rgName = fmt.Sprintf("%s-rg", resourceNamePrefix)
tmpDir = path.Join("/tmp", "hypershift", resourceNamePrefix)
)
e2e.Logf("Getting Azure root credentials from MC")
azCreds := exutil.NewEmptyAzureCredentials()
err := azCreds.GetFromClusterAndDecode(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get Azure root credentials from MC")
exutil.By("Creating a resource group to hold the keyvault")
azClientSet := exutil.NewAzureClientSetWithRootCreds(oc)
_, err = azClientSet.GetResourceGroupClient(nil).CreateOrUpdate(context.Background(), rgName,
armresources.ResourceGroup{Location: to.Ptr(azCreds.AzureRegion)}, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create resource group %s", rgName))
defer func() {
err = azClientSet.DeleteResourceGroup(context.Background(), rgName)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to delete resource group")
}()
e2e.Logf("Getting object ID of the service principal")
var spObjectId string
spObjectId, err = azClientSet.GetServicePrincipalObjectId(context.Background(), azCreds.AzureClientID)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get object ID of service principal")
exutil.By("Creating a keyvault to hold the keys")
accessPolicies := []*armkeyvault.AccessPolicyEntry{
{
TenantID: to.Ptr(azCreds.AzureTenantID),
ObjectID: to.Ptr(spObjectId),
Permissions: &armkeyvault.Permissions{
Keys: []*armkeyvault.KeyPermissions{
to.Ptr(armkeyvault.KeyPermissionsDecrypt),
to.Ptr(armkeyvault.KeyPermissionsEncrypt),
to.Ptr(armkeyvault.KeyPermissionsCreate),
to.Ptr(armkeyvault.KeyPermissionsGet),
},
},
},
}
kvParams := armkeyvault.VaultCreateOrUpdateParameters{
Location: to.Ptr(azCreds.AzureRegion),
Properties: &armkeyvault.VaultProperties{
SKU: &armkeyvault.SKU{
Name: to.Ptr(armkeyvault.SKUNameStandard),
Family: to.Ptr(armkeyvault.SKUFamilyA),
},
TenantID: to.Ptr(azCreds.AzureTenantID),
AccessPolicies: accessPolicies,
EnablePurgeProtection: to.Ptr(true),
// Keep the soft-delete retention as short as possible to reduce the chance of keyvault name collisions
SoftDeleteRetentionInDays: to.Ptr[int32](7),
},
}
poller, err := azClientSet.GetVaultsClient(nil).BeginCreateOrUpdate(context.Background(), rgName,
kvName, kvParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create keyvault %s", kvName))
_, err = poller.PollUntilDone(context.Background(), nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to poll for the termination of keyvault creation")
exutil.By("Creating keys within the keyvault")
keyParams := armkeyvault.KeyCreateParameters{
Properties: &armkeyvault.KeyProperties{
// RSA or EC: software-protected
// RSA-HSM or EC-HSM: hardware-protected
Kty: to.Ptr(armkeyvault.JSONWebKeyTypeRSA),
},
}
createActiveKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(context.Background(), rgName,
kvName, activeKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create active key")
createBackupKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(context.Background(), rgName,
kvName, backupKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create backup key")
e2e.Logf("Parsing key URIs")
var activeKey, backupKey azureKMSKey
activeKey, err = parseAzureVaultKeyURI(*createActiveKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse active key URI")
backupKey, err = parseAzureVaultKeyURI(*createBackupKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse backup key URI")
e2e.Logf("Create temporary directory")
o.Expect(os.MkdirAll(tmpDir, 0755)).NotTo(o.HaveOccurred(), "failed to create temporary directory")
defer func() {
o.Expect(os.RemoveAll(tmpDir)).NotTo(o.HaveOccurred(), "failed to remove temporary directory")
}()
exutil.By("Installing Hypershift Operator")
installHelper := installHelper{oc: oc, dir: tmpDir, iaasPlatform: iaasPlatform}
defer installHelper.hyperShiftUninstall()
installHelper.hyperShiftInstall()
exutil.By("Creating hosted cluster")
var release string
release, err = exutil.GetReleaseImage(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get release image")
createCluster := installHelper.createClusterAzureCommonBuilder().
withName(hcName).
withNodePoolReplicas(1).
withReleaseImage(release).
withEncryptionKeyId(*createActiveKeyResp.Properties.KeyURIWithVersion)
defer installHelper.destroyAzureHostedClusters(createCluster)
hc := installHelper.createAzureHostedClusters(createCluster)
e2e.Logf("Extracting kubeconfig of the hosted cluster")
installHelper.createHostedClusterKubeconfig(createCluster, hc)
hc.oc.SetGuestKubeconf(hc.hostedClustersKubeconfigFile)
exutil.By("Specifying a backup key on the HC")
kasResourceVersion := doOcpReq(oc, OcpGet, true, "deploy/kube-apiserver",
"-n", hc.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
hc.patchAzureKMS(nil, &backupKey)
hc.waitForKASDeployUpdate(context.Background(), kasResourceVersion)
hc.waitForKASDeployReady(context.Background())
hc.checkAzureEtcdEncryption(activeKey, &backupKey)
exutil.By("Swapping active & backup key")
kasResourceVersion = doOcpReq(oc, OcpGet, true, "deploy/kube-apiserver",
"-n", hc.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
hc.patchAzureKMS(&backupKey, &activeKey)
hc.waitForKASDeployUpdate(context.Background(), kasResourceVersion)
hc.waitForKASDeployReady(context.Background())
hc.checkAzureEtcdEncryption(backupKey, &activeKey)
exutil.By("Re-encoding all Secrets & ConfigMaps using the current active key")
hc.encodeSecrets(context.Background())
hc.encodeConfigmaps(context.Background())
exutil.By("Remove the backup key from HC")
kasResourceVersion = doOcpReq(oc, OcpGet, true, "deploy/kube-apiserver",
"-n", hc.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
hc.removeAzureKMSBackupKey()
hc.waitForKASDeployUpdate(context.Background(), kasResourceVersion)
hc.waitForKASDeployReady(context.Background())
hc.checkAzureEtcdEncryption(backupKey, nil)
}) | |||||
test | openshift/openshift-tests-private | 60e79c6b-2cef-430c-b40b-1a44f2442e7b | hypershift_install_aro | import (
"context"
"fmt"
"log/slog"
"os"
"path"
"strconv"
"strings"
"sync"
"golang.org/x/sync/errgroup"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/ptr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install_aro.go | package hypershift
import (
"context"
"fmt"
"log/slog"
"os"
"path"
"strconv"
"strings"
"sync"
"golang.org/x/sync/errgroup"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/ptr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-hypershift] Hypershift [HyperShiftAKSINSTALL]", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIForKube("hcp-aks-install")
bashClient *CLI
)
g.BeforeEach(func(ctx g.SpecContext) {
exutil.SkipOnAKSNess(ctx, oc, true)
exutil.SkipOnHypershiftOperatorExistence(oc, true)
bashClient = NewCmdClient().WithShowInfo(true)
logHypershiftCLIVersion(bashClient)
})
// Test run duration: ~45min
g.It("Author:fxie-Longduration-NonPreRelease-Critical-74561-Test basic proc for shared ingress [Serial]", func(ctx context.Context) {
var (
resourceNamePrefix = getResourceNamePrefix()
hc1Name = fmt.Sprintf("%s-hc1", resourceNamePrefix)
hc2Name = fmt.Sprintf("%s-hc2", resourceNamePrefix)
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
)
createTempDir(tempDir)
exutil.By("Creating two HCs simultaneously")
createCluster1 := installhelper.createClusterAROCommonBuilder().withName(hc1Name)
createCluster2 := installhelper.createClusterAROCommonBuilder().withName(hc2Name)
defer func() {
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
installhelper.dumpDestroyAROHostedCluster(createCluster1)
}()
go func() {
defer g.GinkgoRecover()
defer wg.Done()
installhelper.dumpDestroyAROHostedCluster(createCluster2)
}()
wg.Wait()
}()
// TODO: fully parallelize HC creation
hc1 := installhelper.createAzureHostedClusterWithoutCheck(createCluster1)
hc2 := installhelper.createAzureHostedClusterWithoutCheck(createCluster2)
hc1.pollUntilReady()
hc2.pollUntilReady()
installhelper.createHostedClusterKubeconfig(createCluster1, hc1)
installhelper.createHostedClusterKubeconfig(createCluster2, hc2)
exutil.By("Making sure that a shared ingress is used by both HCs")
sharedIngressExternalIp := getSharedIngressRouterExternalIp(oc)
hc1RouteBackends := doOcpReq(oc, OcpGet, true, "route", "-n", hc1.getHostedComponentNamespace(),
"-o=jsonpath={.items[*].status.ingress[0].routerCanonicalHostname}")
hc2RouteBackends := doOcpReq(oc, OcpGet, true, "route", "-n", hc2.getHostedComponentNamespace(),
"-o=jsonpath={.items[*].status.ingress[0].routerCanonicalHostname}")
for _, backend := range strings.Split(hc1RouteBackends, " ") {
o.Expect(backend).To(o.Equal(sharedIngressExternalIp), "incorrect backend IP of an HC1 route")
}
for _, backend := range strings.Split(hc2RouteBackends, " ") {
o.Expect(backend).To(o.Equal(sharedIngressExternalIp), "incorrect backend IP of an HC2 route")
}
exutil.By("Scaling up the existing NodePools")
hc1Np1ReplicasNew := ptr.Deref(createCluster1.NodePoolReplicas, 2) + 1
hc2Np1ReplicasNew := ptr.Deref(createCluster2.NodePoolReplicas, 2) + 1
doOcpReq(oc, OcpScale, false, "np", hc1.name, "-n", hc1.namespace, "--replicas", strconv.Itoa(hc1Np1ReplicasNew))
doOcpReq(oc, OcpScale, false, "np", hc2.name, "-n", hc2.namespace, "--replicas", strconv.Itoa(hc2Np1ReplicasNew))
o.Eventually(hc1.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/20).Should(o.Equal(hc1Np1ReplicasNew), "failed to scale up NodePool for HC 1")
o.Eventually(hc2.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/20).Should(o.Equal(hc2Np1ReplicasNew), "failed to scale up NodePool for HC 2")
exutil.By("Ensuring that the shared ingress properly manages concurrent traffic coming from external DNS")
var eg *errgroup.Group
eg, ctx = errgroup.WithContext(ctx)
hc1ctx := context.WithValue(ctx, ctxKeyId, 1)
hc2ctx := context.WithValue(ctx, ctxKeyId, 2)
hc1Client := oc.SetGuestKubeconf(hc1.hostedClustersKubeconfigFile).GuestKubeClient()
hc2Client := oc.SetGuestKubeconf(hc2.hostedClustersKubeconfigFile).GuestKubeClient()
logger := slog.New(slog.NewTextHandler(g.GinkgoWriter, &slog.HandlerOptions{AddSource: true}))
nsToCreatePerHC := 30
eg.Go(createAndCheckNs(hc1ctx, hc1Client, logger, nsToCreatePerHC, resourceNamePrefix))
eg.Go(createAndCheckNs(hc2ctx, hc2Client, logger, nsToCreatePerHC, resourceNamePrefix))
o.Expect(eg.Wait()).NotTo(o.HaveOccurred(), "at least one goroutine errored out")
})
// Test run duration: ~40min
// Also included: https://issues.redhat.com/browse/HOSTEDCP-1411
g.It("Author:fxie-Longduration-NonPreRelease-Critical-49173-Critical-49174-Test AZURE node root disk size [Serial]", func(ctx context.Context) {
var (
resourceNamePrefix = getResourceNamePrefix()
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
npName = fmt.Sprintf("%s-np", resourceNamePrefix)
npNodeCount = 1
vmRootDiskSize = 90
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
azClientSet = exutil.NewAzureClientSetWithCredsFromCanonicalFile()
)
createTempDir(tempDir)
exutil.By("Creating HostedCluster")
createCluster := installhelper.
createClusterAROCommonBuilder().
withResourceGroupTags("foo=bar,baz=quux").
withRootDiskSize(vmRootDiskSize).
withName(hcName)
defer installhelper.dumpDestroyAROHostedCluster(createCluster)
hc := installhelper.createAzureHostedClusterWithoutCheck(createCluster)
exutil.By("Creating additional NodePool")
subnetId := doOcpReq(oc, OcpGet, true, "hc", hc.name, "-n", hc.namespace, "-o=jsonpath={.spec.platform.azure.subnetID}")
imageId := doOcpReq(oc, OcpGet, true, "np", hc.name, "-n", hc.namespace, "-o=jsonpath={.spec.platform.azure.image.imageID}")
NewAzureNodePool(npName, hc.name, hc.namespace).
WithNodeCount(ptr.To(npNodeCount)).
WithImageId(imageId).
WithSubnetId(subnetId).
WithRootDiskSize(ptr.To(vmRootDiskSize)).
CreateAzureNodePool()
exutil.By("Waiting for the HC and the NP to be ready")
hc.pollUntilReady()
exutil.By("Checking tags on the Azure resource group")
rgName, err := hc.getResourceGroupName()
o.Expect(err).NotTo(o.HaveOccurred(), "error getting resource group of the hosted cluster")
resourceGroupsClientGetResponse, err := azClientSet.GetResourceGroupClient(nil).Get(ctx, rgName, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "error getting Azure resource group")
o.Expect(*resourceGroupsClientGetResponse.Tags["foo"]).To(o.Equal("bar"))
o.Expect(*resourceGroupsClientGetResponse.Tags["baz"]).To(o.Equal("quux"))
exutil.By("Checking VM root disk size")
listVMPager := azClientSet.GetVirtualMachinesClient(nil).NewListPager(rgName, &armcompute.VirtualMachinesClientListOptions{})
err = exutil.ProcessAzurePages(ctx, listVMPager, func(page armcompute.VirtualMachinesClientListResponse) error {
for _, vm := range page.Value {
name := ptr.Deref(vm.Name, "")
if vm.Properties == nil ||
vm.Properties.StorageProfile == nil ||
vm.Properties.StorageProfile.OSDisk == nil ||
vm.Properties.StorageProfile.OSDisk.DiskSizeGB == nil {
return fmt.Errorf("unknown root disk size for VM %s", name)
}
actualRootDiskSize := ptr.Deref(vm.Properties.StorageProfile.OSDisk.DiskSizeGB, -1)
e2e.Logf("Found actual root disk size = %d for VM %s", actualRootDiskSize, name)
if actualRootDiskSize != int32(vmRootDiskSize) {
return fmt.Errorf("expect root disk size %d for VM %s but found %d", vmRootDiskSize, name, actualRootDiskSize)
}
}
return nil
})
o.Expect(err).NotTo(o.HaveOccurred(), "error processing Azure pages")
})
/*
Day-1 creation is covered by CI. This test case focuses on day-2 key rotation.
Test run duration: ~55min
*/
g.It("Author:fxie-Longduration-NonPreRelease-Critical-73944-AZURE Etcd Encryption [Serial]", func(ctx context.Context) {
var (
resourceNamePrefix = getResourceNamePrefix()
activeKeyName = fmt.Sprintf("%s-active-key", resourceNamePrefix)
backupKeyName = fmt.Sprintf("%s-backup-key", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
kvName = fmt.Sprintf("%s-kv", resourceNamePrefix)
rgName = fmt.Sprintf("%s-rg", resourceNamePrefix)
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
)
createTempDir(tempDir)
e2e.Logf("Getting Azure location from MC")
location, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get MC location")
exutil.By("Creating a resource group to hold the keyvault")
azClientSet := exutil.NewAzureClientSetWithCredsFromCanonicalFile()
_, err = azClientSet.GetResourceGroupClient(nil).CreateOrUpdate(ctx, rgName, armresources.ResourceGroup{Location: to.Ptr(location)}, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create resource group %s", rgName))
defer func() {
o.Expect(azClientSet.DeleteResourceGroup(ctx, rgName)).NotTo(o.HaveOccurred(), "failed to delete resource group")
}()
e2e.Logf("Getting object ID of the service principal")
azCreds := exutil.NewEmptyAzureCredentialsFromFile()
err = azCreds.LoadFromFile(exutil.MustGetAzureCredsLocation())
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get Azure root credentials from canonical location")
var spObjectId string
spObjectId, err = azClientSet.GetServicePrincipalObjectId(ctx, azCreds.AzureClientID)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get object ID of service principal")
exutil.By("Creating a keyvault to hold the keys")
accessPolicies := []*armkeyvault.AccessPolicyEntry{
{
TenantID: to.Ptr(azCreds.AzureTenantID),
ObjectID: to.Ptr(spObjectId),
Permissions: &armkeyvault.Permissions{
Keys: []*armkeyvault.KeyPermissions{
to.Ptr(armkeyvault.KeyPermissionsDecrypt),
to.Ptr(armkeyvault.KeyPermissionsEncrypt),
to.Ptr(armkeyvault.KeyPermissionsCreate),
to.Ptr(armkeyvault.KeyPermissionsGet),
},
},
},
}
kvParams := armkeyvault.VaultCreateOrUpdateParameters{
Location: to.Ptr(location),
Properties: &armkeyvault.VaultProperties{
SKU: &armkeyvault.SKU{
Name: to.Ptr(armkeyvault.SKUNameStandard),
Family: to.Ptr(armkeyvault.SKUFamilyA),
},
TenantID: to.Ptr(azCreds.AzureTenantID),
AccessPolicies: accessPolicies,
EnablePurgeProtection: to.Ptr(true),
// Keep the soft-delete retention as short as possible to reduce the chance of keyvault name collisions
SoftDeleteRetentionInDays: to.Ptr[int32](7),
},
}
poller, err := azClientSet.GetVaultsClient(nil).BeginCreateOrUpdate(ctx, rgName, kvName, kvParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create keyvault %s", kvName))
_, err = poller.PollUntilDone(ctx, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to poll for the termination of keyvault creation")
exutil.By("Creating keys within the keyvault")
keyParams := armkeyvault.KeyCreateParameters{
Properties: &armkeyvault.KeyProperties{
// RSA or EC: software-protected
// RSA-HSM or EC-HSM: hardware-protected
Kty: to.Ptr(armkeyvault.JSONWebKeyTypeRSA),
},
}
createActiveKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(ctx, rgName, kvName, activeKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create active key")
createBackupKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(ctx, rgName, kvName, backupKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create backup key")
e2e.Logf("Parsing key URIs")
var activeKey, backupKey azureKMSKey
activeKey, err = parseAzureVaultKeyURI(*createActiveKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse active key URI")
backupKey, err = parseAzureVaultKeyURI(*createBackupKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse backup key URI")
exutil.By("Creating hosted cluster")
createCluster := installhelper.createClusterAROCommonBuilder().withEncryptionKeyId(*createActiveKeyResp.Properties.KeyURIWithVersion).withName(hcName)
defer installhelper.dumpDestroyAROHostedCluster(createCluster)
hc := installhelper.createAzureHostedClusters(createCluster)
e2e.Logf("Extracting kubeconfig of the hosted cluster")
installhelper.createHostedClusterKubeconfig(createCluster, hc)
hc.oc.SetGuestKubeconf(hc.hostedClustersKubeconfigFile)
exutil.By("Specifying a backup key on the HC")
kasResourceVersion := hc.getKASResourceVersion()
hc.patchAzureKMS(nil, &backupKey)
hc.waitForKASDeployUpdate(ctx, kasResourceVersion)
hc.waitForKASDeployReady(ctx)
hc.checkAzureEtcdEncryption(activeKey, &backupKey)
exutil.By("Swapping active & backup key")
kasResourceVersion = hc.getKASResourceVersion()
hc.patchAzureKMS(&backupKey, &activeKey)
hc.waitForKASDeployUpdate(ctx, kasResourceVersion)
hc.waitForKASDeployReady(ctx)
hc.checkAzureEtcdEncryption(backupKey, &activeKey)
exutil.By("Re-encoding all Secrets & ConfigMaps using the current active key")
hc.encodeSecrets(ctx)
hc.encodeConfigmaps(ctx)
exutil.By("Remove the backup key from HC")
kasResourceVersion = hc.getKASResourceVersion()
hc.removeAzureKMSBackupKey()
hc.waitForKASDeployUpdate(ctx, kasResourceVersion)
hc.waitForKASDeployReady(ctx)
hc.checkAzureEtcdEncryption(backupKey, nil)
})
// Test run duration: ~40min
// Also included: https://issues.redhat.com/browse/OCPBUGS-31090, https://issues.redhat.com/browse/OCPBUGS-31089
g.It("Author:fxie-Longduration-NonPreRelease-Critical-75856-Create AZURE Infrastructure and Hosted Cluster Separately [Serial]", func() {
var (
resourceNamePrefix = getResourceNamePrefix()
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
infraID = fmt.Sprintf("%s-infra", resourceNamePrefix)
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
infraJSON = path.Join(tempDir, "infra.json")
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
)
createTempDir(tempDir)
exutil.By("Looking up RHCOS image URL for Azure Disk")
rhcosImage, err := exutil.GetRHCOSImageURLForAzureDisk(oc, exutil.GetLatestReleaseImageFromEnv(), exutil.GetTestEnv().PullSecretLocation, exutil.CoreOSBootImageArchX86_64)
o.Expect(err).NotTo(o.HaveOccurred(), "error getting RHCOS image for Azure Disk")
exutil.By("Creating Infrastructure")
infra := installhelper.createInfraAROCommonBuilder().withInfraID(infraID).withOutputFile(infraJSON).withRHCOSImage(rhcosImage).withName(hcName)
defer installhelper.destroyAzureInfra(infra)
installhelper.createAzureInfra(infra)
exutil.By("Creating HostedCluster")
createCluster := installhelper.createClusterAROCommonBuilder().withInfraJSON(infraJSON).withName(hcName)
defer installhelper.dumpDeleteAROHostedCluster(createCluster)
_ = installhelper.createAzureHostedClusters(createCluster)
})
})
| package hypershift | ||||
test case | openshift/openshift-tests-private | 9e442303-10fd-4994-a9a1-a484079ae2e8 | Author:fxie-Longduration-NonPreRelease-Critical-74561-Test basic proc for shared ingress [Serial] | ['"context"', '"fmt"', '"log/slog"', '"os"', '"path"', '"strconv"', '"strings"', '"sync"', '"golang.org/x/sync/errgroup"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"k8s.io/utils/ptr"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install_aro.go | g.It("Author:fxie-Longduration-NonPreRelease-Critical-74561-Test basic proc for shared ingress [Serial]", func(ctx context.Context) {
var (
resourceNamePrefix = getResourceNamePrefix()
hc1Name = fmt.Sprintf("%s-hc1", resourceNamePrefix)
hc2Name = fmt.Sprintf("%s-hc2", resourceNamePrefix)
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
)
createTempDir(tempDir)
exutil.By("Creating two HCs simultaneously")
createCluster1 := installhelper.createClusterAROCommonBuilder().withName(hc1Name)
createCluster2 := installhelper.createClusterAROCommonBuilder().withName(hc2Name)
defer func() {
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
installhelper.dumpDestroyAROHostedCluster(createCluster1)
}()
go func() {
defer g.GinkgoRecover()
defer wg.Done()
installhelper.dumpDestroyAROHostedCluster(createCluster2)
}()
wg.Wait()
}()
// TODO: fully parallelize HC creation
hc1 := installhelper.createAzureHostedClusterWithoutCheck(createCluster1)
hc2 := installhelper.createAzureHostedClusterWithoutCheck(createCluster2)
hc1.pollUntilReady()
hc2.pollUntilReady()
installhelper.createHostedClusterKubeconfig(createCluster1, hc1)
installhelper.createHostedClusterKubeconfig(createCluster2, hc2)
exutil.By("Making sure that a shared ingress is used by both HCs")
sharedIngressExternalIp := getSharedIngressRouterExternalIp(oc)
hc1RouteBackends := doOcpReq(oc, OcpGet, true, "route", "-n", hc1.getHostedComponentNamespace(),
"-o=jsonpath={.items[*].status.ingress[0].routerCanonicalHostname}")
hc2RouteBackends := doOcpReq(oc, OcpGet, true, "route", "-n", hc2.getHostedComponentNamespace(),
"-o=jsonpath={.items[*].status.ingress[0].routerCanonicalHostname}")
for _, backend := range strings.Split(hc1RouteBackends, " ") {
o.Expect(backend).To(o.Equal(sharedIngressExternalIp), "incorrect backend IP of an HC1 route")
}
for _, backend := range strings.Split(hc2RouteBackends, " ") {
o.Expect(backend).To(o.Equal(sharedIngressExternalIp), "incorrect backend IP of an HC2 route")
}
exutil.By("Scaling up the existing NodePools")
hc1Np1ReplicasNew := ptr.Deref(createCluster1.NodePoolReplicas, 2) + 1
hc2Np1ReplicasNew := ptr.Deref(createCluster2.NodePoolReplicas, 2) + 1
doOcpReq(oc, OcpScale, false, "np", hc1.name, "-n", hc1.namespace, "--replicas", strconv.Itoa(hc1Np1ReplicasNew))
doOcpReq(oc, OcpScale, false, "np", hc2.name, "-n", hc2.namespace, "--replicas", strconv.Itoa(hc2Np1ReplicasNew))
o.Eventually(hc1.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/20).Should(o.Equal(hc1Np1ReplicasNew), "failed to scale up NodePool for HC1")
o.Eventually(hc2.pollGetHostedClusterReadyNodeCount(""), LongTimeout, LongTimeout/20).Should(o.Equal(hc2Np1ReplicasNew), "failed to scale up NodePool for HC2")
exutil.By("Ensuring that the shared ingress properly manages concurrent traffic coming from external DNS")
var eg *errgroup.Group
eg, ctx = errgroup.WithContext(ctx)
hc1ctx := context.WithValue(ctx, ctxKeyId, 1)
hc2ctx := context.WithValue(ctx, ctxKeyId, 2)
hc1Client := oc.SetGuestKubeconf(hc1.hostedClustersKubeconfigFile).GuestKubeClient()
hc2Client := oc.SetGuestKubeconf(hc2.hostedClustersKubeconfigFile).GuestKubeClient()
logger := slog.New(slog.NewTextHandler(g.GinkgoWriter, &slog.HandlerOptions{AddSource: true}))
nsToCreatePerHC := 30
eg.Go(createAndCheckNs(hc1ctx, hc1Client, logger, nsToCreatePerHC, resourceNamePrefix))
eg.Go(createAndCheckNs(hc2ctx, hc2Client, logger, nsToCreatePerHC, resourceNamePrefix))
o.Expect(eg.Wait()).NotTo(o.HaveOccurred(), "at least one goroutine errored out")
}) | |||||
test case | openshift/openshift-tests-private | 64c2f182-c900-4727-a6bc-a072fbdafd36 | Author:fxie-Longduration-NonPreRelease-Critical-49173-Critical-49174-Test AZURE node root disk size [Serial] | ['"context"', '"fmt"', '"os"', '"path"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"k8s.io/utils/ptr"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install_aro.go | g.It("Author:fxie-Longduration-NonPreRelease-Critical-49173-Critical-49174-Test AZURE node root disk size [Serial]", func(ctx context.Context) {
var (
resourceNamePrefix = getResourceNamePrefix()
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
npName = fmt.Sprintf("%s-np", resourceNamePrefix)
npNodeCount = 1
vmRootDiskSize = 90
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
azClientSet = exutil.NewAzureClientSetWithCredsFromCanonicalFile()
)
createTempDir(tempDir)
exutil.By("Creating HostedCluster")
createCluster := installhelper.
createClusterAROCommonBuilder().
withResourceGroupTags("foo=bar,baz=quux").
withRootDiskSize(vmRootDiskSize).
withName(hcName)
defer installhelper.dumpDestroyAROHostedCluster(createCluster)
hc := installhelper.createAzureHostedClusterWithoutCheck(createCluster)
exutil.By("Creating additional NodePool")
subnetId := doOcpReq(oc, OcpGet, true, "hc", hc.name, "-n", hc.namespace, "-o=jsonpath={.spec.platform.azure.subnetID}")
imageId := doOcpReq(oc, OcpGet, true, "np", hc.name, "-n", hc.namespace, "-o=jsonpath={.spec.platform.azure.image.imageID}")
NewAzureNodePool(npName, hc.name, hc.namespace).
WithNodeCount(ptr.To(npNodeCount)).
WithImageId(imageId).
WithSubnetId(subnetId).
WithRootDiskSize(ptr.To(vmRootDiskSize)).
CreateAzureNodePool()
exutil.By("Waiting for the HC and the NP to be ready")
hc.pollUntilReady()
exutil.By("Checking tags on the Azure resource group")
rgName, err := hc.getResourceGroupName()
o.Expect(err).NotTo(o.HaveOccurred(), "error getting resource group of the hosted cluster")
resourceGroupsClientGetResponse, err := azClientSet.GetResourceGroupClient(nil).Get(ctx, rgName, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "error getting Azure resource group")
o.Expect(*resourceGroupsClientGetResponse.Tags["foo"]).To(o.Equal("bar"))
o.Expect(*resourceGroupsClientGetResponse.Tags["baz"]).To(o.Equal("quux"))
exutil.By("Checking VM root disk size")
listVMPager := azClientSet.GetVirtualMachinesClient(nil).NewListPager(rgName, &armcompute.VirtualMachinesClientListOptions{})
err = exutil.ProcessAzurePages(ctx, listVMPager, func(page armcompute.VirtualMachinesClientListResponse) error {
for _, vm := range page.Value {
name := ptr.Deref(vm.Name, "")
if vm.Properties == nil ||
vm.Properties.StorageProfile == nil ||
vm.Properties.StorageProfile.OSDisk == nil ||
vm.Properties.StorageProfile.OSDisk.DiskSizeGB == nil {
return fmt.Errorf("unknown root disk size for VM %s", name)
}
actualRootDiskSize := ptr.Deref(vm.Properties.StorageProfile.OSDisk.DiskSizeGB, -1)
e2e.Logf("Found actual root disk size = %d for VM %s", actualRootDiskSize, name)
if actualRootDiskSize != int32(vmRootDiskSize) {
return fmt.Errorf("expect root disk size %d for VM %s but found %d", vmRootDiskSize, name, actualRootDiskSize)
}
}
return nil
})
o.Expect(err).NotTo(o.HaveOccurred(), "error processing Azure pages")
}) | |||||
test case | openshift/openshift-tests-private | 9da9adde-fd89-4cb4-8a5b-4c41a9e4e7e8 | Author:fxie-Longduration-NonPreRelease-Critical-73944-AZURE Etcd Encryption [Serial] | ['"context"', '"fmt"', '"os"', '"path"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install_aro.go | g.It("Author:fxie-Longduration-NonPreRelease-Critical-73944-AZURE Etcd Encryption [Serial]", func(ctx context.Context) {
var (
resourceNamePrefix = getResourceNamePrefix()
activeKeyName = fmt.Sprintf("%s-active-key", resourceNamePrefix)
backupKeyName = fmt.Sprintf("%s-backup-key", resourceNamePrefix)
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
kvName = fmt.Sprintf("%s-kv", resourceNamePrefix)
rgName = fmt.Sprintf("%s-rg", resourceNamePrefix)
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
)
createTempDir(tempDir)
e2e.Logf("Getting Azure location from MC")
location, err := getClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get MC location")
exutil.By("Creating a resource group to hold the keyvault")
azClientSet := exutil.NewAzureClientSetWithCredsFromCanonicalFile()
_, err = azClientSet.GetResourceGroupClient(nil).CreateOrUpdate(ctx, rgName, armresources.ResourceGroup{Location: to.Ptr(location)}, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create resource group %s", rgName))
defer func() {
o.Expect(azClientSet.DeleteResourceGroup(ctx, rgName)).NotTo(o.HaveOccurred(), "failed to delete resource group")
}()
e2e.Logf("Getting object ID of the service principal")
azCreds := exutil.NewEmptyAzureCredentialsFromFile()
err = azCreds.LoadFromFile(exutil.MustGetAzureCredsLocation())
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get Azure root credentials from canonical location")
var spObjectId string
spObjectId, err = azClientSet.GetServicePrincipalObjectId(ctx, azCreds.AzureClientID)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get object ID of service principal")
exutil.By("Creating a keyvault to hold the keys")
accessPolicies := []*armkeyvault.AccessPolicyEntry{
{
TenantID: to.Ptr(azCreds.AzureTenantID),
ObjectID: to.Ptr(spObjectId),
Permissions: &armkeyvault.Permissions{
Keys: []*armkeyvault.KeyPermissions{
to.Ptr(armkeyvault.KeyPermissionsDecrypt),
to.Ptr(armkeyvault.KeyPermissionsEncrypt),
to.Ptr(armkeyvault.KeyPermissionsCreate),
to.Ptr(armkeyvault.KeyPermissionsGet),
},
},
},
}
kvParams := armkeyvault.VaultCreateOrUpdateParameters{
Location: to.Ptr(location),
Properties: &armkeyvault.VaultProperties{
SKU: &armkeyvault.SKU{
Name: to.Ptr(armkeyvault.SKUNameStandard),
Family: to.Ptr(armkeyvault.SKUFamilyA),
},
TenantID: to.Ptr(azCreds.AzureTenantID),
AccessPolicies: accessPolicies,
EnablePurgeProtection: to.Ptr(true),
// Keep the soft-delete retention period at the minimum to reduce the chance of keyvault name collisions
SoftDeleteRetentionInDays: to.Ptr[int32](7),
},
}
poller, err := azClientSet.GetVaultsClient(nil).BeginCreateOrUpdate(ctx, rgName, kvName, kvParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to create keyvalut %s", kvName))
_, err = poller.PollUntilDone(ctx, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to poll for the termination of keyvault creation")
exutil.By("Creating keys within the keyvault")
keyParams := armkeyvault.KeyCreateParameters{
Properties: &armkeyvault.KeyProperties{
// RSA or EC: software-protected
// RSA-HSM or EC-HSM: hardware-protected
Kty: to.Ptr(armkeyvault.JSONWebKeyTypeRSA),
},
}
createActiveKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(ctx, rgName, kvName, activeKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create active key")
createBackupKeyResp, err := azClientSet.GetKeysClient(nil).CreateIfNotExist(ctx, rgName, kvName, backupKeyName, keyParams, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create backup key")
e2e.Logf("Parsing key URIs")
var activeKey, backupKey azureKMSKey
activeKey, err = parseAzureVaultKeyURI(*createActiveKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse active key URI")
backupKey, err = parseAzureVaultKeyURI(*createBackupKeyResp.Properties.KeyURIWithVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to parse backup key URI")
exutil.By("Creating hosted cluster")
createCluster := installhelper.createClusterAROCommonBuilder().withEncryptionKeyId(*createActiveKeyResp.Properties.KeyURIWithVersion).withName(hcName)
defer installhelper.dumpDestroyAROHostedCluster(createCluster)
hc := installhelper.createAzureHostedClusters(createCluster)
e2e.Logf("Extracting kubeconfig of the hosted cluster")
installhelper.createHostedClusterKubeconfig(createCluster, hc)
hc.oc.SetGuestKubeconf(hc.hostedClustersKubeconfigFile)
exutil.By("Specifying a backup key on the HC")
kasResourceVersion := hc.getKASResourceVersion()
hc.patchAzureKMS(nil, &backupKey)
hc.waitForKASDeployUpdate(ctx, kasResourceVersion)
hc.waitForKASDeployReady(ctx)
hc.checkAzureEtcdEncryption(activeKey, &backupKey)
exutil.By("Swapping active & backup key")
kasResourceVersion = hc.getKASResourceVersion()
hc.patchAzureKMS(&backupKey, &activeKey)
hc.waitForKASDeployUpdate(ctx, kasResourceVersion)
hc.waitForKASDeployReady(ctx)
hc.checkAzureEtcdEncryption(backupKey, &activeKey)
exutil.By("Re-encoding all Secrets & ConfigMaps using the current active key")
hc.encodeSecrets(ctx)
hc.encodeConfigmaps(ctx)
exutil.By("Remove the backup key from HC")
kasResourceVersion = hc.getKASResourceVersion()
hc.removeAzureKMSBackupKey()
hc.waitForKASDeployUpdate(ctx, kasResourceVersion)
hc.waitForKASDeployReady(ctx)
hc.checkAzureEtcdEncryption(backupKey, nil)
}) | |||||
test case | openshift/openshift-tests-private | 6b860bdd-c637-4c2b-a0b9-a1b77f6f6da7 | Author:fxie-Longduration-NonPreRelease-Critical-75856-Create AZURE Infrastructure and Hosted Cluster Separately [Serial] | ['"fmt"', '"os"', '"path"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hypershift_install_aro.go | g.It("Author:fxie-Longduration-NonPreRelease-Critical-75856-Create AZURE Infrastructure and Hosted Cluster Separately [Serial]", func() {
var (
resourceNamePrefix = getResourceNamePrefix()
hcName = fmt.Sprintf("%s-hc", resourceNamePrefix)
infraID = fmt.Sprintf("%s-infra", resourceNamePrefix)
tempDir = path.Join(os.TempDir(), "hypershift", resourceNamePrefix)
artifactDir = path.Join(exutil.GetTestEnv().ArtifactDir, "hypershift-artifact", resourceNamePrefix)
infraJSON = path.Join(tempDir, "infra.json")
installhelper = installHelper{oc: oc, dir: tempDir, artifactDir: artifactDir}
)
createTempDir(tempDir)
exutil.By("Looking up RHCOS image URL for Azure Disk")
rhcosImage, err := exutil.GetRHCOSImageURLForAzureDisk(oc, exutil.GetLatestReleaseImageFromEnv(), exutil.GetTestEnv().PullSecretLocation, exutil.CoreOSBootImageArchX86_64)
o.Expect(err).NotTo(o.HaveOccurred(), "error getting RHCOS image for Azure Disk")
exutil.By("Creating Infrastructure")
infra := installhelper.createInfraAROCommonBuilder().withInfraID(infraID).withOutputFile(infraJSON).withRHCOSImage(rhcosImage).withName(hcName)
defer installhelper.destroyAzureInfra(infra)
installhelper.createAzureInfra(infra)
exutil.By("Creating HostedCluster")
createCluster := installhelper.createClusterAROCommonBuilder().withInfraJSON(infraJSON).withName(hcName)
defer installhelper.dumpDeleteAROHostedCluster(createCluster)
_ = installhelper.createAzureHostedClusters(createCluster)
}) | |||||
file | openshift/openshift-tests-private | e02e124a-0da0-4df6-b08e-bf1a90a3f06a | kubelet_killer | import (
"os"
"path/filepath"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/kubelet_killer.go | package hypershift
import (
"os"
"path/filepath"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
type kubeletKiller struct {
Name string `json:"NAME"`
Namespace string `json:"NAMESPACE"`
NodeName string `json:"NODE_NAME"`
Template string
}
func (k *kubeletKiller) create(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
vars, err := parseTemplateVarParams(k)
o.Expect(err).NotTo(o.HaveOccurred())
params := append([]string{"--ignore-unknown-parameters=true", "-f", k.Template, "-p"}, vars...)
err = k.applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, params...)
if err != nil {
e2e.Logf("failed to create kubelet killer pod %s", err.Error())
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (k *kubeletKiller) delete(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
args := []string{"pod", k.Name, "-n", k.Namespace, "--ignore-not-found"}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (k *kubeletKiller) applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
return applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, parameters...)
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 5b0d966c-a425-485d-b464-a90cce8de5a0 | create | ['kubeletKiller'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/kubelet_killer.go | func (k *kubeletKiller) create(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
vars, err := parseTemplateVarParams(k)
o.Expect(err).NotTo(o.HaveOccurred())
params := append([]string{"--ignore-unknown-parameters=true", "-f", k.Template, "-p"}, vars...)
err = k.applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, params...)
if err != nil {
e2e.Logf("failed to create kubelet killer pod %s", err.Error())
}
o.Expect(err).NotTo(o.HaveOccurred())
} | hypershift | ||||
function | openshift/openshift-tests-private | cad9a48b-c534-447d-b798-41a283de2398 | delete | ['"os"', '"path/filepath"'] | ['kubeletKiller'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/kubelet_killer.go | func (k *kubeletKiller) delete(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
args := []string{"pod", k.Name, "-n", k.Namespace, "--ignore-not-found"}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | eb2a0bc5-2dbf-43c0-941b-d089379f17b5 | applyResourceFromTemplate | ['kubeletKiller'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/kubelet_killer.go | func (k *kubeletKiller) applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
return applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, parameters...)
} | hypershift | ||||
file | openshift/openshift-tests-private | 729d32fc-6eb3-4720-b2c0-1bb3d1fb36d6 | mhc_util | import (
o "github.com/onsi/gomega"
"os"
"path/filepath"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/mhc_util.go | package hypershift
import (
o "github.com/onsi/gomega"
"os"
"path/filepath"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// Utils for the OpenShift Machine API resources "machinesets.machine.openshift.io" and "machines.machine.openshift.io"
type mhcDescription struct {
MachinesetName string `json:"MACHINESET_NAME"`
Clusterid string `json:"CLUSTERID"`
Namespace string `json:"NAMESPACE"`
Maxunhealthy string `json:"MAXUNHEALTHY"`
Name string `json:"NAME"`
template string
}
func (mhc *mhcDescription) createMhc(oc *exutil.CLI, parsedTemplate string) {
e2e.Logf("Creating machine health check ...")
vars, err := parseTemplateVarParams(mhc)
o.Expect(err).NotTo(o.HaveOccurred())
params := append([]string{"--ignore-unknown-parameters=true", "-f", mhc.template, "-p"}, vars...)
err = applyResourceFromTemplate(oc, "", parsedTemplate, params...)
if err != nil {
e2e.Logf("failed to create machine health check %s", err.Error())
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (mhc *mhcDescription) deleteMhc(oc *exutil.CLI, parsedTemplate string) {
e2e.Logf("Deleting machinehealthcheck ...")
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMHC, mhc.Name, "--ignore-not-found", "-n", mhc.Namespace).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
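// checkMachinesetReplicaStatus reports whether the machineset's desired, ready and available replica counts all match.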
func checkMachinesetReplicaStatus(oc *exutil.CLI, machinesetName string) bool {
desired := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, machinesetName, `-o=jsonpath={.spec.replicas}`)
ready := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, machinesetName, `-o=jsonpath={.status.readyReplicas}`)
available := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, machinesetName, `-o=jsonpath={.status.availableReplicas}`)
e2e.Logf("%s %s desired: %s ready: %s and available: %s", mapiMachineset, machinesetName, desired, ready, available)
return desired == ready && ready == available
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 17db51a5-4d69-4345-91b0-ab84d22d3a29 | createMhc | ['mhcDescription'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/mhc_util.go | func (mhc *mhcDescription) createMhc(oc *exutil.CLI, parsedTemplate string) {
e2e.Logf("Creating machine health check ...")
vars, err := parseTemplateVarParams(mhc)
o.Expect(err).NotTo(o.HaveOccurred())
params := append([]string{"--ignore-unknown-parameters=true", "-f", mhc.template, "-p"}, vars...)
err = applyResourceFromTemplate(oc, "", parsedTemplate, params...)
if err != nil {
e2e.Logf("failed to create machine health check %s", err.Error())
}
o.Expect(err).NotTo(o.HaveOccurred())
} | hypershift | ||||
function | openshift/openshift-tests-private | c2e6cfc8-f212-4096-a921-685c65a5bfe8 | deleteMhc | ['"os"', '"path/filepath"'] | ['mhcDescription'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/mhc_util.go | func (mhc *mhcDescription) deleteMhc(oc *exutil.CLI, parsedTemplate string) {
e2e.Logf("Deleting machinehealthcheck ...")
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMHC, mhc.Name, "--ignore-not-found", "-n", mhc.Namespace).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | fb4af2e5-55f7-4c4a-8622-b4c84c365ed2 | checkMachinesetReplicaStatus | github.com/openshift/openshift-tests-private/test/extended/hypershift/mhc_util.go | func checkMachinesetReplicaStatus(oc *exutil.CLI, machinesetName string) bool {
desired := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, machinesetName, `-o=jsonpath={.spec.replicas}`)
ready := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, machinesetName, `-o=jsonpath={.status.readyReplicas}`)
available := doOcpReq(oc, OcpGet, false, "-n", machineAPINamespace, mapiMachineset, machinesetName, `-o=jsonpath={.status.availableReplicas}`)
e2e.Logf("%s %s desired: %s ready: %s and available: %s", mapiMachineset, machinesetName, desired, ready, available)
return desired == ready && ready == available
} | hypershift | |||||
file | openshift/openshift-tests-private | dfdbda89-e19b-4027-b156-0f1a8ef09454 | node_action | import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/node_action.go | package hypershift
import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type nodeAction struct {
oc *exutil.CLI
}
func newNodeAction(oc *exutil.CLI) *nodeAction {
return &nodeAction{oc: oc}
}
func (n *nodeAction) taintNode(nodeName string, action string) {
_, er := n.oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", nodeName, action).Output()
if er != nil {
e2e.Logf("taint Node error: %v", er)
o.Expect(er).ShouldNot(o.HaveOccurred())
}
}
func (n *nodeAction) labelNode(nodeName string, action string) {
_, er := n.oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, action).Output()
if er != nil {
e2e.Logf("label Node error: %v", er)
o.Expect(er).ShouldNot(o.HaveOccurred())
}
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 97596733-41ab-4b42-b45a-6399e6956ff4 | newNodeAction | ['nodeAction'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/node_action.go | func newNodeAction(oc *exutil.CLI) *nodeAction {
return &nodeAction{oc: oc}
} | hypershift | ||||
function | openshift/openshift-tests-private | 370ded61-666b-4f91-bd47-e208b42df339 | taintNode | ['nodeAction'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/node_action.go | func (n *nodeAction) taintNode(nodeName string, action string) {
_, er := n.oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", nodeName, action).Output()
if er != nil {
e2e.Logf("taint Node error: %v", er)
o.Expect(er).ShouldNot(o.HaveOccurred())
}
} | hypershift | ||||
function | openshift/openshift-tests-private | a614176b-92fb-4c75-bd1b-9d24f661e334 | labelNode | ['nodeAction'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/node_action.go | func (n *nodeAction) labelNode(nodeName string, action string) {
_, er := n.oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, action).Output()
if er != nil {
e2e.Logf("label Node error: %v", er)
o.Expect(er).ShouldNot(o.HaveOccurred())
}
} | hypershift | ||||
file | openshift/openshift-tests-private | 326377d7-bcf8-46d0-b5b6-b3678c0d43dd | nodepool | import (
"fmt"
"strings"
o "github.com/onsi/gomega"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | package hypershift
import (
"fmt"
"strings"
o "github.com/onsi/gomega"
)
type NodePool struct {
ClusterName string `param:"cluster-name"`
Name string `param:"name"`
Namespace string `param:"namespace"`
NodeCount *int `param:"node-count"`
NodeUpgradeType string `param:"node-upgrade-type"`
ReleaseImage string `param:"release-image"`
}
func NewNodePool(clusterName, name, namespace string) *NodePool {
return &NodePool{
ClusterName: clusterName,
Name: name,
Namespace: namespace,
}
}
func (np *NodePool) WithName(name string) *NodePool {
np.Name = name
return np
}
func (np *NodePool) WithNodeCount(nodeCount *int) *NodePool {
np.NodeCount = nodeCount
return np
}
func (np *NodePool) WithNodeUpgradeType(nodeUpgradeType string) *NodePool {
np.NodeUpgradeType = nodeUpgradeType
return np
}
func (np *NodePool) WithReleaseImage(releaseImage string) *NodePool {
np.ReleaseImage = releaseImage
return np
}
type AWSNodePool struct {
NodePool
InstanceProfile string `param:"instance-profile"`
InstanceType string `param:"instance-type"`
RootVolumeIOPS *int64 `param:"root-volume-iops"`
RootVolumeSize *int64 `param:"root-volume-size"`
RootVolumeType string `param:"root-volume-type"`
SecurityGroupID string `param:"securitygroup-id"`
SubnetID string `param:"subnet-id"`
}
func NewAWSNodePool(name, clusterName, namespace string) *AWSNodePool {
return &AWSNodePool{
NodePool: NodePool{
Name: name,
Namespace: namespace,
ClusterName: clusterName,
},
}
}
func (a *AWSNodePool) WithInstanceProfile(profile string) *AWSNodePool {
a.InstanceProfile = profile
return a
}
func (a *AWSNodePool) WithInstanceType(instanceType string) *AWSNodePool {
a.InstanceType = instanceType
return a
}
func (a *AWSNodePool) WithNodeCount(nodeCount *int) *AWSNodePool {
a.NodeCount = nodeCount
return a
}
func (a *AWSNodePool) WithNodeUpgradeType(nodeUpgradeType string) *AWSNodePool {
a.NodeUpgradeType = nodeUpgradeType
return a
}
func (a *AWSNodePool) WithReleaseImage(releaseImage string) *AWSNodePool {
a.ReleaseImage = releaseImage
return a
}
func (a *AWSNodePool) WithRootVolumeIOPS(rootVolumeIOPS *int64) *AWSNodePool {
a.RootVolumeIOPS = rootVolumeIOPS
return a
}
func (a *AWSNodePool) WithRootVolumeSize(rootVolumeSize *int64) *AWSNodePool {
a.RootVolumeSize = rootVolumeSize
return a
}
func (a *AWSNodePool) WithRootVolumeType(rootVolumeType string) *AWSNodePool {
a.RootVolumeType = rootVolumeType
return a
}
func (a *AWSNodePool) WithSecurityGroupID(securityGroupID string) *AWSNodePool {
a.SecurityGroupID = securityGroupID
return a
}
func (a *AWSNodePool) WithSubnetID(subnetID string) *AWSNodePool {
a.SubnetID = subnetID
return a
}
func (a *AWSNodePool) CreateAWSNodePool() {
gCreateNodePool[*AWSNodePool]("aws", a)
}
type AzureNodePool struct {
NodePool
MarketplaceImage *azureMarketplaceImage
ImageId string `param:"image-id"`
RootDiskSize *int `param:"root-disk-size"`
SubnetId string `param:"nodepool-subnet-id"`
}
func NewAzureNodePool(name, clusterName, namespace string) *AzureNodePool {
return &AzureNodePool{
NodePool: NodePool{
Name: name,
Namespace: namespace,
ClusterName: clusterName,
},
}
}
func (a *AzureNodePool) WithImageId(imageId string) *AzureNodePool {
a.ImageId = imageId
return a
}
func (a *AzureNodePool) WithSubnetId(subnetId string) *AzureNodePool {
a.SubnetId = subnetId
return a
}
func (a *AzureNodePool) WithRootDiskSize(rootDiskSize *int) *AzureNodePool {
a.RootDiskSize = rootDiskSize
return a
}
func (a *AzureNodePool) WithNodeCount(nodeCount *int) *AzureNodePool {
a.NodeCount = nodeCount
return a
}
func (a *AzureNodePool) WithMarketplaceImage(marketplaceImage *azureMarketplaceImage) *AzureNodePool {
a.MarketplaceImage = marketplaceImage
return a
}
func (a *AzureNodePool) CreateAzureNodePool() {
gCreateNodePool[*AzureNodePool]("azure", a)
}
// gCreateNodePool creates a nodepool for the given platform.
// The nodepool C must be one of the concrete nodepool types, e.g. *AWSNodePool or *AzureNodePool.
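// For illustration (values are hypothetical): an *AzureNodePool with ClusterName "hc-1", Name "np-1",
// Namespace "clusters" and NodeCount 2 roughly yields:
// hypershift create nodepool azure --cluster-name=hc-1 --name=np-1 --namespace=clusters --node-count=2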
func gCreateNodePool[C any](platform string, nodepool C) {
vars, err := parse(nodepool)
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(vars).ShouldNot(o.BeEmpty())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create nodepool %s %s", platform, strings.Join(vars, " "))
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 04ce8d17-ccc5-4182-914e-4778fe8b7c82 | NewNodePool | ['NodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func NewNodePool(clusterName, name, namespace string) *NodePool {
return &NodePool{
ClusterName: clusterName,
Name: name,
Namespace: namespace,
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 1b56cbf3-fe0f-433e-b9a6-866b1b1dd86a | WithName | ['NodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (np *NodePool) WithName(name string) *NodePool {
np.Name = name
return np
} | hypershift | ||||
function | openshift/openshift-tests-private | 43f764c1-9add-41bb-9057-5acfb0ac26b7 | WithNodeCount | ['NodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (np *NodePool) WithNodeCount(nodeCount *int) *NodePool {
np.NodeCount = nodeCount
return np
} | hypershift | ||||
function | openshift/openshift-tests-private | 9c378633-0b77-4210-a675-d953657e398b | WithNodeUpgradeType | ['NodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (np *NodePool) WithNodeUpgradeType(nodeUpgradeType string) *NodePool {
np.NodeUpgradeType = nodeUpgradeType
return np
} | hypershift | ||||
function | openshift/openshift-tests-private | 20f6f5ad-d71f-4ca6-8041-eb18d2e04f42 | WithReleaseImage | ['NodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (np *NodePool) WithReleaseImage(releaseImage string) *NodePool {
np.ReleaseImage = releaseImage
return np
} | hypershift | ||||
function | openshift/openshift-tests-private | 04a99890-e0f9-4239-9259-4221db9bdbcd | NewAWSNodePool | ['NodePool', 'AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func NewAWSNodePool(name, clusterName, namespace string) *AWSNodePool {
return &AWSNodePool{
NodePool: NodePool{
Name: name,
Namespace: namespace,
ClusterName: clusterName,
},
}
} | hypershift | ||||
function | openshift/openshift-tests-private | f26be6de-e5a2-4a82-a6d9-8a5b38ce2761 | WithInstanceProfile | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithInstanceProfile(profile string) *AWSNodePool {
a.InstanceProfile = profile
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 5984f069-00c2-4e4c-9bea-9f94ad039d5b | WithInstanceType | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithInstanceType(instanceType string) *AWSNodePool {
a.InstanceType = instanceType
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 25d00bea-64f9-4304-8353-16cb5d4d7c74 | WithNodeCount | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithNodeCount(nodeCount *int) *AWSNodePool {
a.NodeCount = nodeCount
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | be90bf6d-a3e8-435b-87d6-af2219338e6e | WithNodeUpgradeType | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithNodeUpgradeType(nodeUpgradeType string) *AWSNodePool {
a.NodeUpgradeType = nodeUpgradeType
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | fe751597-81c4-480b-8fed-939a2539c9f4 | WithReleaseImage | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithReleaseImage(releaseImage string) *AWSNodePool {
a.ReleaseImage = releaseImage
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 2dec5bac-a984-4749-8943-c68aa8d7dff7 | WithRootVolumeIOPS | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithRootVolumeIOPS(rootVolumeIOPS *int64) *AWSNodePool {
a.RootVolumeIOPS = rootVolumeIOPS
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 92d6baa1-c9ad-47b1-b051-ab1134d7085a | WithRootVolumeSize | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithRootVolumeSize(rootVolumeSize *int64) *AWSNodePool {
a.RootVolumeSize = rootVolumeSize
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 4ae9d252-9236-4b16-8f85-c608c35e08c5 | WithRootVolumeType | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithRootVolumeType(rootVolumeType string) *AWSNodePool {
a.RootVolumeType = rootVolumeType
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | a8d05194-0894-4963-b438-f104541b111a | WithSecurityGroupID | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithSecurityGroupID(securityGroupID string) *AWSNodePool {
a.SecurityGroupID = securityGroupID
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | a94c69ff-f667-4a46-ba2c-342b35eda75f | WithSubnetID | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) WithSubnetID(subnetID string) *AWSNodePool {
a.SubnetID = subnetID
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 8676a88c-1bc9-4cfc-abce-e4ec54df6843 | CreateAWSNodePool | ['AWSNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AWSNodePool) CreateAWSNodePool() {
gCreateNodePool[*AWSNodePool]("aws", a)
} | hypershift | ||||
function | openshift/openshift-tests-private | 09d37397-7002-4810-8e3b-21cac49233b0 | NewAzureNodePool | ['NodePool', 'AzureNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func NewAzureNodePool(name, clusterName, namespace string) *AzureNodePool {
return &AzureNodePool{
NodePool: NodePool{
Name: name,
Namespace: namespace,
ClusterName: clusterName,
},
}
} | hypershift | ||||
function | openshift/openshift-tests-private | d51cee81-a5d6-456e-8b30-715c531ec117 | WithImageId | ['AzureNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AzureNodePool) WithImageId(imageId string) *AzureNodePool {
a.ImageId = imageId
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 0d6a93d7-0b78-413b-bd9b-e23daec28181 | WithSubnetId | ['AzureNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AzureNodePool) WithSubnetId(subnetId string) *AzureNodePool {
a.SubnetId = subnetId
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 49fb89a4-6baa-4193-a273-5c032fe3cf17 | WithRootDiskSize | ['AzureNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AzureNodePool) WithRootDiskSize(rootDiskSize *int) *AzureNodePool {
a.RootDiskSize = rootDiskSize
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 1d92053f-af97-431e-856c-b5694cd51eb5 | WithNodeCount | ['AzureNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AzureNodePool) WithNodeCount(nodeCount *int) *AzureNodePool {
a.NodeCount = nodeCount
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 5dbc8be7-eba4-4f09-9af9-ce1e80f5642a | WithMarketplaceImage | ['AzureNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AzureNodePool) WithMarketplaceImage(marketplaceImage *azureMarketplaceImage) *AzureNodePool {
a.MarketplaceImage = marketplaceImage
return a
} | hypershift | ||||
function | openshift/openshift-tests-private | 26bfe1dd-94c0-4b30-8a51-06a20203944d | CreateAzureNodePool | ['AzureNodePool'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func (a *AzureNodePool) CreateAzureNodePool() {
gCreateNodePool[*AzureNodePool]("azure", a)
} | hypershift | ||||
function | openshift/openshift-tests-private | eecc1506-1f8b-4867-bf03-e0d5273ec309 | gCreateNodePool | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/nodepool.go | func gCreateNodePool[C any](platform string, nodepool C) {
vars, err := parse(nodepool)
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(vars).ShouldNot(o.BeEmpty())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create nodepool %s %s", platform, strings.Join(vars, " "))
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | ||||
file | openshift/openshift-tests-private | 30f23194-e0b2-4d71-a8dd-ad08c24d8901 | pre_start | import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
"os"
"path/filepath"
"regexp"
"strings"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/pre_start.go | package hypershift
import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
"os"
"path/filepath"
"regexp"
"strings"
)
type preStartJob struct {
Name string `json:"NAME"`
Namespace string `json:"NAMESPACE"`
CaseID string `json:"CASEID"`
Action string `json:"ACTION"`
TmpDir string
}
func newPreStartJob(name string, namespace string, caseID string, action string, tmpDir string) *preStartJob {
return &preStartJob{Name: name, Namespace: namespace, CaseID: caseID, Action: action, TmpDir: tmpDir}
}
func (p *preStartJob) create(oc *exutil.CLI) {
out, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", p.Name, "--from-file=KUBECONFIG="+os.Getenv("KUBECONFIG"), "-n", p.Namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("create secret: " + p.Name + ", " + out)
out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "anyuid", "-z", "default", "-n", p.Namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("oc adm policy: " + out)
defer exutil.RecoverNamespaceRestricted(oc, p.Namespace)
exutil.SetNamespacePrivileged(oc, p.Namespace)
preStartJobTemplate := filepath.Join(exutil.FixturePath("testdata", "hypershift"), "prestart-job.yaml")
vars, err := parseTemplateVarParams(p)
o.Expect(err).NotTo(o.HaveOccurred())
var params = []string{"--ignore-unknown-parameters=true", "-f", preStartJobTemplate, "-p"}
err = applyResourceFromTemplate(oc, "", p.Name+".yaml", append(params, vars...)...)
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(LongTimeout/10, LongTimeout, func() (bool, error) {
value, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("job", "-n", p.Namespace, p.Name, `-ojsonpath={.status.conditions[?(@.type=="Complete")].status}`).Output()
return strings.Contains(value, "True"), nil
})
exutil.AssertWaitPollNoErr(err, "hyperShift operator PreStartJob error, log:"+p.getErrorLog(oc))
}
func (p *preStartJob) delete(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", p.Name, "-n", p.Namespace).Output()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("job", p.Name, "-n", p.Namespace).Output()
}
func (p *preStartJob) getErrorLog(oc *exutil.CLI) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", p.Namespace, "-l", "job-name="+p.Name, `-ojsonpath={.items[0].metadata.name}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
logs, err := exutil.GetSpecificPodLogs(oc, p.Namespace, "prestart", podName, "\"Error\\|failed\\|error\"")
if err != nil {
return ""
}
return logs
}
func (p *preStartJob) preStartJobIP(oc *exutil.CLI) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", p.Namespace, "-l", "job-name="+p.Name, `-ojsonpath={.items[0].metadata.name}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
log, err := exutil.GetSpecificPodLogs(oc, p.Namespace, "prestart", podName, `"Your nodeport address is"`)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("preStartJobIP,log:" + log)
// regex for ip
numBlock := "(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])"
regexPattern := numBlock + "\\." + numBlock + "\\." + numBlock + "\\." + numBlock
regEx := regexp.MustCompile(regexPattern)
return regEx.FindString(log)
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 4b265625-7953-4119-aaa3-15ce6c865fff | newPreStartJob | ['preStartJob'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/pre_start.go | func newPreStartJob(name string, namespace string, caseID string, action string, tmpDir string) *preStartJob {
return &preStartJob{Name: name, Namespace: namespace, CaseID: caseID, Action: action, TmpDir: tmpDir}
} | hypershift | ||||
function | openshift/openshift-tests-private | 9b2018b6-30d2-4daf-94ad-5dfac56eafa0 | create | ['"k8s.io/apimachinery/pkg/util/wait"', '"os"', '"path/filepath"', '"strings"'] | ['preStartJob'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/pre_start.go | func (p *preStartJob) create(oc *exutil.CLI) {
out, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", p.Name, "--from-file=KUBECONFIG="+os.Getenv("KUBECONFIG"), "-n", p.Namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("create secret: " + p.Name + ", " + out)
out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "anyuid", "-z", "default", "-n", p.Namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("oc adm policy: " + out)
defer exutil.RecoverNamespaceRestricted(oc, p.Namespace)
exutil.SetNamespacePrivileged(oc, p.Namespace)
preStartJobTemplate := filepath.Join(exutil.FixturePath("testdata", "hypershift"), "prestart-job.yaml")
vars, err := parseTemplateVarParams(p)
o.Expect(err).NotTo(o.HaveOccurred())
var params = []string{"--ignore-unknown-parameters=true", "-f", preStartJobTemplate, "-p"}
err = applyResourceFromTemplate(oc, "", p.Name+".yaml", append(params, vars...)...)
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(LongTimeout/10, LongTimeout, func() (bool, error) {
value, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("job", "-n", p.Namespace, p.Name, `-ojsonpath={.status.conditions[?(@.type=="Complete")].status}`).Output()
return strings.Contains(value, "True"), nil
})
exutil.AssertWaitPollNoErr(err, "hyperShift operator PreStartJob error, log:"+p.getErrorLog(oc))
} | hypershift | |||
function | openshift/openshift-tests-private | 401909e5-0766-4790-9d78-407f4c98d6b7 | delete | ['preStartJob'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/pre_start.go | func (p *preStartJob) delete(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", p.Name, "-n", p.Namespace).Output()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("job", p.Name, "-n", p.Namespace).Output()
} | hypershift | ||||
function | openshift/openshift-tests-private | ecc8716b-9f74-4a08-99f4-2a5ae071a380 | getErrorLog | ['preStartJob'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/pre_start.go | func (p *preStartJob) getErrorLog(oc *exutil.CLI) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", p.Namespace, "-l", "job-name="+p.Name, `-ojsonpath={.items[0].metadata.name}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
logs, err := exutil.GetSpecificPodLogs(oc, p.Namespace, "prestart", podName, "\"Error\\|failed\\|error\"")
if err != nil {
return ""
}
return logs
} | hypershift | ||||
function | openshift/openshift-tests-private | 302e834e-606e-4a0a-8141-a56330e12b28 | preStartJobIP | ['"regexp"'] | ['preStartJob'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/pre_start.go | func (p *preStartJob) preStartJobIP(oc *exutil.CLI) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", p.Namespace, "-l", "job-name="+p.Name, `-ojsonpath={.items[0].metadata.name}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
log, err := exutil.GetSpecificPodLogs(oc, p.Namespace, "prestart", podName, `"Your nodeport address is"`)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("preStartJobIP,log:" + log)
// regex for ip
numBlock := "(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])"
regexPattern := numBlock + "\\." + numBlock + "\\." + numBlock + "\\." + numBlock
regEx := regexp.MustCompile(regexPattern)
return regEx.FindString(log)
} | hypershift | |||
file | openshift/openshift-tests-private | 88979146-9c90-4366-a0db-81995d0568da | shared_ingress | import (
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/shared_ingress.go | package hypershift
import (
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
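// getSharedIngressRouterExternalIp returns the external load balancer IP of the shared ingress "router" service.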
func getSharedIngressRouterExternalIp(oc *exutil.CLI) string {
return doOcpReq(oc, OcpGet, true, "svc", "router", "-n", hypershiftSharedingressNamespace,
"-o=jsonpath={.status.loadBalancer.ingress[0].ip}")
}
| package hypershift | ||||
function | openshift/openshift-tests-private | b3dd6861-4430-4e8e-ace9-b7514a94b03a | getSharedIngressRouterExternalIp | github.com/openshift/openshift-tests-private/test/extended/hypershift/shared_ingress.go | func getSharedIngressRouterExternalIp(oc *exutil.CLI) string {
return doOcpReq(oc, OcpGet, true, "svc", "router", "-n", hypershiftSharedingressNamespace,
"-o=jsonpath={.status.loadBalancer.ingress[0].ip}")
} | hypershift | |||||
file | openshift/openshift-tests-private | 02f76e92-a1d4-4e51-81dc-9107fa2d9295 | util | import (
"bytes"
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"io/ioutil"
"log/slog"
"os"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/blang/semver"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/tidwall/gjson"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | package hypershift
import (
"bytes"
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"io/ioutil"
"log/slog"
"os"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/blang/semver"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/tidwall/gjson"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
)
func doOcpReq(oc *exutil.CLI, verb OcpClientVerb, notEmpty bool, args ...string) string {
g.GinkgoHelper()
res, err := oc.AsAdmin().WithoutNamespace().Run(verb).Args(args...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if notEmpty {
o.Expect(res).ShouldNot(o.BeEmpty())
}
return res
}
func checkSubstring(src string, expect []string) {
if expect == nil || len(expect) <= 0 {
o.Expect(expect).ShouldNot(o.BeEmpty())
}
for i := 0; i < len(expect); i++ {
o.Expect(src).To(o.ContainSubstring(expect[i]))
}
}
func checkSubstringWithNoExit(src string, expect []string) bool {
if expect == nil || len(expect) <= 0 {
e2e.Logf("Warning expected sub string empty ? %+v", expect)
return true
}
for i := 0; i < len(expect); i++ {
if !strings.Contains(src, expect[i]) {
e2e.Logf("expected sub string %s not in src %s", expect[i], src)
return false
}
}
return true
}
type workload struct {
name string
namespace string
template string
}
func (wl *workload) create(oc *exutil.CLI, kubeconfig, parsedTemplate string, extraParams ...string) {
params := []string{
"--ignore-unknown-parameters=true", "-f", wl.template, "-p", "NAME=" + wl.name, "NAMESPACE=" + wl.namespace,
}
params = append(params, extraParams...)
err := wl.applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, params...)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (wl *workload) delete(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
args := []string{"job", wl.name, "-n", wl.namespace}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (wl *workload) applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
return applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, parameters...)
}
// parseTemplateVarParams parses a struct into template variable params such as "NAME=myname", "NAMESPACE=clusters", ...
// It currently supports only int, string, bool, *int, *string and *bool fields; a pointer is used to check whether a field was set explicitly.
// The json tag is used as the actual variable name in the template, e.g. < Name string `json:"NAME"` >.
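// For illustration (field names are hypothetical): passing a pointer to
// struct{ Name string `json:"NAME"`; Replicas *int `json:"REPLICAS"` }{Name: "myname", Replicas: &two}
// is parsed into []string{"NAME=myname", "REPLICAS=2"}.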
func parseTemplateVarParams(obj interface{}) ([]string, error) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
return []string{}, errors.New("params must be a pointer pointed to a struct")
}
var params []string
t := v.Elem().Type()
for i := 0; i < t.NumField(); i++ {
if !v.Elem().Field(i).CanInterface() {
continue
}
varName := t.Field(i).Name
varType := t.Field(i).Type
varValue := v.Elem().Field(i).Interface()
tagName := t.Field(i).Tag.Get("json")
if tagName == "" {
continue
}
// handle non-nil pointers, i.e. params that were set explicitly
if varType.Kind() == reflect.Ptr {
if reflect.ValueOf(varValue).IsNil() {
continue
}
switch reflect.ValueOf(varValue).Elem().Type().Kind() {
case reflect.Int:
p := fmt.Sprintf("%s=%d", tagName, reflect.ValueOf(varValue).Elem().Interface().(int))
params = append(params, p)
case reflect.String:
params = append(params, tagName+"="+reflect.ValueOf(varValue).Elem().Interface().(string))
case reflect.Bool:
v, _ := reflect.ValueOf(varValue).Elem().Interface().(bool)
params = append(params, tagName+"="+strconv.FormatBool(v))
default:
e2e.Logf("parseTemplateVarParams params %v invalid, ignore it", varName)
}
continue
}
//non-pointer
switch varType.Kind() {
case reflect.String:
if varValue.(string) != "" {
params = append(params, tagName+"="+varValue.(string))
}
case reflect.Int:
params = append(params, tagName+"="+strconv.Itoa(varValue.(int)))
case reflect.Bool:
params = append(params, tagName+"="+strconv.FormatBool(varValue.(bool)))
default:
e2e.Logf("parseTemplateVarParams params %v not support, ignore it", varValue)
}
}
return params, nil
}
func applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
var configFile string
defer func() {
if len(configFile) > 0 {
_ = os.Remove(configFile)
}
}()
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, true, func(_ context.Context) (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(parsedTemplate)
if err != nil {
e2e.Logf("Error processing template: %v, keep polling", err)
return false, nil
}
configFile = output
return true, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
var args = []string{"-f", configFile}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
return oc.AsAdmin().WithoutNamespace().Run("apply").Args(args...).Execute()
}
func getClusterRegion(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("node", `-ojsonpath={.items[].metadata.labels.topology\.kubernetes\.io/region}`).Output()
}
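// getBaseDomain returns the cluster base domain with the leading (cluster-name) label stripped,
// e.g. a spec.baseDomain of "mycluster.example.com" yields "example.com".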
func getBaseDomain(oc *exutil.CLI) (string, error) {
str, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns/cluster", `-ojsonpath={.spec.baseDomain}`).Output()
if err != nil {
return "", err
}
index := strings.Index(str, ".")
if index == -1 {
return "", fmt.Errorf("can not parse baseDomain because not finding '.'")
}
return str[index+1:], nil
}
func getAWSKey(oc *exutil.CLI) (string, string, error) {
accessKeyID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n", "kube-system", "-o", `template={{index .data "aws_access_key_id"|base64decode}}`).Output()
if err != nil {
return "", "", err
}
secureKey, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n", "kube-system", "-o", `template={{index .data "aws_secret_access_key"|base64decode}}`).Output()
if err != nil {
return "", "", err
}
return accessKeyID, secureKey, nil
}
func getAzureKey(oc *exutil.CLI) (string, string, string, string, error) {
clientID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_client_id"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
clientSecret, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_client_secret"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
subscriptionID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_subscription_id"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
tenantID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_tenant_id"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
return clientID, clientSecret, subscriptionID, tenantID, nil
}
/*
parse reads a structure's 'param' tags and outputs CLI command parameters like --param=$value; embedded structs are supported.
e.g.
Input:
type example struct {
Name string `param:"name"`
PullSecret string `param:"pull_secret"`
} {
Name:"hypershift",
PullSecret:"pullsecret.txt",
}
Output:
--name="hypershift" --pull_secret="pullsecret.txt"
*/
func parse(obj interface{}) ([]string, error) {
var params []string
v := reflect.ValueOf(obj)
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
k := v.Kind()
if k == reflect.Struct {
return parseStruct(v.Interface(), params)
}
return []string{}, fmt.Errorf("unsupported type: %s (supported types: struct, pointer to struct)", k)
}
func parseStruct(obj interface{}, params []string) ([]string, error) {
v := reflect.ValueOf(obj)
t := v.Type()
for i := 0; i < t.NumField(); i++ {
varType := t.Field(i).Type
varValueV := v.Field(i)
if !t.Field(i).IsExported() {
continue
}
if varType.Kind() == reflect.Ptr && varValueV.IsNil() {
continue
}
for varType.Kind() == reflect.Ptr {
varType = varType.Elem()
varValueV = varValueV.Elem()
}
varValue := varValueV.Interface()
varKind := varType.Kind()
var err error
if varKind == reflect.Struct {
params, err = parseStruct(varValue, params)
if err != nil {
return []string{}, err
}
continue
}
tagName := t.Field(i).Tag.Get("param")
if tagName == "" {
continue
}
switch {
case varKind == reflect.Map && isStringMap(varValueV):
params = append(params, stringMapToParams(varValue.(map[string]string), tagName)...)
case varKind == reflect.String:
if varValue.(string) != "" {
params = append(params, "--"+tagName+"="+varValue.(string))
}
case varKind == reflect.Int:
params = append(params, "--"+tagName+"="+strconv.Itoa(varValue.(int)))
case varKind == reflect.Int64:
params = append(params, "--"+tagName+"="+strconv.FormatInt(varValue.(int64), 10))
case varKind == reflect.Bool:
params = append(params, "--"+tagName+"="+strconv.FormatBool(varValue.(bool)))
default:
e2e.Logf("parseTemplateVarParams params %s %v not support, ignore it", varType.Kind(), varValue)
}
}
return params, nil
}
func isStringMap(v reflect.Value) bool {
t := v.Type()
return t.Kind() == reflect.Map &&
t.Key().Kind() == reflect.String &&
t.Elem().Kind() == reflect.String
}
func stringMapToParams(m map[string]string, flagName string) []string {
params := make([]string, 0, len(m))
for k, v := range m {
params = append(params, fmt.Sprintf("--%s=%s=%s", flagName, k, v))
}
return params
}
func getSha256ByFile(file string) string {
ha := sha256.New()
f, err := os.Open(file)
o.Expect(err).ShouldNot(o.HaveOccurred())
defer f.Close()
_, err = io.Copy(ha, f)
o.Expect(err).ShouldNot(o.HaveOccurred())
return fmt.Sprintf("%X", ha.Sum(nil))
}
func getJSONByFile(filePath string, path string) gjson.Result {
file, err := os.Open(filePath)
o.Expect(err).ShouldNot(o.HaveOccurred())
defer file.Close()
con, err := ioutil.ReadAll(file)
o.Expect(err).ShouldNot(o.HaveOccurred())
return gjson.Get(string(con), path)
}
func replaceInFile(file string, old string, new string) error {
input, err := ioutil.ReadFile(file)
if err != nil {
return err
}
output := bytes.Replace(input, []byte(old), []byte(new), -1)
err = ioutil.WriteFile(file, output, 0666)
return err
}
func execCMDOnWorkNodeByBastion(showInfo bool, nodeIP, bastionIP, exec string) string {
var bashClient = NewCmdClient().WithShowInfo(showInfo)
privateKey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
cmd := `chmod 600 ` + privateKey + `; ssh -i ` + privateKey + ` -o StrictHostKeyChecking=no -o ProxyCommand="ssh -i ` + privateKey + " -o StrictHostKeyChecking=no -W %h:%p ec2-user@" + bastionIP + `" core@` + nodeIP + ` '` + exec + `'`
log, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return log
}
func getAllByFile(filePath string) string {
con, err := ioutil.ReadFile(filePath)
o.Expect(err).ShouldNot(o.HaveOccurred())
return string(con)
}
func getAWSPrivateCredentials(defaultCredPaths ...string) string {
g.GinkgoHelper()
// Always prefer environment variable override
if envOverride := os.Getenv(AWSHyperShiftPrivateSecretFile); envOverride != "" {
return envOverride
}
// Running in Prow
if exutil.GetTestEnv().IsRunningInProw() {
return DefaultAWSHyperShiftPrivateSecretFile
}
// Try default paths
var res string
for _, credPath := range defaultCredPaths {
info, err := os.Stat(credPath)
if err != nil {
e2e.Logf("Error inspecting path %s: %v, skipping", credPath, err)
continue
}
if mode := info.Mode(); !mode.IsRegular() {
e2e.Logf("Path %s does not point to a regular file but a(n) %v, skipping", credPath, mode)
continue
}
res = credPath
break
}
o.Expect(res).NotTo(o.BeEmpty())
return res
}
func subtractMinor(version *semver.Version, count uint64) *semver.Version {
result := *version
// Guard against uint64 underflow so the minor version clamps at 0 instead of wrapping around
if count > result.Minor {
	result.Minor = 0
} else {
	result.Minor -= count
}
return &result
}
func maxInt64(a, b uint64) uint64 {
if a > b {
return a
}
return b
}
func getHyperShiftOperatorLatestSupportOCPVersion() string {
var bashClient = NewCmdClient().WithShowInfo(true)
res, err := bashClient.Run("oc logs -n hypershift -lapp=operator --tail=-1 | head -1").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
re := regexp.MustCompile(`Latest supported OCP: (\d+\.\d+\.\d+)`)
match := re.FindStringSubmatch(res)
o.Expect(len(match) > 1).Should(o.BeTrue())
return match[1]
}
func getHyperShiftSupportedOCPVersion() (semver.Version, semver.Version) {
v := getHyperShiftOperatorLatestSupportOCPVersion()
latestSupportedVersion := semver.MustParse(v)
minSupportedVersion := semver.MustParse(subtractMinor(&latestSupportedVersion, uint64(SupportedPreviousMinorVersions)).String())
return latestSupportedVersion, minSupportedVersion
}
func getMinSupportedOCPVersion() string {
_, minVersion := getHyperShiftSupportedOCPVersion()
return minVersion.String()
}
// getAWSMgmtClusterRegionAvailableZones returns the availability zones of the management cluster's region, using its oc client and AWS credentials
func getAWSMgmtClusterRegionAvailableZones(oc *exutil.CLI) []string {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSessionWithRegion(region)
availableZones, err := awsClient.GetAvailabilityZoneNames()
o.Expect(err).ShouldNot(o.HaveOccurred())
return availableZones
}
// removeNodesTaint removes the node taint by taintKey if the node exists
func removeNodesTaint(oc *exutil.CLI, nodes []string, taintKey string) {
for _, no := range nodes {
nodeInfo := doOcpReq(oc, OcpGet, false, "no", no, "--ignore-not-found")
if nodeInfo != "" {
doOcpReq(oc, OcpAdm, false, "taint", "node", no, taintKey+"-")
}
}
}
// removeNodesLabel removes the node label by labelKey if the node exists
func removeNodesLabel(oc *exutil.CLI, nodes []string, labelKey string) {
for _, no := range nodes {
nodeInfo := doOcpReq(oc, OcpGet, false, "no", no, "--ignore-not-found")
if nodeInfo != "" {
doOcpReq(oc, OcpLabel, false, "node", no, labelKey+"-")
}
}
}
func getLatestUnsupportedOCPVersion() string {
min := semver.MustParse(getMinSupportedOCPVersion())
return semver.MustParse(subtractMinor(&min, uint64(1)).String()).String()
}
// getVersionWithMajorAndMinor removes the z-stream suffix, e.g. 4.12.0 --> 4.12
func getVersionWithMajorAndMinor(version string) (string, error) {
v := strings.Split(version, ".")
if len(v) == 0 || len(v) > 3 {
return "", fmt.Errorf("invalid version")
}
if len(v) < 3 {
return version, nil
} else {
return strings.Join(v[:2], "."), nil
}
}
// isRequestServingComponent determines if a deployment, replicaset or pod belongs to a serving component
func isRequestServingComponent(name string) bool {
servingComponentRegex := regexp.MustCompile("^(kube-apiserver|ignition-server-proxy|oauth-openshift|router).*")
return servingComponentRegex.MatchString(name)
}
// getTestCaseIDs extracts test case IDs from the Ginkgo nodes. Should be called within g.It.
func getTestCaseIDs() (testCaseIDs []string) {
pattern := `-(\d{5,})-`
re := regexp.MustCompile(pattern)
for _, match := range re.FindAllStringSubmatch(g.CurrentSpecReport().FullText(), -1) {
// Should be fulfilled all the time but just in case
o.Expect(match).To(o.HaveLen(2))
testCaseIDs = append(testCaseIDs, match[1])
}
o.Expect(testCaseIDs).NotTo(o.BeEmpty())
return testCaseIDs
}
// getResourceNamePrefix generates a cloud resource name prefix by concatenating the first test case ID
// with a random string. The resulting string is safe to use as a prefix for cloud resource names.
func getResourceNamePrefix() string {
return fmt.Sprintf("ocp%s-%s", getTestCaseIDs()[0], strings.ToLower(exutil.RandStr(4)))
}
func createTempDir(dir string) {
g.DeferCleanup(func() {
e2e.Logf("Removing temporary directory %s", dir)
o.Expect(os.RemoveAll(dir)).NotTo(o.HaveOccurred(), "failed to remove temporary directory")
})
e2e.Logf("Creating temporary directory %s", dir)
o.Expect(os.MkdirAll(dir, 0755)).NotTo(o.HaveOccurred(), "failed to create temporary directory")
}
func logHypershiftCLIVersion(c *CLI) {
version, err := c.WithShowInfo(true).Run("hypershift version").Output()
if err != nil {
e2e.Logf("Failed to get hypershift CLI version: %v", err)
}
e2e.Logf("Found hypershift CLI version:\n%s", version)
}
func getNsCount(ctx context.Context, c kubernetes.Interface) (int, error) {
nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("error listing namespaces: %w", err)
}
return len(nsList.Items), nil
}
func createAndCheckNs(ctx context.Context, c kubernetes.Interface, logger *slog.Logger, numNsToCreate int, nsNamePrefix string) func() error {
return func() error {
g.GinkgoRecover()
logger = logger.With("id", ctx.Value(ctxKeyId))
nsCount, err := getNsCount(ctx, c)
if err != nil {
return fmt.Errorf("error counting namespaces: %v", err)
}
expectedNsCount := nsCount
logger.Info("Got initial namespace count", "nsCount", nsCount)
for i := 0; i < numNsToCreate; i++ {
nsName := fmt.Sprintf("%s-%d", nsNamePrefix, i)
logger.Info("Creating namespace", "nsName", nsName)
if _, err = c.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}}, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("error creating namespace %v: %v", nsName, err)
}
expectedNsCount++
switch nsCount, err = getNsCount(ctx, c); {
case err != nil:
return fmt.Errorf("error counting namespaces: %v", err)
case nsCount != expectedNsCount:
return fmt.Errorf("expect %v namespaces but found %v", expectedNsCount, nsCount)
}
time.Sleep(1 * time.Second)
}
return nil
}
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 8c160f41-21f3-4141-9865-031b114fb248 | doOcpReq | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func doOcpReq(oc *exutil.CLI, verb OcpClientVerb, notEmpty bool, args ...string) string {
g.GinkgoHelper()
res, err := oc.AsAdmin().WithoutNamespace().Run(verb).Args(args...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if notEmpty {
o.Expect(res).ShouldNot(o.BeEmpty())
}
return res
} | hypershift | |||||
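A minimal usage sketch for doOcpReq, assuming a configured *exutil.CLI from the test context; the node name and jsonpath expression are hypothetical.
// Illustrative only: read a node's labels with the admin client and fail the
// spec if the output is empty (notEmpty=true). "worker-0" is a placeholder.
func exampleDoOcpReq(oc *exutil.CLI) {
	labels := doOcpReq(oc, OcpGet, true, "node", "worker-0", "-o", `jsonpath={.metadata.labels}`)
	e2e.Logf("labels of worker-0: %s", labels)
}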
function | openshift/openshift-tests-private | 940edd6c-86e1-4fca-b883-ed9c51dfb91e | checkSubstring | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func checkSubstring(src string, expect []string) {
if len(expect) == 0 {
o.Expect(expect).ShouldNot(o.BeEmpty())
}
for i := 0; i < len(expect); i++ {
o.Expect(src).To(o.ContainSubstring(expect[i]))
}
} | hypershift | |||||
function | openshift/openshift-tests-private | 4df175fb-0d01-4e34-b7c9-b9cb30293270 | checkSubstringWithNoExit | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func checkSubstringWithNoExit(src string, expect []string) bool {
if len(expect) == 0 {
e2e.Logf("Warning: expected substring slice is empty: %+v", expect)
return true
}
for i := 0; i < len(expect); i++ {
if !strings.Contains(src, expect[i]) {
e2e.Logf("expected sub string %s not in src %s", expect[i], src)
return false
}
}
return true
} | hypershift | ||||
function | openshift/openshift-tests-private | d58502d1-7317-41fd-a488-c1099578262a | create | ['workload'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func (wl *workload) create(oc *exutil.CLI, kubeconfig, parsedTemplate string, extraParams ...string) {
params := []string{
"--ignore-unknown-parameters=true", "-f", wl.template, "-p", "NAME=" + wl.name, "NAMESPACE=" + wl.namespace,
}
params = append(params, extraParams...)
err := wl.applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, params...)
o.Expect(err).NotTo(o.HaveOccurred())
} | hypershift | ||||
function | openshift/openshift-tests-private | fc602483-4f10-46a2-b79e-c6bfc8eb2f47 | delete | ['"os"', '"path/filepath"'] | ['workload'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func (wl *workload) delete(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
args := []string{"job", wl.name, "-n", wl.namespace}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | 34cf8816-e29c-4111-8e1f-afb7c987b2dd | applyResourceFromTemplate | ['workload'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func (wl *workload) applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
return applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, parameters...)
} | hypershift | ||||
function | openshift/openshift-tests-private | 7b5ce719-b122-4b83-b17c-efb40aed9533 | parseTemplateVarParams | ['"errors"', '"fmt"', '"reflect"', '"strconv"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func parseTemplateVarParams(obj interface{}) ([]string, error) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
return []string{}, errors.New("params must be a pointer pointed to a struct")
}
var params []string
t := v.Elem().Type()
for i := 0; i < t.NumField(); i++ {
if !v.Elem().Field(i).CanInterface() {
continue
}
varName := t.Field(i).Name
varType := t.Field(i).Type
varValue := v.Elem().Field(i).Interface()
tagName := t.Field(i).Tag.Get("json")
if tagName == "" {
continue
}
//handle non nil pointer that set the params explicitly
if varType.Kind() == reflect.Ptr {
if reflect.ValueOf(varValue).IsNil() {
continue
}
switch reflect.ValueOf(varValue).Elem().Type().Kind() {
case reflect.Int:
p := fmt.Sprintf("%s=%d", tagName, reflect.ValueOf(varValue).Elem().Interface().(int))
params = append(params, p)
case reflect.String:
params = append(params, tagName+"="+reflect.ValueOf(varValue).Elem().Interface().(string))
case reflect.Bool:
v, _ := reflect.ValueOf(varValue).Elem().Interface().(bool)
params = append(params, tagName+"="+strconv.FormatBool(v))
default:
e2e.Logf("parseTemplateVarParams params %v invalid, ignore it", varName)
}
continue
}
//non-pointer
switch varType.Kind() {
case reflect.String:
if varValue.(string) != "" {
params = append(params, tagName+"="+varValue.(string))
}
case reflect.Int:
params = append(params, tagName+"="+strconv.Itoa(varValue.(int)))
case reflect.Bool:
params = append(params, tagName+"="+strconv.FormatBool(varValue.(bool)))
default:
e2e.Logf("parseTemplateVarParams params %v not support, ignore it", varValue)
}
}
return params, nil
} | hypershift | ||||
function | openshift/openshift-tests-private | ef58d14f-6f23-411a-96d5-41dd33ba172d | applyResourceFromTemplate | ['"context"', '"os"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
var configFile string
defer func() {
if len(configFile) > 0 {
_ = os.Remove(configFile)
}
}()
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, true, func(_ context.Context) (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(parsedTemplate)
if err != nil {
e2e.Logf("Error processing template: %v, keep polling", err)
return false, nil
}
configFile = output
return true, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
var args = []string{"-f", configFile}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
return oc.AsAdmin().WithoutNamespace().Run("apply").Args(args...).Execute()
} | hypershift | ||||
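A minimal usage sketch for applyResourceFromTemplate, mirroring how workload.create feeds it parameters; the template path, parameter names and parsed-file name are hypothetical, and an empty kubeconfig targets the management cluster.
// Illustrative only: process an OpenShift template into a file and apply it.
func exampleApplyTemplate(oc *exutil.CLI) error {
	params := []string{
		"--ignore-unknown-parameters=true", "-f", "/tmp/job-template.yaml",
		"-p", "NAME=demo-job", "NAMESPACE=demo-ns",
	}
	return applyResourceFromTemplate(oc, "", "demo-job-parsed.yaml", params...)
}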
function | openshift/openshift-tests-private | a8b7162d-35a5-4547-8db9-5e63ca7d6538 | getClusterRegion | ['"io"', '"k8s.io/client-go/kubernetes"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getClusterRegion(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("node", `-ojsonpath={.items[].metadata.labels.topology\.kubernetes\.io/region}`).Output()
} | hypershift | ||||
function | openshift/openshift-tests-private | e395ac33-fdc5-4c68-bb18-7663ac5cdf69 | getBaseDomain | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getBaseDomain(oc *exutil.CLI) (string, error) {
str, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns/cluster", `-ojsonpath={.spec.baseDomain}`).Output()
if err != nil {
return "", err
}
index := strings.Index(str, ".")
if index == -1 {
return "", fmt.Errorf("can not parse baseDomain because not finding '.'")
}
return str[index+1:], nil
} | hypershift | ||||
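A minimal usage sketch for getBaseDomain; the domain value in the comment is hypothetical and only illustrates that the leading cluster-name label of .spec.baseDomain is stripped.
// Illustrative only: if dns/cluster .spec.baseDomain were "mycluster.qe.example.com"
// (hypothetical), getBaseDomain would return "qe.example.com".
func exampleGetBaseDomain(oc *exutil.CLI) {
	baseDomain, err := getBaseDomain(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("parent base domain: %s", baseDomain)
}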
function | openshift/openshift-tests-private | beae2103-66f6-4d13-a914-9c26ecb03fc0 | getAWSKey | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getAWSKey(oc *exutil.CLI) (string, string, error) {
accessKeyID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n", "kube-system", "-o", `template={{index .data "aws_access_key_id"|base64decode}}`).Output()
if err != nil {
return "", "", err
}
secureKey, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n", "kube-system", "-o", `template={{index .data "aws_secret_access_key"|base64decode}}`).Output()
if err != nil {
return "", "", err
}
return accessKeyID, secureKey, nil
} | hypershift | |||||
function | openshift/openshift-tests-private | 441926df-d1dc-4feb-ace6-05ea7ec97323 | getAzureKey | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getAzureKey(oc *exutil.CLI) (string, string, string, string, error) {
clientID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_client_id"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
clientSecret, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_client_secret"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
subscriptionID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_subscription_id"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
tenantID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o", `template={{index .data "azure_tenant_id"|base64decode}}`).Output()
if err != nil {
return "", "", "", "", err
}
return clientID, clientSecret, subscriptionID, tenantID, nil
} | hypershift | |||||
function | openshift/openshift-tests-private | 43d6cc6d-84d6-49eb-8a73-5c0ebca4ac61 | parse | ['"fmt"', '"reflect"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func parse(obj interface{}) ([]string, error) {
var params []string
v := reflect.ValueOf(obj)
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
k := v.Kind()
if k == reflect.Struct {
return parseStruct(v.Interface(), params)
}
return []string{}, fmt.Errorf("unsupported type: %s (supported types: struct, pointer to struct)", k)
} | hypershift | ||||
function | openshift/openshift-tests-private | 60fcc014-5233-43a9-a768-67766a6e99f9 | parseStruct | ['"reflect"', '"strconv"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func parseStruct(obj interface{}, params []string) ([]string, error) {
v := reflect.ValueOf(obj)
t := v.Type()
for i := 0; i < t.NumField(); i++ {
varType := t.Field(i).Type
varValueV := v.Field(i)
if !t.Field(i).IsExported() {
continue
}
if varType.Kind() == reflect.Ptr && varValueV.IsNil() {
continue
}
for varType.Kind() == reflect.Ptr {
varType = varType.Elem()
varValueV = varValueV.Elem()
}
varValue := varValueV.Interface()
varKind := varType.Kind()
var err error
if varKind == reflect.Struct {
params, err = parseStruct(varValue, params)
if err != nil {
return []string{}, err
}
continue
}
tagName := t.Field(i).Tag.Get("param")
if tagName == "" {
continue
}
switch {
case varKind == reflect.Map && isStringMap(varValueV):
params = append(params, stringMapToParams(varValue.(map[string]string), tagName)...)
case varKind == reflect.String:
if varValue.(string) != "" {
params = append(params, "--"+tagName+"="+varValue.(string))
}
case varKind == reflect.Int:
params = append(params, "--"+tagName+"="+strconv.Itoa(varValue.(int)))
case varKind == reflect.Int64:
params = append(params, "--"+tagName+"="+strconv.FormatInt(varValue.(int64), 10))
case varKind == reflect.Bool:
params = append(params, "--"+tagName+"="+strconv.FormatBool(varValue.(bool)))
default:
e2e.Logf("parseTemplateVarParams params %s %v not support, ignore it", varType.Kind(), varValue)
}
}
return params, nil
} | hypershift | ||||
function | openshift/openshift-tests-private | 523184c6-c83b-47d5-a354-a32a4e69eedc | isStringMap | ['"reflect"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func isStringMap(v reflect.Value) bool {
t := v.Type()
return t.Kind() == reflect.Map &&
t.Key().Kind() == reflect.String &&
t.Elem().Kind() == reflect.String
} | hypershift | ||||
function | openshift/openshift-tests-private | f3e4be94-e067-456b-aad3-018266c22ead | stringMapToParams | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func stringMapToParams(m map[string]string, flagName string) []string {
params := make([]string, 0, len(m))
for k, v := range m {
params = append(params, fmt.Sprintf("--%s=%s=%s", flagName, k, v))
}
return params
} | hypershift | ||||
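A minimal sketch of the param-tag convention handled by parse and parseStruct; demoOptions is a hypothetical struct defined only for illustration, not one of this package's option types.
// Illustrative only: a hypothetical struct showing how parse() builds CLI flags.
type demoOptions struct {
	Name        string            `param:"name"`
	Replicas    *int              `param:"node-pool-replicas"` // nil pointers are skipped
	FIPS        bool              `param:"fips"`
	Annotations map[string]string `param:"annotations"` // expands to one --annotations=k=v per entry
}

func exampleParse() {
	replicas := 2
	flags, err := parse(&demoOptions{
		Name:        "demo-hc",
		Replicas:    &replicas,
		FIPS:        true,
		Annotations: map[string]string{"hypershift.openshift.io/cleanup-cloud-resources": "true"},
	})
	o.Expect(err).NotTo(o.HaveOccurred())
	// flags now contains e.g. --name=demo-hc --node-pool-replicas=2 --fips=true
	// --annotations=hypershift.openshift.io/cleanup-cloud-resources=true (map order varies).
	e2e.Logf("generated flags: %s", strings.Join(flags, " "))
}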
function | openshift/openshift-tests-private | 28fe7688-0534-4d1c-b378-f0cfe09e3c45 | getSha256ByFile | ['"crypto/sha256"', '"fmt"', '"io"', '"os"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getSha256ByFile(file string) string {
ha := sha256.New()
f, err := os.Open(file)
o.Expect(err).ShouldNot(o.HaveOccurred())
defer f.Close()
_, err = io.Copy(ha, f)
o.Expect(err).ShouldNot(o.HaveOccurred())
return fmt.Sprintf("%X", ha.Sum(nil))
} | hypershift | ||||
function | openshift/openshift-tests-private | 9bc4c6c4-827c-4b71-b8ba-4dd144cb2b78 | getJSONByFile | ['"io/ioutil"', '"os"', '"github.com/tidwall/gjson"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getJSONByFile(filePath string, path string) gjson.Result {
file, err := os.Open(filePath)
o.Expect(err).ShouldNot(o.HaveOccurred())
defer file.Close()
con, err := ioutil.ReadAll(file)
o.Expect(err).ShouldNot(o.HaveOccurred())
return gjson.Get(string(con), path)
} | hypershift | ||||
function | openshift/openshift-tests-private | 1fffdbde-01f5-4895-b8fe-642aa54ea1bc | replaceInFile | ['"bytes"', '"io/ioutil"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func replaceInFile(file string, old string, new string) error {
input, err := ioutil.ReadFile(file)
if err != nil {
return err
}
output := bytes.Replace(input, []byte(old), []byte(new), -1)
err = ioutil.WriteFile(file, output, 0666)
return err
} | hypershift | ||||
function | openshift/openshift-tests-private | 0b35a602-db56-4315-8236-e19f742375f2 | execCMDOnWorkNodeByBastion | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func execCMDOnWorkNodeByBastion(showInfo bool, nodeIP, bastionIP, exec string) string {
var bashClient = NewCmdClient().WithShowInfo(showInfo)
privateKey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
cmd := `chmod 600 ` + privateKey + `; ssh -i ` + privateKey + ` -o StrictHostKeyChecking=no -o ProxyCommand="ssh -i ` + privateKey + " -o StrictHostKeyChecking=no -W %h:%p ec2-user@" + bastionIP + `" core@` + nodeIP + ` '` + exec + `'`
log, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return log
} | hypershift | |||||
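A minimal usage sketch for execCMDOnWorkNodeByBastion, which reaches a worker by using the bastion as an SSH ProxyCommand jump host (ec2-user@bastion, then core@node); both IP addresses are placeholders.
// Illustrative only: run "uptime" on a hosted-cluster worker through a bastion.
func exampleExecOnWorkerViaBastion() {
	uptime := execCMDOnWorkNodeByBastion(true, "10.0.128.15", "3.90.0.10", "uptime")
	e2e.Logf("worker uptime: %s", uptime)
}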
function | openshift/openshift-tests-private | f1d20e9e-e89e-4714-bf09-6fa689033a1e | getAllByFile | ['"io/ioutil"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getAllByFile(filePath string) string {
con, err := ioutil.ReadFile(filePath)
o.Expect(err).ShouldNot(o.HaveOccurred())
return string(con)
} | hypershift | ||||
function | openshift/openshift-tests-private | 20e17a04-5805-493f-baad-9e3fe3345cb8 | getAWSPrivateCredentials | ['"os"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getAWSPrivateCredentials(defaultCredPaths ...string) string {
g.GinkgoHelper()
// Always prefer environment variable override
if envOverride := os.Getenv(AWSHyperShiftPrivateSecretFile); envOverride != "" {
return envOverride
}
// Running in Prow
if exutil.GetTestEnv().IsRunningInProw() {
return DefaultAWSHyperShiftPrivateSecretFile
}
// Try default paths
var res string
for _, credPath := range defaultCredPaths {
info, err := os.Stat(credPath)
if err != nil {
e2e.Logf("Error inspecting path %s: %v, skipping", credPath, err)
continue
}
if mode := info.Mode(); !mode.IsRegular() {
e2e.Logf("Path %s does not point to a regular file but a(n) %v, skipping", credPath, mode)
continue
}
res = credPath
break
}
o.Expect(res).NotTo(o.BeEmpty())
return res
} | hypershift | ||||
function | openshift/openshift-tests-private | 71752c20-b864-4cc7-aea7-e344b10cc7d7 | subtractMinor | ['"github.com/blang/semver"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func subtractMinor(version *semver.Version, count uint64) *semver.Version {
result := *version
// Guard against uint64 underflow so the minor version clamps at 0 instead of wrapping around
if count > result.Minor {
	result.Minor = 0
} else {
	result.Minor -= count
}
return &result
} | hypershift | ||||
function | openshift/openshift-tests-private | 6a891809-ecf2-4b87-8130-48c7bc786b95 | maxInt64 | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func maxInt64(a, b uint64) uint64 {
if a > b {
return a
}
return b
} | hypershift | |||||
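A minimal usage sketch for subtractMinor with a hypothetical version, as used by the support-window helpers that follow.
// Illustrative only: version arithmetic used to derive older supported releases.
func exampleSubtractMinor() {
	latest := semver.MustParse("4.16.0")
	e2e.Logf("two minors back: %s", subtractMinor(&latest, 2).String()) // 4.14.0
}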
function | openshift/openshift-tests-private | 134e2945-00b9-43f1-b773-e1695cefb2e8 | getHyperShiftOperatorLatestSupportOCPVersion | ['"fmt"', '"regexp"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getHyperShiftOperatorLatestSupportOCPVersion() string {
var bashClient = NewCmdClient().WithShowInfo(true)
res, err := bashClient.Run("oc logs -n hypershift -lapp=operator --tail=-1 | head -1").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
re := regexp.MustCompile(`Latest supported OCP: (\d+\.\d+\.\d+)`)
match := re.FindStringSubmatch(res)
o.Expect(len(match) > 1).Should(o.BeTrue())
return match[1]
} | hypershift | ||||
function | openshift/openshift-tests-private | eea4c8b5-3473-48ac-bd2a-32a6dd7a95f2 | getHyperShiftSupportedOCPVersion | ['"github.com/blang/semver"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getHyperShiftSupportedOCPVersion() (semver.Version, semver.Version) {
v := getHyperShiftOperatorLatestSupportOCPVersion()
latestSupportedVersion := semver.MustParse(v)
minSupportedVersion := semver.MustParse(subtractMinor(&latestSupportedVersion, uint64(SupportedPreviousMinorVersions)).String())
return latestSupportedVersion, minSupportedVersion
} | hypershift | ||||
function | openshift/openshift-tests-private | 45f45c11-ccb8-4724-a9d1-7479181c0256 | getMinSupportedOCPVersion | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getMinSupportedOCPVersion() string {
_, minVersion := getHyperShiftSupportedOCPVersion()
return minVersion.String()
} | hypershift | |||||
function | openshift/openshift-tests-private | 1ab2df79-1646-4e33-ab3c-f56fee9e83d3 | getAWSMgmtClusterRegionAvailableZones | ['"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getAWSMgmtClusterRegionAvailableZones(oc *exutil.CLI) []string {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSessionWithRegion(region)
availableZones, err := awsClient.GetAvailabilityZoneNames()
o.Expect(err).ShouldNot(o.HaveOccurred())
return availableZones
} | hypershift | ||||
function | openshift/openshift-tests-private | 4d48fb12-f439-442b-94cf-9b7115728cd3 | removeNodesTaint | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func removeNodesTaint(oc *exutil.CLI, nodes []string, taintKey string) {
for _, no := range nodes {
nodeInfo := doOcpReq(oc, OcpGet, false, "no", no, "--ignore-not-found")
if nodeInfo != "" {
doOcpReq(oc, OcpAdm, false, "taint", "node", no, taintKey+"-")
}
}
} | hypershift | |||||
function | openshift/openshift-tests-private | 2c5ef4eb-65c9-4aca-8593-23e1bb4094a0 | removeNodesLabel | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func removeNodesLabel(oc *exutil.CLI, nodes []string, labelKey string) {
for _, no := range nodes {
nodeInfo := doOcpReq(oc, OcpGet, false, "no", no, "--ignore-not-found")
if nodeInfo != "" {
doOcpReq(oc, OcpLabel, false, "node", no, labelKey+"-")
}
}
} | hypershift | |||||
function | openshift/openshift-tests-private | 2679d6f9-5351-42ac-9a9d-7d7d8295f2e9 | getLatestUnsupportedOCPVersion | ['"github.com/blang/semver"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getLatestUnsupportedOCPVersion() string {
min := semver.MustParse(getMinSupportedOCPVersion())
return semver.MustParse(subtractMinor(&min, uint64(1)).String()).String()
} | hypershift | ||||
function | openshift/openshift-tests-private | 75a81a91-bf97-4c4e-8f3e-a734bf412e11 | getVersionWithMajorAndMinor | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getVersionWithMajorAndMinor(version string) (string, error) {
v := strings.Split(version, ".")
if len(v) == 0 || len(v) > 3 {
return "", fmt.Errorf("invalid version")
}
if len(v) < 3 {
return version, nil
} else {
return strings.Join(v[:2], "."), nil
}
} | hypershift | ||||
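A minimal usage sketch for getVersionWithMajorAndMinor with hypothetical version strings.
// Illustrative only: "4.12.7" is shortened to "4.12"; a two-part version such
// as "4.12" is returned unchanged; more than three parts results in an error.
func exampleMajorMinor() {
	shortVersion, err := getVersionWithMajorAndMinor("4.12.7")
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("major.minor: %s", shortVersion) // 4.12
}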
function | openshift/openshift-tests-private | 935f615b-2ee5-4644-8617-76bb433f1c7d | isRequestServingComponent | ['"regexp"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func isRequestServingComponent(name string) bool {
servingComponentRegex := regexp.MustCompile("^(kube-apiserver|ignition-server-proxy|oauth-openshift|router).*")
return servingComponentRegex.MatchString(name)
} | hypershift | ||||
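A minimal usage sketch showing which component names the serving-component regex accepts; the names are hypothetical.
// Illustrative only: sample matches for the request-serving component check.
func exampleIsRequestServing() {
	e2e.Logf("kube-apiserver-abc12: %v", isRequestServingComponent("kube-apiserver-abc12"))         // true
	e2e.Logf("router-default: %v", isRequestServingComponent("router-default"))                     // true
	e2e.Logf("cluster-version-operator: %v", isRequestServingComponent("cluster-version-operator")) // false
}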
function | openshift/openshift-tests-private | d1616ea7-58c7-4bcb-a070-4017de1019dc | getTestCaseIDs | ['"regexp"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getTestCaseIDs() (testCaseIDs []string) {
pattern := `-(\d{5,})-`
re := regexp.MustCompile(pattern)
for _, match := range re.FindAllStringSubmatch(g.CurrentSpecReport().FullText(), -1) {
// Should be fulfilled all the time but just in case
o.Expect(match).To(o.HaveLen(2))
testCaseIDs = append(testCaseIDs, match[1])
}
o.Expect(testCaseIDs).NotTo(o.BeEmpty())
return testCaseIDs
} | hypershift | ||||
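A minimal sketch of the ID-extraction pattern applied to a hypothetical spec title of the shape used in this suite; getTestCaseIDs itself reads the title from the running Ginkgo spec.
// Illustrative only: the same regex applied to a hypothetical spec title.
func exampleTestCaseIDPattern() {
	title := "NonPreRelease-Longduration-Author:qe-Critical-12345-[HyperShiftINSTALL] some scenario"
	re := regexp.MustCompile(`-(\d{5,})-`)
	for _, m := range re.FindAllStringSubmatch(title, -1) {
		e2e.Logf("extracted test case ID: %s", m[1]) // 12345
	}
}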
function | openshift/openshift-tests-private | 1e1cd4ee-e326-4b83-8997-87e4e5231783 | getResourceNamePrefix | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getResourceNamePrefix() string {
return fmt.Sprintf("ocp%s-%s", getTestCaseIDs()[0], strings.ToLower(exutil.RandStr(4)))
} | hypershift | ||||
function | openshift/openshift-tests-private | 4fc4f1c8-9ff4-4566-a7d7-17b968f5fee0 | createTempDir | ['"os"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func createTempDir(dir string) {
g.DeferCleanup(func() {
e2e.Logf("Removing temporary directory %s", dir)
o.Expect(os.RemoveAll(dir)).NotTo(o.HaveOccurred(), "failed to remove temporary directory")
})
e2e.Logf("Creating temporary directory %s", dir)
o.Expect(os.MkdirAll(dir, 0755)).NotTo(o.HaveOccurred(), "failed to create temporary directory")
} | hypershift | ||||
function | openshift/openshift-tests-private | 74076f23-f473-45bd-8f7e-2a9afafbb4f4 | logHypershiftCLIVersion | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func logHypershiftCLIVersion(c *CLI) {
version, err := c.WithShowInfo(true).Run("hypershift version").Output()
if err != nil {
e2e.Logf("Failed to get hypershift CLI version: %v", err)
}
e2e.Logf("Found hypershift CLI version:\n%s", version)
} | hypershift | |||||
function | openshift/openshift-tests-private | 2f703082-5790-44d4-9b21-508a6e0d1b7e | getNsCount | ['"context"', '"fmt"', '"k8s.io/client-go/kubernetes"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func getNsCount(ctx context.Context, c kubernetes.Interface) (int, error) {
nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
return 0, fmt.Errorf("error listing namespaces: %w", err)
}
return len(nsList.Items), nil
} | hypershift | ||||
function | openshift/openshift-tests-private | 3b53aab8-8900-45ab-958d-2643bb71142a | createAndCheckNs | ['"context"', '"fmt"', '"log/slog"', '"time"', '"k8s.io/client-go/kubernetes"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/util.go | func createAndCheckNs(ctx context.Context, c kubernetes.Interface, logger *slog.Logger, numNsToCreate int, nsNamePrefix string) func() error {
return func() error {
g.GinkgoRecover()
logger = logger.With("id", ctx.Value(ctxKeyId))
nsCount, err := getNsCount(ctx, c)
if err != nil {
return fmt.Errorf("error counting namespaces: %v", err)
}
expectedNsCount := nsCount
logger.Info("Got initial namespace count", "nsCount", nsCount)
for i := 0; i < numNsToCreate; i++ {
nsName := fmt.Sprintf("%s-%d", nsNamePrefix, i)
logger.Info("Creating namespace", "nsName", nsName)
if _, err = c.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}}, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("error creating namespace %v: %v", nsName, err)
}
expectedNsCount++
switch nsCount, err = getNsCount(ctx, c); {
case err != nil:
return fmt.Errorf("error counting namespaces: %v", err)
case nsCount != expectedNsCount:
return fmt.Errorf("expect %v namespaces but found %v", expectedNsCount, nsCount)
}
time.Sleep(1 * time.Second)
}
return nil
}
} | hypershift | ||||
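A minimal sketch of driving createAndCheckNs concurrently, one worker per hosted-cluster client; the use of errgroup (golang.org/x/sync/errgroup) and the namespace prefix are assumptions made for illustration only.
// Illustrative only: run one namespace-creation worker per guest-cluster client.
func exampleCreateAndCheckNs(ctx context.Context, guestClients []kubernetes.Interface, logger *slog.Logger) error {
	// errgroup is assumed here; any mechanism that runs func() error values concurrently works.
	eg, ctx := errgroup.WithContext(ctx)
	for i, client := range guestClients {
		workerCtx := context.WithValue(ctx, ctxKeyId, i)
		eg.Go(createAndCheckNs(workerCtx, client, logger, 5, fmt.Sprintf("demo-ns-%d", i)))
	}
	return eg.Wait()
}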
file | openshift/openshift-tests-private | af9209eb-0d7e-430a-8f61-2be9b770d158 | installhelper | import (
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
o "github.com/onsi/gomega"
"github.com/tidwall/gjson"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/ptr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/installhelper.go | package hypershift
import (
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
o "github.com/onsi/gomega"
"github.com/tidwall/gjson"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/ptr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
type installHelper struct {
oc *exutil.CLI
bucketName string
region string
artifactDir string
dir string
s3Client *exutil.S3Client
iaasPlatform string
installType AWSEndpointAccessType
externalDNS bool
}
type createCluster struct {
PullSecret string `param:"pull-secret"`
AWSCreds string `param:"aws-creds"`
AzureCreds string `param:"azure-creds"`
Name string `param:"name"`
BaseDomain string `param:"base-domain"`
Namespace string `param:"namespace"`
NodePoolReplicas *int `param:"node-pool-replicas"`
Region string `param:"region"`
Location string `param:"location"`
InfraJSON string `param:"infra-json"`
IamJSON string `param:"iam-json"`
InfraID string `param:"infra-id"`
RootDiskSize *int `param:"root-disk-size"`
AdditionalTags string `param:"additional-tags"`
ControlPlaneAvailabilityPolicy string `param:"control-plane-availability-policy"`
InfraAvailabilityPolicy string `param:"infra-availability-policy"`
Zones string `param:"zones"`
SSHKey string `param:"ssh-key"`
GenerateSSH bool `param:"generate-ssh"`
OLMCatalogPlacement string `param:"olm-catalog-placement"`
FIPS bool `param:"fips"`
Annotations map[string]string `param:"annotations"`
EndpointAccess AWSEndpointAccessType `param:"endpoint-access"`
ExternalDnsDomain string `param:"external-dns-domain"`
ReleaseImage string `param:"release-image"`
ResourceGroupTags string `param:"resource-group-tags"`
EncryptionKeyId string `param:"encryption-key-id"`
}
type infra struct {
AWSCreds string `param:"aws-creds"`
AzureCreds string `param:"azure-creds"`
Name string `param:"name"`
BaseDomain string `param:"base-domain"`
InfraID string `param:"infra-id"`
Location string `param:"location"`
Region string `param:"region"`
RHCOSImage string `param:"rhcos-image"`
Zones string `param:"zones"`
OutputFile string `param:"output-file"`
}
type iam struct {
AWSCreds string `param:"aws-creds"`
InfraID string `param:"infra-id"`
LocalZoneID string `param:"local-zone-id"`
PrivateZoneID string `param:"private-zone-id"`
PublicZoneID string `param:"public-zone-id"`
Region string `param:"region"`
OutputFile string `param:"output-file"`
}
type bastion struct {
Region string `param:"region"`
InfraID string `param:"infra-id"`
SSHKeyFile string `param:"ssh-key-file"`
AWSCreds string `param:"aws-creds"`
}
func (c *createCluster) withName(name string) *createCluster {
c.Name = name
return c
}
func (c *createCluster) withNodePoolReplicas(NodePoolReplicas int) *createCluster {
c.NodePoolReplicas = &NodePoolReplicas
return c
}
func (c *createCluster) withInfraJSON(InfraJSON string) *createCluster {
c.InfraJSON = InfraJSON
return c
}
func (c *createCluster) withIamJSON(IamJSON string) *createCluster {
c.IamJSON = IamJSON
return c
}
func (c *createCluster) withRootDiskSize(RootDiskSize int) *createCluster {
c.RootDiskSize = &RootDiskSize
return c
}
func (c *createCluster) withAdditionalTags(AdditionalTags string) *createCluster {
c.AdditionalTags = AdditionalTags
return c
}
func (c *createCluster) withInfraAvailabilityPolicy(InfraAvailabilityPolicy string) *createCluster {
c.InfraAvailabilityPolicy = InfraAvailabilityPolicy
return c
}
func (c *createCluster) withControlPlaneAvailabilityPolicy(ControlPlaneAvailabilityPolicy string) *createCluster {
c.ControlPlaneAvailabilityPolicy = ControlPlaneAvailabilityPolicy
return c
}
func (c *createCluster) withZones(Zones string) *createCluster {
c.Zones = Zones
return c
}
func (c *createCluster) withSSHKey(SSHKey string) *createCluster {
c.SSHKey = SSHKey
return c
}
func (c *createCluster) withInfraID(InfraID string) *createCluster {
c.InfraID = InfraID
return c
}
func (c *createCluster) withEncryptionKeyId(encryptionKeyId string) *createCluster {
c.EncryptionKeyId = encryptionKeyId
return c
}
func (c *createCluster) withReleaseImage(releaseImage string) *createCluster {
c.ReleaseImage = releaseImage
return c
}
func (i *infra) withInfraID(InfraID string) *infra {
i.InfraID = InfraID
return i
}
func (i *infra) withOutputFile(OutputFile string) *infra {
i.OutputFile = OutputFile
return i
}
func (i *infra) withName(Name string) *infra {
i.Name = Name
return i
}
func (i *infra) withRHCOSImage(rhcosImage string) *infra {
i.RHCOSImage = rhcosImage
return i
}
func (i *iam) withInfraID(InfraID string) *iam {
i.InfraID = InfraID
return i
}
func (i *iam) withOutputFile(OutputFile string) *iam {
i.OutputFile = OutputFile
return i
}
func (c *createCluster) withEndpointAccess(endpointAccess AWSEndpointAccessType) *createCluster {
c.EndpointAccess = endpointAccess
return c
}
func (c *createCluster) withAnnotation(key, value string) *createCluster {
if c.Annotations == nil {
c.Annotations = make(map[string]string)
}
c.Annotations[key] = value
return c
}
func (c *createCluster) withAnnotationMap(annotations map[string]string) *createCluster {
if c.Annotations == nil {
c.Annotations = make(map[string]string)
}
for key, value := range annotations {
c.Annotations[key] = value
}
return c
}
func (c *createCluster) withExternalDnsDomain(externalDnsDomain string) *createCluster {
c.ExternalDnsDomain = externalDnsDomain
return c
}
func (c *createCluster) withBaseDomain(baseDomain string) *createCluster {
c.BaseDomain = baseDomain
return c
}
func (c *createCluster) withResourceGroupTags(rgTags string) *createCluster {
c.ResourceGroupTags = rgTags
return c
}
func (receiver *installHelper) createClusterAWSCommonBuilder() *createCluster {
nodePoolReplicas := 3
baseDomain, err := getBaseDomain(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("current baseDomain %s", baseDomain)
e2e.Logf("extract secret/pull-secret")
receiver.extractPullSecret()
return &createCluster{
PullSecret: receiver.dir + "/.dockerconfigjson",
AWSCreds: receiver.dir + "/credentials",
BaseDomain: baseDomain,
Region: receiver.region,
Namespace: receiver.oc.Namespace(),
NodePoolReplicas: &nodePoolReplicas,
}
}
func (receiver *installHelper) createClusterAzureCommonBuilder() *createCluster {
nodePoolReplicas := 3
baseDomain, err := getBaseDomain(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("current baseDomain:%s", baseDomain)
location, err := getClusterRegion(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("current location:%s", location)
e2e.Logf("extract secret/pull-secret")
receiver.extractPullSecret()
return &createCluster{
PullSecret: receiver.dir + "/.dockerconfigjson",
AzureCreds: receiver.dir + "/credentials",
BaseDomain: baseDomain,
Location: location,
Namespace: receiver.oc.Namespace(),
NodePoolReplicas: &nodePoolReplicas,
}
}
func (receiver *installHelper) createClusterAROCommonBuilder() *createCluster {
location, err := getClusterRegion(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get cluster location")
return &createCluster{
Annotations: map[string]string{podSecurityAdmissionOverrideLabelKey: string(podSecurityBaseline)},
AzureCreds: exutil.MustGetAzureCredsLocation(),
BaseDomain: hypershiftBaseDomainAzure,
ExternalDnsDomain: hypershiftExternalDNSDomainAzure,
FIPS: true,
GenerateSSH: true,
Location: location,
Namespace: receiver.oc.Namespace(),
NodePoolReplicas: ptr.To(2),
OLMCatalogPlacement: olmCatalogPlacementGuest,
PullSecret: exutil.GetTestEnv().PullSecretLocation,
ReleaseImage: exutil.GetLatestReleaseImageFromEnv(),
}
}
func (receiver *installHelper) createInfraCommonBuilder() *infra {
baseDomain, err := getBaseDomain(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("current baseDomain %s", baseDomain)
return &infra{
AWSCreds: receiver.dir + "/credentials",
BaseDomain: baseDomain,
Region: receiver.region,
}
}
func (receiver *installHelper) createInfraAROCommonBuilder() *infra {
location, err := getClusterRegion(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get cluster location")
return &infra{
AzureCreds: exutil.MustGetAzureCredsLocation(),
BaseDomain: hypershiftBaseDomainAzure,
Location: location,
}
}
func (receiver *installHelper) createIamCommonBuilder(infraFile string) *iam {
file, err := os.Open(infraFile)
o.Expect(err).ShouldNot(o.HaveOccurred())
defer file.Close()
con, err := ioutil.ReadAll(file)
o.Expect(err).NotTo(o.HaveOccurred())
return &iam{
AWSCreds: receiver.dir + "/credentials",
Region: receiver.region,
PublicZoneID: gjson.Get(string(con), "publicZoneID").Str,
PrivateZoneID: gjson.Get(string(con), "privateZoneID").Str,
LocalZoneID: gjson.Get(string(con), "localZoneID").Str,
}
}
func (receiver *installHelper) createNodePoolAzureCommonBuilder(clusterName string) *NodePool {
nodeCount := 1
return &NodePool{
Namespace: receiver.oc.Namespace(),
ClusterName: clusterName,
NodeCount: &nodeCount,
}
}
func (receiver *installHelper) newAWSS3Client() string {
accessKeyID, secureKey, err := getAWSKey(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred())
region, err := getClusterRegion(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("current region %s", region)
content := "[default]\naws_access_key_id=" + accessKeyID + "\naws_secret_access_key=" + secureKey
filePath := receiver.dir + "/credentials"
err = ioutil.WriteFile(filePath, []byte(content), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("extract AWS Credentials")
receiver.s3Client = exutil.NewS3ClientFromCredFile(filePath, "default", region)
receiver.region = region
return filePath
}
func (receiver *installHelper) createAWSS3Bucket() {
o.Expect(receiver.s3Client.HeadBucket(receiver.bucketName)).Should(o.HaveOccurred())
o.Expect(receiver.s3Client.CreateBucket(receiver.bucketName)).ShouldNot(o.HaveOccurred())
bucketPolicyTemplate := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*"
}
]
}`
policy := fmt.Sprintf(bucketPolicyTemplate, receiver.bucketName)
o.Expect(receiver.s3Client.PutBucketPolicy(receiver.bucketName, policy)).To(o.Succeed(), "an error happened while adding a policy to the bucket")
}
func (receiver *installHelper) deleteAWSS3Bucket() {
o.Expect(receiver.s3Client.DeleteBucket(receiver.bucketName)).ShouldNot(o.HaveOccurred())
}
func (receiver *installHelper) extractAzureCredentials() {
clientID, clientSecret, subscriptionID, tenantID, err := getAzureKey(receiver.oc)
o.Expect(err).NotTo(o.HaveOccurred())
content := "subscriptionId: " + subscriptionID + "\ntenantId: " + tenantID + "\nclientId: " + clientID + "\nclientSecret: " + clientSecret
filePath := receiver.dir + "/credentials"
err = ioutil.WriteFile(filePath, []byte(content), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (receiver *installHelper) hyperShiftInstall() {
// Enable defaulting webhook since 4.14
// Deploy latest hypershift operator since 4.16
// Wait until the hypershift operator has been rolled out and its webhook service is available
cmd := "hypershift install --enable-defaulting-webhook=true --wait-until-available "
// Build up the platform-related part of the installation command
switch receiver.iaasPlatform {
case "aws":
e2e.Logf("Config AWS Bucket")
credsPath := receiver.newAWSS3Client()
receiver.createAWSS3Bucket()
// OIDC
cmd += fmt.Sprintf("--oidc-storage-provider-s3-bucket-name %s --oidc-storage-provider-s3-credentials %s --oidc-storage-provider-s3-region %s ", receiver.bucketName, credsPath, receiver.region)
// Private clusters
if receiver.installType == PublicAndPrivate || receiver.installType == Private {
privateCred := getAWSPrivateCredentials(credsPath)
cmd += fmt.Sprintf(" --private-platform AWS --aws-private-creds %s --aws-private-region=%s ", privateCred, receiver.region)
}
if receiver.externalDNS {
cmd += fmt.Sprintf(" --external-dns-provider=aws --external-dns-credentials=%s --external-dns-domain-filter=%s ", receiver.dir+"/credentials", hypershiftExternalDNSDomainAWS)
}
case "azure":
e2e.Logf("extract Azure Credentials")
receiver.extractAzureCredentials()
}
e2e.Logf("run hypershift install command: %s", cmd)
_, err := NewCmdClient().WithShowInfo(true).Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (receiver *installHelper) hyperShiftUninstall() {
// hypershift install renders crds before resources by default.
// Delete resources before crds to avoid unrecognized resource failure.
e2e.Logf("Uninstalling the Hypershift operator and relevant resources")
var bashClient = NewCmdClient().WithShowInfo(true)
_, err := bashClient.Run("hypershift install render --enable-defaulting-webhook=true --format=yaml --outputs resources | oc delete -f -").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
_, err = bashClient.Run("hypershift install render --enable-defaulting-webhook=true --format=yaml --outputs crds | oc delete -f -").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("Waiting until the Hypershift operator and relevant resources are uninstalled")
o.Eventually(func() string {
value, er := receiver.oc.AsAdmin().WithoutNamespace().Run("get").Args("all", "-n", "hypershift").Output()
if er != nil {
e2e.Logf("error occurred: %v, try next round", er)
return ""
}
return value
}, ShortTimeout, ShortTimeout/10).Should(o.ContainSubstring("No resources found"), "hyperShift operator uninstall error")
}
func (receiver *installHelper) extractPullSecret() {
err := receiver.oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--to="+receiver.dir, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (receiver *installHelper) createAWSHostedClusters(createCluster *createCluster) *hostedCluster {
vars, err := parse(createCluster)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create cluster aws %s %s", strings.Join(vars, " "), ` --annotations=hypershift.openshift.io/cleanup-cloud-resources="true"`)
e2e.Logf("run hypershift create command: %s", cmd)
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("check AWS HostedClusters ready")
cluster := newHostedCluster(receiver.oc, createCluster.Namespace, createCluster.Name)
o.Eventually(cluster.pollHostedClustersReady(), ClusterInstallTimeout, ClusterInstallTimeout/20).Should(o.BeTrue(), "AWS HostedClusters install error")
infraID, err := cluster.getInfraID()
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster.InfraID = infraID
return cluster
}
func (receiver *installHelper) createAWSHostedClusterWithoutCheck(createCluster *createCluster) *hostedCluster {
vars, err := parse(createCluster)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create cluster aws %s", strings.Join(vars, " "))
e2e.Logf("run hypershift create command: %s", cmd)
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return newHostedCluster(receiver.oc, createCluster.Namespace, createCluster.Name)
}
func (receiver *installHelper) createAzureHostedClusters(createCluster *createCluster) *hostedCluster {
cluster := receiver.createAzureHostedClusterWithoutCheck(createCluster)
o.Eventually(cluster.pollHostedClustersReady(), ClusterInstallTimeoutAzure, ClusterInstallTimeoutAzure/20).Should(o.BeTrue(), "azure HostedClusters install error")
infraID, err := cluster.getInfraID()
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster.InfraID = infraID
return cluster
}
func (receiver *installHelper) createAzureHostedClusterWithoutCheck(createCluster *createCluster) *hostedCluster {
vars, err := parse(createCluster)
o.Expect(err).ShouldNot(o.HaveOccurred())
cmd := fmt.Sprintf("hypershift create cluster azure %s", strings.Join(vars, " "))
_, err = NewCmdClient().WithShowInfo(true).Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return newHostedCluster(receiver.oc, createCluster.Namespace, createCluster.Name)
}
func (receiver *installHelper) createAWSHostedClustersRender(createCluster *createCluster, exec func(filename string) error) *hostedCluster {
vars, err := parse(createCluster)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
yamlFile := fmt.Sprintf("%s/%s.yaml", receiver.dir, createCluster.Name)
_, err = bashClient.Run(fmt.Sprintf("hypershift create cluster aws %s --render > %s", strings.Join(vars, " "), yamlFile)).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("exec call-back func")
err = exec(yamlFile)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("apply -f Render...")
err = receiver.oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", yamlFile).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("check AWS HostedClusters ready")
cluster := newHostedCluster(receiver.oc, createCluster.Namespace, createCluster.Name)
o.Eventually(cluster.pollHostedClustersReady(), ClusterInstallTimeout, ClusterInstallTimeout/20).Should(o.BeTrue(), "AWS HostedClusters install error")
infraID, err := cluster.getInfraID()
o.Expect(err).ShouldNot(o.HaveOccurred())
createCluster.InfraID = infraID
return cluster
}
func (receiver *installHelper) destroyAWSHostedClusters(createCluster *createCluster) {
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift destroy cluster aws --aws-creds %s --namespace %s --name %s --region %s", createCluster.AWSCreds, createCluster.Namespace, createCluster.Name, createCluster.Region)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("check destroy AWS HostedClusters")
o.Eventually(pollGetHostedClusters(receiver.oc, receiver.oc.Namespace()), ShortTimeout, ShortTimeout/10).ShouldNot(o.ContainSubstring(createCluster.Name), "destroy AWS HostedClusters error")
}
func (receiver *installHelper) destroyAzureHostedClusters(createCluster *createCluster) {
e2e.Logf("Destroying Azure HC")
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift destroy cluster azure --azure-creds %s --namespace %s --name %s --location %s", createCluster.AzureCreds, createCluster.Namespace, createCluster.Name, createCluster.Location)
out, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred(), "error destroying Azure HC")
e2e.Logf("hypershift destroy output:\n%v", out)
e2e.Logf("Making sure that the HC is gone")
o.Expect(getHostedClusters(receiver.oc, receiver.oc.Namespace())).ShouldNot(o.ContainSubstring(createCluster.Name), "HC persists even after deletion")
}
func (receiver *installHelper) dumpHostedCluster(createCluster *createCluster) error {
// Ensure dump dir exists
dumpDir := path.Join(receiver.dir, createCluster.Name)
if err := os.MkdirAll(dumpDir, 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %w", dumpDir, err)
}
// Dump HC
cmd := fmt.Sprintf("hypershift dump cluster --artifact-dir %s --dump-guest-cluster --name %s --namespace %s", dumpDir, createCluster.Name, createCluster.Namespace)
_ = NewCmdClient().WithShowInfo(true).Run(cmd).Execute()
// Ensure dump artifact dir exists
dumpArtifactDir := path.Join(receiver.artifactDir, createCluster.Name)
if err := os.MkdirAll(dumpArtifactDir, 0755); err != nil {
return fmt.Errorf("failed to create artifact directory %s: %w", dumpArtifactDir, err)
}
// Move dump archive to artifact dir
exutil.MoveFileToPath(path.Join(dumpDir, dumpArchiveName), path.Join(dumpArtifactDir, dumpArchiveName))
e2e.Logf("Dump archive saved to %s", dumpArtifactDir)
return nil
}
func (receiver *installHelper) dumpAROHostedCluster(createCluster *createCluster) error {
if err := os.Setenv(managedServiceKey, managedServiceAROHCP); err != nil {
e2e.Logf("Error setting env %s to %s: %v", managedServiceKey, managedServiceAROHCP, err)
}
return receiver.dumpHostedCluster(createCluster)
}
func (receiver *installHelper) dumpDestroyAROHostedCluster(createCluster *createCluster) {
if g.GetFailer().GetState().Is(types.SpecStateFailureStates) {
if err := receiver.dumpAROHostedCluster(createCluster); err != nil {
e2e.Logf("Error dumping ARO hosted cluster %s: %v", createCluster.Name, err)
}
}
receiver.destroyAzureHostedClusters(createCluster)
}
func (receiver *installHelper) dumpDeleteAROHostedCluster(createCluster *createCluster) {
if g.GetFailer().GetState().Is(types.SpecStateFailureStates) {
if err := receiver.dumpAROHostedCluster(createCluster); err != nil {
e2e.Logf("Error dumping ARO hosted cluster %s: %v", createCluster.Name, err)
}
}
doOcpReq(receiver.oc, OcpDelete, true, "hc", createCluster.Name, "-n", createCluster.Namespace)
}
func (receiver *installHelper) deleteHostedClustersManual(createCluster *createCluster) {
hostedClustersNames, err := getHostedClusters(receiver.oc, receiver.oc.Namespace())
o.Expect(err).ShouldNot(o.HaveOccurred())
if strings.Contains(hostedClustersNames, createCluster.Name) {
err = receiver.oc.AsAdmin().WithoutNamespace().Run("delete").Args("hostedcluster", "-n", receiver.oc.Namespace(), createCluster.Name).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
receiver.destroyAWSIam(&iam{AWSCreds: createCluster.AWSCreds, Region: createCluster.Region, InfraID: createCluster.InfraID})
receiver.destroyAWSInfra(&infra{AWSCreds: createCluster.AWSCreds, Region: createCluster.Region, InfraID: createCluster.InfraID, BaseDomain: createCluster.BaseDomain})
}
func (receiver *installHelper) createHostedClusterKubeconfig(createCluster *createCluster, cluster *hostedCluster) {
var bashClient = NewCmdClient().WithShowInfo(true)
hostedClustersKubeconfigFile := receiver.dir + "/guestcluster-kubeconfig-" + createCluster.Name
_, err := bashClient.Run(fmt.Sprintf("hypershift create kubeconfig --namespace %s --name %s > %s", createCluster.Namespace, createCluster.Name, hostedClustersKubeconfigFile)).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
cluster.hostedClustersKubeconfigFile = hostedClustersKubeconfigFile
}
func (receiver *installHelper) createAWSInfra(infra *infra) {
vars, err := parse(infra)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create infra aws %s", strings.Join(vars, " "))
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (receiver *installHelper) destroyAWSInfra(infra *infra) {
e2e.Logf("destroy AWS infrastructure")
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift destroy infra aws --infra-id %s --aws-creds %s --base-domain %s --region %s", infra.InfraID, infra.AWSCreds, infra.BaseDomain, infra.Region)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
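// createAzureInfra creates the Azure infrastructure for a hosted cluster via "hypershift create infra azure".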
func (receiver *installHelper) createAzureInfra(infra *infra) {
vars, err := parse(infra)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create infra azure %s", strings.Join(vars, " "))
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
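// destroyAzureInfra destroys the Azure infrastructure previously created for a hosted cluster.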
func (receiver *installHelper) destroyAzureInfra(infra *infra) {
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift destroy infra azure --infra-id %s --azure-creds %s --location %s --name %s", infra.InfraID, infra.AzureCreds, infra.Location, infra.Name)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
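// createAWSIam creates the AWS IAM resources for a hosted cluster via "hypershift create iam aws".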
func (receiver *installHelper) createAWSIam(iam *iam) {
vars, err := parse(iam)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create iam aws %s", strings.Join(vars, " "))
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (receiver *installHelper) destroyAWSIam(iam *iam) {
e2e.Logf("destroy AWS iam")
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift destroy iam aws --infra-id %s --aws-creds %s --region %s", iam.InfraID, iam.AWSCreds, iam.Region)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
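// deleteHostedClustersCRAllBackground deletes all HostedCluster CRs in the helper's namespace without waiting for completion.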
func (receiver *installHelper) deleteHostedClustersCRAllBackground() {
_, _, _, err := receiver.oc.AsAdmin().WithoutNamespace().Run("delete").Args("hostedcluster", "--all", "-n", receiver.oc.Namespace()).Background()
o.Expect(err).NotTo(o.HaveOccurred())
}
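// createAzureNodePool creates an Azure NodePool for a hosted cluster via "hypershift create nodepool azure".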
func (receiver *installHelper) createAzureNodePool(nodePool *NodePool) {
vars, err := parse(nodePool)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create nodepool azure %s", strings.Join(vars, " "))
_, err = bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
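// createAWSBastion creates an AWS bastion host for the hosted cluster and returns its public IP address parsed from the command output.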
func (receiver *installHelper) createAWSBastion(bastion *bastion) string {
vars, err := parse(bastion)
o.Expect(err).ShouldNot(o.HaveOccurred())
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create bastion aws %s", strings.Join(vars, " "))
log, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
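// Extract the first IPv4 address (the bastion's public IP) from the command output.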
numBlock := "(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])"
regexPattern := numBlock + "\\." + numBlock + "\\." + numBlock + "\\." + numBlock
regEx := regexp.MustCompile(regexPattern)
return regEx.FindString(log)
}
func (receiver *installHelper) destroyAWSBastion(bastion *bastion) {
e2e.Logf("destroy AWS bastion")
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift destroy bastion aws --infra-id %s --aws-creds %s --region %s", bastion.InfraID, bastion.AWSCreds, bastion.Region)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 18b7803c-8514-4b9d-a46b-2c339b51ab10 | withName | ['createCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/installhelper.go | func (c *createCluster) withName(name string) *createCluster {
c.Name = name
return c
} | hypershift | ||||
function | openshift/openshift-tests-private | a2b1170c-e4d7-4463-a6f0-c4da9aad29f7 | withNodePoolReplicas | ['createCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/installhelper.go | func (c *createCluster) withNodePoolReplicas(NodePoolReplicas int) *createCluster {
c.NodePoolReplicas = &NodePoolReplicas
return c
} | hypershift | ||||
function | openshift/openshift-tests-private | 3c622901-10da-4d6d-a606-326145ba4a87 | withInfraJSON | ['createCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/installhelper.go | func (c *createCluster) withInfraJSON(InfraJSON string) *createCluster {
c.InfraJSON = InfraJSON
return c
} | hypershift |