element_type
stringclasses
4 values
project_name
stringclasses
1 value
uuid
stringlengths
36
36
name
stringlengths
0
346
imports
stringlengths
0
2.67k
structs
stringclasses
761 values
interfaces
stringclasses
22 values
file_location
stringclasses
545 values
code
stringlengths
26
8.07M
global_vars
stringclasses
7 values
package
stringclasses
124 values
tags
stringclasses
1 value
test case
openshift/openshift-tests-private
b515ae74-5b13-426d-8a08-42f55eb4dee5
Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-PreChkUpgrade-NonPreRelease-High-54745-Bug clusterResourceQuota objects check
['"fmt"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-PreChkUpgrade-NonPreRelease-High-54745-Bug clusterResourceQuota objects check", func() { var ( caseID = "ocp-54745" namespace = caseID + "-quota-test" clusterQuotaName = caseID + "-crq-test" crqLimits = map[string]string{ "pods": "4", "secrets": "10", "cpu": "7", "memory": "5Gi", "requests.cpu": "6", "requests.memory": "6Gi", "limits.cpu": "6", "limits.memory": "6Gi", "configmaps": "5", "count/deployments.apps": "1", "count/templates.template.openshift.io": "3", "count/servicemonitors.monitoring.coreos.com": "1", } ) exutil.By("1) Create custom project for Pre & Post Upgrade ClusterResourceQuota test.") nsError := oc.WithoutNamespace().AsAdmin().Run("create").Args("ns", namespace).Execute() o.Expect(nsError).NotTo(o.HaveOccurred()) exutil.By("2) Create resource ClusterResourceQuota") err := oc.WithoutNamespace().AsAdmin().Run("create").Args("-n", namespace, "-f", getTestDataFilePath("clusterresourcequota.yaml")).Execute() o.Expect(err).NotTo(o.HaveOccurred()) params := []string{"-n", namespace, "clusterresourequotaremplate", "-p", "NAME=" + clusterQuotaName, "LABEL=" + namespace, "PODS_LIMIT=" + crqLimits["pods"], "SECRETS_LIMIT=" + crqLimits["secrets"], "CPU_LIMIT=" + crqLimits["cpu"], "MEMORY_LIMIT=" + crqLimits["memory"], "REQUESTS_CPU=" + crqLimits["requests.cpu"], "REQUEST_MEMORY=" + crqLimits["requests.memory"], "LIMITS_CPU=" + crqLimits["limits.cpu"], "LIMITS_MEMORY=" + crqLimits["limits.memory"], "CONFIGMAPS_LIMIT=" + crqLimits["configmaps"], "TEMPLATE_COUNT=" + crqLimits["count/templates.template.openshift.io"], "SERVICE_MONITOR=" + crqLimits["count/servicemonitors.monitoring.coreos.com"], "DEPLOYMENT=" + crqLimits["count/deployments.apps"]} quotaConfigFile := exutil.ProcessTemplate(oc, params...) 
err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-n", namespace, "-f", quotaConfigFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3) Create multiple secrets to test created ClusterResourceQuota, expect failure for secrets creations that exceed quota limit") // Run the function to create secrets createSecretsWithQuotaValidation(oc, namespace, clusterQuotaName, crqLimits, caseID) exutil.By("4) Create few pods before upgrade to check ClusterResourceQuota, Remaining Quota pod will create after upgrade.") podsCount, err := oc.Run("get").Args("-n", namespace, "clusterresourcequota", clusterQuotaName, "-o", `jsonpath={.status.namespaces[*].status.used.pods}`).Output() o.Expect(err).NotTo(o.HaveOccurred()) existingPodCount, _ := strconv.Atoi(podsCount) limits, _ := strconv.Atoi(crqLimits["pods"]) podTemplate := getTestDataFilePath("ocp54745-pod.yaml") for i := existingPodCount; i < limits-2; i++ { podname := fmt.Sprintf("%v-pod-%d", caseID, i) params := []string{"-n", namespace, "-f", podTemplate, "-p", "NAME=" + podname, "REQUEST_MEMORY=1Gi", "REQUEST_CPU=1", "LIMITS_MEMORY=1Gi", "LIMITS_CPU=1"} podConfigFile := exutil.ProcessTemplate(oc, params...) 
err = oc.AsAdmin().WithoutNamespace().Run("-n", namespace, "create").Args("-f", podConfigFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("5) Create new app & Service Monitor to check quota exceeded") err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-n", namespace, "-f", getTestDataFilePath("service-monitor.yaml")).Execute() o.Expect(err).NotTo(o.HaveOccurred()) for count := 1; count < 3; count++ { appName := fmt.Sprintf("%v-app-%v", caseID, count) image := "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83" output, err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args(fmt.Sprintf("--name=%v", appName), image, "-n", namespace).Output() if count <= limits { o.Expect(err).NotTo(o.HaveOccurred()) } else { o.Expect(output).To(o.MatchRegexp("deployments.apps.*forbidden: exceeded quota")) } params = []string{"-n", namespace, "servicemonitortemplate", "-p", fmt.Sprintf("NAME=%v-service-monitor-%v", caseID, count), "DEPLOYMENT=" + crqLimits["count/deployments.apps"], } serviceMonitor := exutil.ProcessTemplate(oc, params...) 
output, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-n", namespace, "-f", serviceMonitor).Output() limits, _ = strconv.Atoi(crqLimits["count/servicemonitors.monitoring.coreos.com"]) if count <= limits { o.Expect(err).NotTo(o.HaveOccurred()) } else { o.Expect(output).To(o.MatchRegexp("servicemonitors.*forbidden: exceeded quota")) } } exutil.By("6) Compare applied ClusterResourceQuota") for resourceName, limit := range crqLimits { resource, err := oc.Run("get").Args("-n", namespace, "clusterresourcequota", clusterQuotaName, "-o", fmt.Sprintf(`jsonpath={.status.namespaces[*].status.used.%v}`, strings.ReplaceAll(resourceName, ".", "\\."))).Output() o.Expect(err).NotTo(o.HaveOccurred()) usedResource, _ := strconv.Atoi(strings.Trim(resource, "Gi")) limits, _ := strconv.Atoi(strings.Trim(limit, "Gi")) if 0 < usedResource && usedResource <= limits { e2e.Logf("Test Passed: ClusterResourceQuota for Resource %v is in applied limit", resourceName) } else { e2e.Failf("Test Failed: ClusterResourceQuota for Resource %v is not in applied limit", resourceName) } } })
test case
openshift/openshift-tests-private
17fe55c4-2107-49e0-b12a-f78b1e5b63d9
Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-PstChkUpgrade-NonPreRelease-High-54745-Bug clusterResourceQuota objects check
['"fmt"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-PstChkUpgrade-NonPreRelease-High-54745-Bug clusterResourceQuota objects check", func() { var ( caseID = "ocp-54745" namespace = caseID + "-quota-test" clusterQuotaName = caseID + "-crq-test" crqLimits = map[string]string{ "pods": "4", "secrets": "10", "cpu": "7", "memory": "5Gi", "requestsCpu": "6", "requestsMemory": "6Gi", "limitsCpu": "6", "limitsMemory": "6Gi", "configmaps": "5", } ) // Cleanup resources after this test, created in PreChkUpgrade defer oc.AsAdmin().WithoutNamespace().Run("delete", "project").Args(namespace).Execute() defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("-n", namespace, "clusterresourcequota", clusterQuotaName).Execute() exutil.By("6) Create pods after upgrade to check ClusterResourceQuota") podsCount, err := oc.Run("get").Args("-n", namespace, "clusterresourcequota", clusterQuotaName, "-o", `jsonpath={.status.namespaces[*].status.used.pods}`).Output() o.Expect(err).NotTo(o.HaveOccurred()) existingPodCount, _ := strconv.Atoi(podsCount) limits, _ := strconv.Atoi(crqLimits["pods"]) podTemplate := getTestDataFilePath("ocp54745-pod.yaml") for i := existingPodCount; i <= limits; i++ { podname := fmt.Sprintf("%v-pod-%d", caseID, i) params := []string{"-n", namespace, "-f", podTemplate, "-p", "NAME=" + podname, "REQUEST_MEMORY=1Gi", "REQUEST_CPU=1", "LIMITS_MEMORY=1Gi", "LIMITS_CPU=1"} podConfigFile := exutil.ProcessTemplate(oc, params...) 
output, err := oc.AsAdmin().WithoutNamespace().Run("-n", namespace, "create").Args("-f", podConfigFile).Output() exutil.By(fmt.Sprintf("5.%d) creating pod %s", i, podname)) if i < limits { o.Expect(err).NotTo(o.HaveOccurred()) } else { o.Expect(output).To(o.MatchRegexp("pods.*forbidden: exceeded quota")) } } exutil.By("7) Create multiple configmap to test created ClusterResourceQuota, expect failure for configmap creations that exceed quota limit") cmCount, err := oc.Run("get").Args("-n", namespace, "clusterresourcequota", clusterQuotaName, "-o", `jsonpath={.status.namespaces[*].status.used.configmaps}`).Output() o.Expect(err).NotTo(o.HaveOccurred()) cmUsedCount, _ := strconv.Atoi(cmCount) limits, _ = strconv.Atoi(crqLimits["configmaps"]) for i := cmUsedCount; i <= limits; i++ { configmapName := fmt.Sprintf("%v-configmap-%d", caseID, i) output, err := oc.Run("create").Args("-n", namespace, "configmap", configmapName).Output() exutil.By(fmt.Sprintf("7.%d) creating configmap %s", i, configmapName)) if i < limits { o.Expect(err).NotTo(o.HaveOccurred()) } else { o.Expect(output).To(o.MatchRegexp("configmaps.*forbidden: exceeded quota")) } } exutil.By("8) Compare applied ClusterResourceQuota") for _, resourceName := range []string{"pods", "secrets", "cpu", "memory", "configmaps"} { resource, err := oc.Run("get").Args("-n", namespace, "clusterresourcequota", clusterQuotaName, "-o", fmt.Sprintf(`jsonpath={.status.namespaces[*].status.used.%v}`, resourceName)).Output() o.Expect(err).NotTo(o.HaveOccurred()) usedResource, _ := strconv.Atoi(strings.Trim(resource, "mGi")) limits, _ := strconv.Atoi(strings.Trim(crqLimits[resourceName], "mGi")) if 0 < usedResource && usedResource <= limits { e2e.Logf("Test Passed: ClusterResourceQuota for Resource %v is in applied limit", resourceName) } else { e2e.Failf("Test Failed: ClusterResourceQuota for Resource %v is not in applied limit", resourceName) } } })
test case
openshift/openshift-tests-private
351606a9-36f5-4268-b89e-8e3869a82671
Author:rgangwar-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Medium-10350-[Apiserver] compensate for raft/cache delay in namespace admission
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Medium-10350-[Apiserver] compensate for raft/cache delay in namespace admission", func() { tmpnamespace := "ocp-10350" + exutil.GetRandomString() defer oc.AsAdmin().Run("delete").Args("ns", tmpnamespace, "--ignore-not-found").Execute() exutil.By("1.) Create new namespace") // Description of case: detail see PR https://github.com/openshift/cucushift/pull/9495 // We observe how long it takes to delete one Terminating namespace that has Terminating pod when cluster is under some load. // Thus wait up to 200 seconds and also calculate the actual time so that when it FIRST hits > 200 seconds, we fail it IMMEDIATELY. // Through this way we know the actual time DIRECTLY from the test logs, useful to file a performance bug with PRESENT evidence already, meanwhile the scenario will not cost really long time. // Temporarily increase the assertion to 200 seconds to make it not often fail and to reduce debugging effort. Once the bug (Ref:2038780) is fixed, we will revert to 90. expectedOutageTime := 90 for i := 0; i < 15; i++ { var namespaceErr error projectSuccTime := time.Now() err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { namespaceOutput, namespaceErr := oc.WithoutNamespace().Run("create").Args("ns", tmpnamespace).Output() if namespaceErr == nil { e2e.Logf("oc create ns %v created successfully", tmpnamespace) projectSuccTime = time.Now() o.Expect(namespaceOutput).Should(o.ContainSubstring(fmt.Sprintf("namespace/%v created", tmpnamespace)), fmt.Sprintf("namespace/%v not created", tmpnamespace)) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("oc create ns %v failed :: %v", tmpnamespace, namespaceErr)) exutil.By("2.) 
Create new app") var apperr error errApp := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { apperr := oc.WithoutNamespace().Run("new-app").Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", tmpnamespace, "--import-mode=PreserveOriginal").Execute() if apperr != nil { return false, nil } e2e.Logf("oc new app succeeded") return true, nil }) exutil.AssertWaitPollNoErr(errApp, fmt.Sprintf("oc new app failed :: %v", apperr)) var poderr error errPod := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { podOutput, poderr := oc.WithoutNamespace().Run("get").Args("pod", "-n", tmpnamespace, "--no-headers").Output() if poderr == nil && strings.Contains(podOutput, "Running") { e2e.Logf("Pod %v succesfully", podOutput) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(errPod, fmt.Sprintf("Pod not running :: %v", poderr)) exutil.By("3.) 
Delete new namespace") var delerr error projectdelerr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { delerr = oc.Run("delete").Args("namespace", tmpnamespace).Execute() if delerr != nil { return false, nil } e2e.Logf("oc delete namespace succeeded") return true, nil }) exutil.AssertWaitPollNoErr(projectdelerr, fmt.Sprintf("oc delete namespace failed :: %v", delerr)) var chkNamespaceErr error errDel := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { chkNamespaceOutput, chkNamespaceErr := oc.WithoutNamespace().Run("get").Args("namespace", tmpnamespace, "--ignore-not-found").Output() if chkNamespaceErr == nil && strings.Contains(chkNamespaceOutput, "") { e2e.Logf("Namespace deleted %v successfully", tmpnamespace) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(errDel, fmt.Sprintf("Namespace %v not deleted successfully, still visible after delete :: %v", tmpnamespace, chkNamespaceErr)) projectDelTime := time.Now() diff := projectDelTime.Sub(projectSuccTime) e2e.Logf("#### Namespace success and delete time(s) :: %f ####\n", diff.Seconds()) if int(diff.Seconds()) > expectedOutageTime { e2e.Failf("#### Test case Failed in %d run :: The Namespace success and deletion outage time lasted %d longer than we expected %d", i, int(diff.Seconds()), expectedOutageTime) } e2e.Logf("#### Test case passed in %d run :: Namespace success and delete time(s) :: %f ####\n", i, diff.Seconds()) } })
test case
openshift/openshift-tests-private
15be0bef-2bda-4bf6-b75e-ad9885fba0e2
Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Longduration-NonPreRelease-High-56693-[Apiserver] Make SAR traffic from oauth and openshift apiserver exempt with API Priority and Fairness feature [Slow][Disruptive]
['"encoding/json"', '"fmt"', '"os"', '"os/exec"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Longduration-NonPreRelease-High-56693-[Apiserver] Make SAR traffic from oauth and openshift apiserver exempt with API Priority and Fairness feature [Slow][Disruptive]", func() { // The case is from customer bug 1888309 var ( patchJSON = `[{"op": "replace", "path": "/spec/logLevel", "value": "TraceAll"}]` restorePatchJSON = `[{"op": "replace", "path": "/spec/logLevel", "value": "Normal"}]` expectedStatus = map[string]string{"Progressing": "True"} kubeApiserverCoStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} caseID = "OCP-56693" dirname = "/tmp/-" + caseID ) defer os.RemoveAll(dirname) defer func() { exutil.By("4)Restoring the loglevel to the default setting ...") output, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("kubeapiserver/cluster", "--type=json", "-p", restorePatchJSON).Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(output, "patched (no change)") { e2e.Logf("kubeapiserver/cluster logLevel is not changed to the default values") } else { e2e.Logf("kubeapiserver/cluster logLevel is changed to the default values") exutil.By("4) Checking KAS operator should be in Progressing and Available after rollout and recovery") e2e.Logf("Checking kube-apiserver operator should be in Progressing in 100 seconds") err = waitCoBecomes(oc, "kube-apiserver", 100, expectedStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds") e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds") err = waitCoBecomes(oc, "kube-apiserver", 1500, kubeApiserverCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds") logLevel, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiserver/cluster", "-o", `jsonpath={.spec.logLevel}`).Output() o.Expect(err1).NotTo(o.HaveOccurred()) o.Expect(logLevel).Should(o.Equal(`Normal`)) } }() err 
:= os.MkdirAll(dirname, 0o755) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("1) Checking if oauth and openshift apiserver exempt with API Priority and Fairness feature") output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("FlowSchema").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.MatchRegexp("openshift-apiserver-sar.*exempt")) o.Expect(output).Should(o.MatchRegexp("openshift-oauth-apiserver-sar.*exempt")) err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("kubeapiserver/cluster", "--type=json", "-p", patchJSON).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2) Checking KAS operator should be in Progressing and Available after rollout and recovery") e2e.Logf("Checking kube-apiserver operator should be in Progressing in 100 seconds") err = waitCoBecomes(oc, "kube-apiserver", 100, expectedStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds") e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds") err = waitCoBecomes(oc, "kube-apiserver", 1500, kubeApiserverCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds") logLevel, logLevelErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiserver/cluster", "-o", `jsonpath={.spec.logLevel}`).Output() o.Expect(logLevelErr).NotTo(o.HaveOccurred()) o.Expect(logLevel).Should(o.Equal(`TraceAll`)) exutil.By("3) Checking if SAR traffics from flowschema openshift-apiserver and oauth-apiserver found in KAS logs") kasPods, err := exutil.GetAllPodsWithLabel(oc, "openshift-kube-apiserver", "app=openshift-kube-apiserver") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(kasPods).ShouldNot(o.BeEmpty()) for _, kasPod := range kasPods { e2e.Logf("pod name:%s", kasPod) _, errlog := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-kube-apiserver", kasPod).OutputToFile(caseID + "/kas.log." 
+ kasPod) o.Expect(errlog).NotTo(o.HaveOccurred()) } cmd := fmt.Sprintf(`grep 'startRequest' %v | grep 'system:serviceaccount:openshift-apiserver:openshift-apiserver-sa' | grep -iE 'immediate|exempt' | head -1`, dirname+"/kas.log.*") e2e.Logf(cmd) noOASLogs, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) cmd = fmt.Sprintf(`grep 'startRequest' %v | grep 'system:serviceaccount:openshift-oauth-apiserver:oauth-apiserver-sa' | grep -iE 'immediate|exempt' | head -1`, dirname+"/kas.log.*") e2e.Logf(cmd) noOAUTHLogs, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) if len(noOASLogs) > 0 && len(noOAUTHLogs) > 0 { e2e.Logf("Found SAR traffics from flowschema openshift-apiserver:%s", noOASLogs) e2e.Logf("Found SAR traffics from flowschema oauth-apiserver: %s", noOAUTHLogs) e2e.Logf("Test Passed!") } else { e2e.Failf("Test Failed: No SAR traffics from flowschema openshift-apiserver and oauth-apiserver found in KAS logs") } })
test case
openshift/openshift-tests-private
4f555251-2f38-401c-93c1-40da07144acf
Author:kewang-WRS-NonHyperShiftHOST-ARO-Medium-57243-V-BR.33-V-BR.39-[Apiserver] Viewing audit logs
['"fmt"', '"os"', '"os/exec"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-WRS-NonHyperShiftHOST-ARO-Medium-57243-V-BR.33-V-BR.39-[Apiserver] Viewing audit logs", func() { var ( apiservers = []string{"openshift-apiserver", "kube-apiserver", "oauth-apiserver"} caseID = "OCP-57243" dirname = "/tmp/-" + caseID mustgatherDir = dirname + "/must-gather.ocp-57243" ) defer os.RemoveAll(dirname) err := os.MkdirAll(dirname, 0o755) o.Expect(err).NotTo(o.HaveOccurred()) err = os.MkdirAll(mustgatherDir, 0o755) o.Expect(err).NotTo(o.HaveOccurred()) masterNode, masterErr := exutil.GetFirstMasterNode(oc) o.Expect(masterErr).NotTo(o.HaveOccurred()) e2e.Logf("Master node is %v : ", masterNode) for i, apiserver := range apiservers { exutil.By(fmt.Sprintf("%d.1)View the %s audit logs are available for each control plane node:", i+1, apiserver)) output, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", "--role=master", "--path="+apiserver+"/").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.MatchRegexp(".*audit.log")) e2e.Logf("The OpenShift API server audit logs are available for each control plane node:\n%s", output) exutil.By(fmt.Sprintf("%d.2) View a specific %s audit log by providing the node name and the log name:", i+1, apiserver)) auditLogFile := fmt.Sprintf("%s/%s-audit.log", caseID, apiserver) _, err1 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", masterNode, "--path="+apiserver+"/audit.log").OutputToFile(auditLogFile) o.Expect(err1).NotTo(o.HaveOccurred()) cmd := fmt.Sprintf(`tail -1 %v`, "/tmp/-"+auditLogFile) cmdOut, cmdErr := exec.Command("bash", "-c", cmd).Output() o.Expect(cmdErr).NotTo(o.HaveOccurred()) e2e.Logf("An example of %s audit log:\n%s", apiserver, cmdOut) } exutil.By("4) Gathering audit logs to run the oc adm must-gather command and view the audit log files:") _, mgErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir="+mustgatherDir, "--", "/usr/bin/gather_audit_logs").Output() o.Expect(mgErr).NotTo(o.HaveOccurred()) cmd := 
fmt.Sprintf(`du -h %v`, mustgatherDir) cmdOut, cmdErr := exec.Command("bash", "-c", cmd).Output() o.Expect(cmdErr).NotTo(o.HaveOccurred()) e2e.Logf("View the audit log files for running the oc adm must-gather command:\n%s", cmdOut) // Empty audit log file is not expected. o.Expect(cmdOut).ShouldNot(o.ContainSubstring("0B")) })
test case
openshift/openshift-tests-private
553fd83c-6eaa-4b5e-acc4-a3f3326c3d55
Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-PstChkUpgrade-NonPreRelease-Medium-56934-[Apiserver] bug Ensure unique CA serial numbers, after enable automated service CA rotation
['"context"', '"fmt"', '"net/http"', '"os"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-PstChkUpgrade-NonPreRelease-Medium-56934-[Apiserver] bug Ensure unique CA serial numbers, after enable automated service CA rotation", func() { var ( dirname = "/tmp/-OCP-56934/" ) exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } defer os.RemoveAll(dirname) err := os.MkdirAll(dirname, 0755) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("Cluster should be healthy before running case.") err = clusterHealthcheck(oc, "OCP-56934/log") if err == nil { e2e.Logf("Cluster health check passed before running case") } else { g.Skip(fmt.Sprintf("Cluster health check failed before running case :: %s ", err)) } exutil.By("1. Get openshift-apiserver pods and endpoints ip & port") podName, podGetErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-apiserver", "pod", "--field-selector=status.phase=Running", "-o", "jsonpath={.items[0].metadata.name}").Output() o.Expect(podGetErr).NotTo(o.HaveOccurred()) endpointIP, epGetErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-apiserver", "endpoints", "api", "-o", fmt.Sprintf(`jsonpath={.subsets[*].addresses[?(@.targetRef.name=="%v")].ip}`, podName)).Output() o.Expect(epGetErr).NotTo(o.HaveOccurred()) exutil.By("2. 
Check openshift-apiserver https api metrics endpoint URL") err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) { metricsUrl := fmt.Sprintf(`https://%v:8443/metrics`, string(endpointIP)) metricsOut, metricsErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-apiserver", podName, "-c", "openshift-apiserver", "--", "curl", "-k", "--connect-timeout", "5", "--retry", "2", "-N", "-s", metricsUrl).Output() if metricsErr == nil { o.Expect(metricsOut).ShouldNot(o.ContainSubstring("You are attempting to import a cert with the same issuer/serial as an existing cert, but that is not the same cert")) o.Expect(metricsOut).Should(o.ContainSubstring("Forbidden")) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(err, "Test Failed") })
test case
openshift/openshift-tests-private
be494ea1-bd9d-42f3-b52f-4d24b214ddb9
Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-ConnectedOnly-High-53229-[Apiserver] Test Arbitrary path injection via type field in CNI configuration
['"context"', '"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-ConnectedOnly-High-53229-[Apiserver] Test Arbitrary path injection via type field in CNI configuration", func() { exutil.By("1) Create new project") oc.SetupProject() namespace := oc.Namespace() exutil.By("2) Create NetworkAttachmentDefinition with name nefarious-conf using nefarious.yaml") nefariousConfTemplate := getTestDataFilePath("ocp53229-nefarious.yaml") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", namespace, "-f", nefariousConfTemplate).Execute() nefariousConfErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", namespace, "-f", nefariousConfTemplate).Execute() o.Expect(nefariousConfErr).NotTo(o.HaveOccurred()) exutil.By("3) Create Pod by using created NetworkAttachmentDefinition in annotations") nefariousPodTemplate := getTestDataFilePath("ocp53229-nefarious-pod.yaml") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", namespace, "-f", nefariousPodTemplate).Execute() nefariousPodErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", namespace, "-f", nefariousPodTemplate).Execute() o.Expect(nefariousPodErr).NotTo(o.HaveOccurred()) exutil.By("4) Check pod should be in creating or failed status and event should show error message invalid plugin") podStatus, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", namespace, "-f", nefariousPodTemplate, "-o", "jsonpath={.status.phase}").Output() o.Expect(podErr).NotTo(o.HaveOccurred()) o.Expect(podStatus).ShouldNot(o.ContainSubstring("Running")) err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 2*time.Minute, false, func(cxt context.Context) (bool, error) { podEvent, podEventErr := oc.AsAdmin().WithoutNamespace().Run("describe").Args("-n", namespace, "-f", nefariousPodTemplate).Output() o.Expect(podEventErr).NotTo(o.HaveOccurred()) matched, _ := regexp.MatchString("error adding pod.*to CNI network.*invalid plugin name: ../../../../usr/sbin/reboot", podEvent) if matched { 
e2e.Logf("Step 4. Test Passed") return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(err, "Detected event CNI network invalid plugin") exutil.By("5) Check pod created on node should not be rebooting and appear offline") nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", namespace, "-f", nefariousPodTemplate, "-o", "jsonpath={.spec.nodeName}").Output() o.Expect(nodeErr).NotTo(o.HaveOccurred()) nodeStatus, nodeStatusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "--no-headers").Output() o.Expect(nodeStatusErr).NotTo(o.HaveOccurred()) o.Expect(nodeStatus).Should(o.ContainSubstring("Ready")) })
test case
openshift/openshift-tests-private
3eec969c-9f02-478c-b862-455d3b7769cb
Author:rgangwar-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-43261-V-BR.33-V-BR.39-[Apiserver] APIServer Support None audit policy [Disruptive][Slow]
['"context"', '"encoding/json"', '"fmt"', '"strings"', '"time"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-43261-V-BR.33-V-BR.39-[Apiserver] APIServer Support None audit policy [Disruptive][Slow]", func() { var ( patch = `[{"op": "replace", "path": "/spec/audit", "value":{"profile":"None"}}]` patchToRecover = `[{"op": "replace", "path": "/spec/audit", "value":{"profile":"Default"}}]` expectedProgCoStatus = map[string]string{"Progressing": "True"} expectedCoStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} coOps = []string{"authentication", "openshift-apiserver"} ) defer func() { contextErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", "admin").Execute() o.Expect(contextErr).NotTo(o.HaveOccurred()) contextOutput, contextErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-context").Output() o.Expect(contextErr).NotTo(o.HaveOccurred()) e2e.Logf("Context after rollack :: %v", contextOutput) }() defer func() { exutil.By("Restoring apiserver/cluster's profile") output, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patchToRecover).Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(output, "patched (no change)") { e2e.Logf("Apiserver/cluster's audit profile not changed from the default values") } else { exutil.By("Checking KAS, OAS, Auththentication operators should be in Progressing and Available after rollout and recovery") e2e.Logf("Checking kube-apiserver operator should be in Progressing in 100 seconds") err = waitCoBecomes(oc, "kube-apiserver", 100, expectedProgCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds") e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds") err = waitCoBecomes(oc, "kube-apiserver", 1500, expectedCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds") // Using 60s because KAS 
takes long time, when KAS finished rotation, OAS and Auth should have already finished. for _, ops := range coOps { e2e.Logf("Checking %s should be Available in 60 seconds", ops) err = waitCoBecomes(oc, ops, 60, expectedCoStatus) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%v operator is not becomes available in 60 seconds", ops)) } } }() exutil.By("1. Set None profile to audit log") output, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("patched"), "apiserver/cluster not patched") exutil.By("2. Checking KAS, OAS, Auththentication operators should be in Progressing and Available after rollout and recovery") exutil.By("2.1 Checking kube-apiserver operator should be in Progressing in 100 seconds") err = waitCoBecomes(oc, "kube-apiserver", 100, expectedProgCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds") exutil.By("2.2 Checking kube-apiserver operator should be Available in 1500 seconds") err = waitCoBecomes(oc, "kube-apiserver", 1500, expectedCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds") // Using 60s because KAS takes long time, when KAS finished rotation, OAS and Auth should have already finished. i := 3 for _, ops := range coOps { exutil.By(fmt.Sprintf("2.%d Checking %s should be Available in 60 seconds", i, ops)) err = waitCoBecomes(oc, ops, 60, expectedCoStatus) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%v operator is not becomes available in 60 seconds", ops)) i = i + 1 } e2e.Logf("KAS, OAS and Auth operator are available after rollout") // Must-gather for audit logs // Related bug 2008223 // Due to bug 2040654, exit code is unable to get failure exit code from executed script, so the step will succeed here. exutil.By("3. 
Get must-gather audit logs") msg, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir=/"+tmpdir+"/audit_must_gather_OCP-43261", "--", "/usr/bin/gather_audit_logs").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(msg, "ERROR: To raise a Red Hat support request")).Should(o.BeTrue()) o.Expect(strings.Contains(msg, "spec.audit.profile")).Should(o.BeTrue()) exutil.By("4. Check if there is no new audit logs are generated after None profile setting.") errUser := oc.AsAdmin().WithoutNamespace().Run("login").Args("-u", "system:admin", "-n", "default").Execute() o.Expect(errUser).NotTo(o.HaveOccurred()) // Define the command to run on each node now := time.Now().UTC().Format("2006-01-02 15:04:05") script := fmt.Sprintf(`for logpath in kube-apiserver oauth-apiserver openshift-apiserver; do grep -h system:authenticated:oauth /var/log/${logpath}/audit*.log | jq -c 'select (.requestReceivedTimestamp | .[0:19] + "Z" | fromdateiso8601 > "%s")' >> /tmp/OCP-43261-$logpath.json; done; cat /tmp/OCP-43261-*.json`, now) exutil.By("4.1 Get all master nodes.") masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master") o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred()) o.Expect(masterNodes).NotTo(o.BeEmpty()) counter := 0 for _, masterNode := range masterNodes { exutil.By(fmt.Sprintf("4.2 Get audit log file from %s", masterNode)) masterNodeLogs, checkLogFileErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=openshift-kube-apiserver"}, "bash", "-c", script) o.Expect(checkLogFileErr).NotTo(o.HaveOccurred()) errCount := strings.Count(strings.TrimSpace(masterNodeLogs), "\n") if errCount > 0 { e2e.Logf("Error logs on master node %v :: %v", masterNode, masterNodeLogs) } counter = errCount + counter } if counter > 0 { e2e.Failf("Audit logs counts increased :: %d", counter) } })
test case
openshift/openshift-tests-private
9cce0c11-eabc-458d-90a9-f33ea8704335
Author:rgangwar-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-33427-V-BR.33-V-BR.39-[Apiserver] customize audit config of apiservers [Disruptive][Slow]
['"fmt"', '"strconv"', '"strings"', '"time"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-33427-V-BR.33-V-BR.39-[Apiserver] customize audit config of apiservers [Disruptive][Slow]", func() { var ( patchAllRequestBodies = `[{"op": "replace", "path": "/spec/audit", "value":{"profile":"AllRequestBodies"}}]` patchWriteRequestBodies = `[{"op": "replace", "path": "/spec/audit", "value":{"profile":"WriteRequestBodies"}}]` patchToRecover = `[{"op": "replace", "path": "/spec/audit", "value":{"profile":"Default"}}]` podScript = "grep -r '\"managedFields\":{' /var/log/kube-apiserver | wc -l" now = time.Now().UTC() unixTimestamp = now.Unix() ) defer func() { exutil.By("Restoring apiserver/cluster's profile") output := setAuditProfile(oc, "apiserver/cluster", patchToRecover) if strings.Contains(output, "patched (no change)") { e2e.Logf("Apiserver/cluster's audit profile not changed from the default values") } }() exutil.By("1. Checking the current default audit policy of cluster") checkApiserversAuditPolicies(oc, "Default") exutil.By("2. Get all master nodes.") masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master") o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred()) o.Expect(masterNodes).NotTo(o.BeEmpty()) exutil.By("3. Checking verbs in kube-apiserver audit logs") script := fmt.Sprintf(`grep -hE "\"verb\":\"(create|delete|patch|update)\",\"user\":.*(requestObject|responseObject)|\"verb\":\"(get|list|watch)\",\"user\":.*(requestObject|responseObject)" /var/log/kube-apiserver/audit.log | jq -r "select (.requestReceivedTimestamp | .[0:19] + \"Z\" | fromdateiso8601 > %v)" | tail -n 1`, unixTimestamp) masterNodeLogs, errCount := checkAuditLogs(oc, script, masterNodes[0], "openshift-kube-apiserver") if errCount > 0 { e2e.Failf("Verbs in kube-apiserver audit logs on master node %v :: %v", masterNodes[0], masterNodeLogs) } e2e.Logf("No verbs logs in kube-apiserver audit logs on master node %v", masterNodes[0]) exutil.By("4. 
Set audit profile to WriteRequestBodies") setAuditProfile(oc, "apiserver/cluster", patchWriteRequestBodies) exutil.By("5. Checking the current WriteRequestBodies audit policy of cluster.") checkApiserversAuditPolicies(oc, "WriteRequestBodies") exutil.By("6. Checking verbs and managedFields in kube-apiserver audit logs after audit profile to WriteRequestBodies") masterNodeLogs, errCount = checkAuditLogs(oc, script, masterNodes[0], "openshift-kube-apiserver") if errCount == 0 { e2e.Failf("Post audit profile to WriteRequestBodies, No Verbs in kube-apiserver audit logs on master node %v :: %v :: %v", masterNodes[0], masterNodeLogs, errCount) } podsList := getPodsListByLabel(oc.AsAdmin(), "openshift-kube-apiserver", "app=openshift-kube-apiserver") execKasOuptut := ExecCommandOnPod(oc, podsList[0], "openshift-kube-apiserver", podScript) trimOutput := strings.TrimSpace(execKasOuptut) count, _ := strconv.Atoi(trimOutput) if count == 0 { e2e.Logf("The step succeeded and the managedFields count is zero in KAS logs.") } else { e2e.Failf("The step Failed and the managedFields count is not zero in KAS logs :: %d.", count) } e2e.Logf("Post audit profile to WriteRequestBodies, verbs captured in kube-apiserver audit logs on master node %v", masterNodes[0]) exutil.By("7. Set audit profile to AllRequestBodies") setAuditProfile(oc, "apiserver/cluster", patchAllRequestBodies) exutil.By("8. Checking the current AllRequestBodies audit policy of cluster.") checkApiserversAuditPolicies(oc, "AllRequestBodies") exutil.By("9. 
Checking verbs and managedFields in kube-apiserver audit logs after audit profile to AllRequestBodies") masterNodeLogs, errCount = checkAuditLogs(oc, script, masterNodes[0], "openshift-kube-apiserver") if errCount == 0 { e2e.Failf("Post audit profile to AllRequestBodies, No Verbs in kube-apiserver audit logs on master node %v :: %v", masterNodes[0], masterNodeLogs) } execKasOuptut = ExecCommandOnPod(oc, podsList[0], "openshift-kube-apiserver", podScript) trimOutput = strings.TrimSpace(execKasOuptut) count, _ = strconv.Atoi(trimOutput) if count == 0 { e2e.Logf("The step succeeded and the managedFields count is zero in KAS logs.") } else { e2e.Failf("The step Failed and the managedFields count is not zero in KAS logs :: %d.", count) } e2e.Logf("Post audit profile to AllRequestBodies, Verbs captured in kube-apiserver audit logs on master node %v", masterNodes[0]) })
test case
openshift/openshift-tests-private
d60dbb97-96a4-4ec6-800c-fb89bcd81261
Author:kewang-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11289-[Apiserver] Check the imagestreams of quota in the project after build image [Serial]
['"bufio"', '"context"', '"fmt"', '"os"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11289-[Apiserver] Check the imagestreams of quota in the project after build image [Serial]", func() { if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig")) { g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!") } var ( caseID = "ocp-11289" dirname = "/tmp/-" + caseID ocpObjectCountsYamlFile = dirname + "openshift-object-counts.yaml" expectedQuota = "openshift.io/imagestreams:2" ) exutil.By("1) Create a new project required for this test execution") oc.SetupProject() namespace := oc.Namespace() exutil.By("2) Create a ResourceQuota count of image stream") ocpObjectCountsYaml := `apiVersion: v1 kind: ResourceQuota metadata: name: openshift-object-counts spec: hard: openshift.io/imagestreams: "10" ` f, err := os.Create(ocpObjectCountsYamlFile) o.Expect(err).NotTo(o.HaveOccurred()) defer f.Close() w := bufio.NewWriter(f) _, err = fmt.Fprintf(w, "%s", ocpObjectCountsYaml) w.Flush() o.Expect(err).NotTo(o.HaveOccurred()) defer oc.AsAdmin().Run("delete").Args("-f", ocpObjectCountsYamlFile, "-n", namespace).Execute() quotaErr := oc.AsAdmin().Run("create").Args("-f", ocpObjectCountsYamlFile, "-n", namespace).Execute() o.Expect(quotaErr).NotTo(o.HaveOccurred()) exutil.By("3. 
Checking the created Resource Quota of the Image Stream") quota := getResourceToBeReady(oc, asAdmin, withoutNamespace, "quota", "openshift-object-counts", `--template={{.status.used}}`, "-n", namespace) o.Expect(quota).Should(o.ContainSubstring("openshift.io/imagestreams:0"), "openshift-object-counts") checkImageStreamQuota := func(buildName string, step string) { buildErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false, func(cxt context.Context) (bool, error) { bs := getResourceToBeReady(oc, asAdmin, withoutNamespace, "builds", buildName, "-ojsonpath={.status.phase}", "-n", namespace) if strings.Contains(bs, "Complete") { e2e.Logf("Building of %s status:%v", buildName, bs) return true, nil } e2e.Logf("Building of %s is still not complete, continue to monitor ...", buildName) return false, nil }) exutil.AssertWaitPollNoErr(buildErr, fmt.Sprintf("ERROR: Build status of %s is not complete!", buildName)) exutil.By(fmt.Sprintf("%s.1 Checking the created Resource Quota of the Image Stream", step)) quota := getResourceToBeReady(oc, asAdmin, withoutNamespace, "quota", "openshift-object-counts", `--template={{.status.used}}`, "-n", namespace) if !strings.Contains(quota, expectedQuota) { out, _ := getResource(oc, asAdmin, withoutNamespace, "imagestream", "-n", namespace) e2e.Logf("imagestream are used: %s", out) e2e.Failf("expected quota openshift-object-counts %s doesn't match the reality %s! Please check!", expectedQuota, quota) } } exutil.By("4. Create a source build using source code and check the build info") imgErr := oc.AsAdmin().WithoutNamespace().Run("new-build").Args(`quay.io/openshifttest/ruby-27:1.2.0~https://github.com/sclorg/ruby-ex.git`, "-n", namespace, "--import-mode=PreserveOriginal").Execute() if imgErr != nil { if !isConnectedInternet(oc) { e2e.Failf("Failed to access to the internet, something wrong with the connectivity of the cluster! 
Please check!") } } o.Expect(imgErr).NotTo(o.HaveOccurred()) checkImageStreamQuota("ruby-ex-1", "4") exutil.By("5. Starts a new build for the provided build config") sbErr := oc.AsAdmin().WithoutNamespace().Run("start-build").Args("ruby-ex", "-n", namespace).Execute() o.Expect(sbErr).NotTo(o.HaveOccurred()) checkImageStreamQuota("ruby-ex-2", "5") })
test case
openshift/openshift-tests-private
868fa2db-ae6f-403a-89aa-7acb5af83668
Author:dpunia-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-43336-V-BR.33-V-BR.39-Support customRules list for by-group profiles to the audit configuration [Disruptive][Slow]
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-43336-V-BR.33-V-BR.39-Support customRules list for by-group profiles to the audit configuration [Disruptive][Slow]", func() { var ( patchCustomRules string auditEventCount int users []User usersHTpassFile string htPassSecret string ) defer func() { contextErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", "admin").Execute() o.Expect(contextErr).NotTo(o.HaveOccurred()) contextOutput, contextErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-context").Output() o.Expect(contextErr).NotTo(o.HaveOccurred()) e2e.Logf("Context after rollback :: %v", contextOutput) //Reset customRules profile to default one. output := setAuditProfile(oc, "apiserver/cluster", `[{"op": "remove", "path": "/spec/audit"}]`) if strings.Contains(output, "patched (no change)") { e2e.Logf("Apiserver/cluster's audit profile not changed from the default values") } userCleanup(oc, users, usersHTpassFile, htPassSecret) }() // Get user detail used by the test and cleanup after execution. users, usersHTpassFile, htPassSecret = getNewUser(oc, 2) exutil.By("1. Configure audit config for customRules system:authenticated:oauth profile as Default and audit profile as None") patchCustomRules = `[{"op": "replace", "path": "/spec/audit", "value": {"customRules": [ {"group": "system:authenticated:oauth","profile": "Default"}],"profile": "None"}}]` setAuditProfile(oc, "apiserver/cluster", patchCustomRules) exutil.By("2. 
Check audit events should be greater than zero after login operation") err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { _, auditEventCount = checkUserAuditLog(oc, "system:authenticated:oauth", users[0].Username, users[0].Password) if auditEventCount > 0 { return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Test Case failed :: Audit events count is not greater than zero after login operation :: %v", auditEventCount)) exutil.By("3. Configure audit config for customRules system:authenticated:oauth profile as Default & system:serviceaccounts:openshift-console-operator as WriteRequestBodies and audit profile as None") patchCustomRules = `[{"op": "replace", "path": "/spec/audit", "value": {"customRules": [ {"group": "system:authenticated:oauth","profile": "Default"}, {"group": "system:serviceaccounts:openshift-console-operator","profile": "WriteRequestBodies"}],"profile": "None"}}]` setAuditProfile(oc, "apiserver/cluster", patchCustomRules) exutil.By("4. Check audit events should be greater than zero after login operation") err1 := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 180*time.Second, false, func(cxt context.Context) (bool, error) { _, auditEventCount = checkUserAuditLog(oc, "system:authenticated:oauth", users[1].Username, users[1].Password) if auditEventCount > 0 { return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(err1, fmt.Sprintf("Test Case failed :: Audit events count is not greater than zero after login operation :: %v", auditEventCount)) _, auditEventCount = checkUserAuditLog(oc, "system:serviceaccounts:openshift-console-operator", users[1].Username, users[1].Password) o.Expect(auditEventCount).To(o.BeNumerically(">", 0)) })
test case
openshift/openshift-tests-private
170d1947-52d8-4bad-abf6-f8f3ad55fdd6
Author:dpunia-ROSA-ARO-OSD_CCS-ConnectedOnly-High-11887-Could delete all the resource when deleting the project [Serial]
['"context"', '"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-ROSA-ARO-OSD_CCS-ConnectedOnly-High-11887-Could delete all the resource when deleting the project [Serial]", func() { if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig")) { g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!") } origContxt, contxtErr := oc.Run("config").Args("current-context").Output() o.Expect(contxtErr).NotTo(o.HaveOccurred()) defer func() { useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute() o.Expect(useContxtErr).NotTo(o.HaveOccurred()) }() exutil.By("1) Create a project") projectName := "project-11887" defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", projectName, "--ignore-not-found").Execute() err := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(projectName).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2) Create new app") err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--name=hello-openshift", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", projectName, "--import-mode=PreserveOriginal").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3) Build hello-world from external source") helloWorldSource := "quay.io/openshifttest/ruby-27:1.2.0~https://github.com/openshift/ruby-hello-world" imageError := oc.Run("new-build").Args(helloWorldSource, "--name=ocp-11887-test-"+strings.ToLower(exutil.RandStr(5)), "-n", projectName, "--import-mode=PreserveOriginal").Execute() if imageError != nil { if !isConnectedInternet(oc) { e2e.Failf("Failed to access to the internet, something wrong with the connectivity of the cluster! 
Please check!") } } exutil.By("4) Get project resource") for _, resource := range []string{"buildConfig", "deployments", "pods", "services"} { out := getResourceToBeReady(oc, asAdmin, withoutNamespace, resource, "-n", projectName, "-o=jsonpath={.items[*].metadata.name}") o.Expect(len(out)).To(o.BeNumerically(">", 0)) } exutil.By("5) Delete the project") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", projectName).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5.1) Check project is deleted") err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("project", projectName).Output() if matched, _ := regexp.MatchString("namespaces .* not found", out); matched { e2e.Logf("Step 5.1. Test Passed, project is deleted") return true, nil } // Adding logging for debug e2e.Logf("Project delete is in progress :: %s", out) return false, nil }) exutil.AssertWaitPollNoErr(err, "Step 5.1. Test Failed, Project is not deleted") exutil.By("6) Get project resource after project is deleted") out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", projectName, "all", "--no-headers").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring("No resources found")) exutil.By("7) Create a project with same name, no context for this new one") err = oc.AsAdmin().WithoutNamespace().Run("new-project").Args(projectName).Execute() o.Expect(err).NotTo(o.HaveOccurred()) out, err = oc.AsAdmin().WithoutNamespace().Run("status").Args("-n", projectName).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring("no services, deployment")) })
test case
openshift/openshift-tests-private
05feeb06-137f-4fb9-93b2-91ba723be0bb
Author:dpunia-WRS-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Longduration-NonPreRelease-High-63273-V-CM.03-V-CM.04-Test etcd encryption migration [Slow][Disruptive]
['"context"', '"fmt"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-WRS-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Longduration-NonPreRelease-High-63273-V-CM.03-V-CM.04-Test etcd encryption migration [Slow][Disruptive]", func() { // only run this case in Etcd Encryption On cluster exutil.By("1) Check if cluster is Etcd Encryption On") encryptionType, err := oc.WithoutNamespace().Run("get").Args("apiserver/cluster", "-o=jsonpath={.spec.encryption.type}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if encryptionType != "aescbc" && encryptionType != "aesgcm" { g.Skip("The cluster is Etcd Encryption Off, this case intentionally runs nothing") } e2e.Logf("Etcd Encryption with type %s is on!", encryptionType) exutil.By("2) Check encryption-config and key secrets before Migration") encSecretOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", "openshift-config-managed", "-l", "encryption.apiserver.operator.openshift.io/component", "-o", `jsonpath={.items[*].metadata.name}`).Output() o.Expect(err).NotTo(o.HaveOccurred()) encSecretCount := strings.Count(encSecretOut, "encryption") o.Expect(encSecretCount).To(o.BeNumerically(">", 0)) exutil.By("3) Create Secret & Check in etcd database before Migration") defer oc.WithoutNamespace().Run("delete").Args("-n", "default", "secret", "secret-63273").Execute() err = oc.WithoutNamespace().Run("create").Args("-n", "default", "secret", "generic", "secret-63273", "--from-literal", "pass=secret123").Execute() o.Expect(err).NotTo(o.HaveOccurred()) etcdPods := getPodsListByLabel(oc, "openshift-etcd", "etcd=true") execCmdOutput := ExecCommandOnPod(oc, etcdPods[0], "openshift-etcd", "etcdctl get /kubernetes.io/secrets/default/secret-63273") o.Expect(execCmdOutput).ShouldNot(o.ContainSubstring("secret123")) exutil.By("4) Migrate encryption if current encryption is aescbc to aesgcm or vice versa") migrateEncTo := "aesgcm" if encryptionType == "aesgcm" { migrateEncTo = "aescbc" } oasEncNumber, err := GetEncryptionKeyNumber(oc, `encryption-key-openshift-apiserver-[^ ]*`) 
o.Expect(err).NotTo(o.HaveOccurred()) kasEncNumber, err1 := GetEncryptionKeyNumber(oc, `encryption-key-openshift-kube-apiserver-[^ ]*`) o.Expect(err1).NotTo(o.HaveOccurred()) e2e.Logf("Starting Etcd Encryption migration to %v", migrateEncTo) patchArg := fmt.Sprintf(`{"spec":{"encryption": {"type":"%v"}}}`, migrateEncTo) encMigrateOut, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=merge", "-p", patchArg).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(encMigrateOut).Should(o.ContainSubstring("patched")) exutil.By("5.) Check the new encryption key secrets appear") newOASEncSecretName := "encryption-key-openshift-apiserver-" + strconv.Itoa(oasEncNumber+1) newKASEncSecretName := "encryption-key-openshift-kube-apiserver-" + strconv.Itoa(kasEncNumber+1) err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { output, err := oc.WithoutNamespace().Run("get").Args("secrets", newOASEncSecretName, newKASEncSecretName, "-n", "openshift-config-managed").Output() if err != nil { e2e.Logf("Fail to get new encryption-key-* secrets, error: %s. 
Trying again", err) return false, nil } e2e.Logf("Got new encryption-key-* secrets:\n%s", output) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("new encryption key secrets %s, %s not found", newOASEncSecretName, newKASEncSecretName)) completed, errOAS := WaitEncryptionKeyMigration(oc, newOASEncSecretName) exutil.AssertWaitPollNoErr(errOAS, fmt.Sprintf("saw all migrated-resources for %s", newOASEncSecretName)) o.Expect(completed).Should(o.Equal(true)) completed, errKas := WaitEncryptionKeyMigration(oc, newKASEncSecretName) exutil.AssertWaitPollNoErr(errKas, fmt.Sprintf("saw all migrated-resources for %s", newKASEncSecretName)) o.Expect(completed).Should(o.Equal(true)) e2e.Logf("Checking kube-apiserver operator should be Available") err = waitCoBecomes(oc, "kube-apiserver", 1500, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available") exutil.By("6) Check secret in etcd after Migration") etcdPods = getPodsListByLabel(oc, "openshift-etcd", "etcd=true") execCmdOutput = ExecCommandOnPod(oc, etcdPods[0], "openshift-etcd", "etcdctl get /kubernetes.io/secrets/default/secret-63273") o.Expect(execCmdOutput).ShouldNot(o.ContainSubstring("secret123")) })
test case
openshift/openshift-tests-private
b8836f3e-1186-4877-99d2-83001c93f064
Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Low-12036-APIServer User can pull a private image from a registry when a pull secret is defined [Serial]
['"fmt"', '"net/http"', '"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Low-12036-APIServer User can pull a private image from a registry when a pull secret is defined [Serial]", func() { if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig")) { g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!") } exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } architecture.SkipArchitectures(oc, architecture.MULTI) exutil.By("1) Create a new project required for this test execution") oc.SetupProject() namespace := oc.Namespace() exutil.By("2) Build hello-world from external source") helloWorldSource := "quay.io/openshifttest/ruby-27:1.2.0~https://github.com/openshift/ruby-hello-world" buildName := fmt.Sprintf("ocp12036-test-%s", strings.ToLower(exutil.RandStr(5))) err := oc.Run("new-build").Args(helloWorldSource, "--name="+buildName, "-n", namespace, "--import-mode=PreserveOriginal").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3) Wait for hello-world build to success") buildClient := oc.BuildClient().BuildV1().Builds(oc.Namespace()) err = exutil.WaitForABuild(buildClient, buildName+"-1", nil, nil, nil) if err != nil { exutil.DumpBuildLogs(buildName, oc) } exutil.AssertWaitPollNoErr(err, "build is not complete") exutil.By("4) Get dockerImageRepository value from imagestreams test") dockerImageRepository1, err := oc.Run("get").Args("imagestreams", buildName, "-o=jsonpath={.status.dockerImageRepository}").Output() o.Expect(err).NotTo(o.HaveOccurred()) dockerServer := strings.Split(strings.TrimSpace(dockerImageRepository1), "/") o.Expect(dockerServer).NotTo(o.BeEmpty()) exutil.By("5) Create another project with the second user") oc.SetupProject() exutil.By("6) Get access token") token, err := 
oc.Run("whoami").Args("-t").Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("7) Give user admin permission") username := oc.Username() err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", username).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("8) Create secret for private image under project") err = oc.WithoutNamespace().AsAdmin().Run("create").Args("secret", "docker-registry", "user1-dockercfg", "[email protected]", "--docker-server="+dockerServer[0], "--docker-username="+username, "--docker-password="+token, "-n", oc.Namespace()).NotShowInfo().Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9) Create new deploymentconfig from the dockerImageRepository fetched in step 4") deploymentConfigYaml, err := oc.Run("create").Args("deploymentconfig", "frontend", "--image="+dockerImageRepository1, "--dry-run=client", "-o=yaml").OutputToFile("ocp12036-dc.yaml") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("10) Modify the deploymentconfig and create a new deployment.") exutil.ModifyYamlFileContent(deploymentConfigYaml, []exutil.YamlReplace{ { Path: "spec.template.spec.containers.0.imagePullPolicy", Value: "Always", }, { Path: "spec.template.spec.imagePullSecrets", Value: "- name: user1-dockercfg", }, }) err = oc.Run("create").Args("-f", deploymentConfigYaml).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("11) Check if pod is properly running with expected status.") podsList := getPodsListByLabel(oc.AsAdmin(), oc.Namespace(), "deploymentconfig=frontend") exutil.AssertPodToBeReady(oc, podsList[0], oc.Namespace()) })
test case
openshift/openshift-tests-private
62abf72f-a93e-49df-8c0a-54b4c9443230
Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11905-APIServer Use well-formed pull secret with incorrect credentials will fail to build and deploy [Serial]
['"context"', '"fmt"', '"net/http"', '"regexp"', '"strings"', '"time"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11905-APIServer Use well-formed pull secret with incorrect credentials will fail to build and deploy [Serial]", func() { if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig")) { g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!") } architecture.SkipArchitectures(oc, architecture.MULTI) exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } exutil.By("1) Create a new project required for this test execution") oc.SetupProject() namespace := oc.Namespace() exutil.By("2) Build hello-world from external source") helloWorldSource := "quay.io/openshifttest/ruby-27:1.2.0~https://github.com/openshift/ruby-hello-world" buildName := fmt.Sprintf("ocp11905-test-%s", strings.ToLower(exutil.RandStr(5))) err := oc.Run("new-build").Args(helloWorldSource, "--name="+buildName, "-n", namespace, "--import-mode=PreserveOriginal").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3) Wait for hello-world build to success") buildClient := oc.BuildClient().BuildV1().Builds(oc.Namespace()) err = exutil.WaitForABuild(buildClient, buildName+"-1", nil, nil, nil) if err != nil { exutil.DumpBuildLogs(buildName, oc) } exutil.AssertWaitPollNoErr(err, "build is not complete") exutil.By("4) Get dockerImageRepository value from imagestreams test") dockerImageRepository1, err := oc.Run("get").Args("imagestreams", buildName, "-o=jsonpath={.status.dockerImageRepository}").Output() o.Expect(err).NotTo(o.HaveOccurred()) dockerServer := strings.Split(strings.TrimSpace(dockerImageRepository1), "/") o.Expect(dockerServer).NotTo(o.BeEmpty()) exutil.By("5) Create another project with the second user") oc.SetupProject() exutil.By("6) Give user admin permission") username := oc.Username() 
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", username).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("7) Create secret for private image under project with wrong password") err = oc.WithoutNamespace().AsAdmin().Run("create").Args("secret", "docker-registry", "user1-dockercfg", "[email protected]", "--docker-server="+dockerServer[0], "--docker-username="+username, "--docker-password=password", "-n", oc.Namespace()).NotShowInfo().Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("8) Create new deploymentconfig from the dockerImageRepository fetched in step 4") deploymentConfigYaml, err := oc.Run("create").Args("deploymentconfig", "frontend", "--image="+dockerImageRepository1, "--dry-run=client", "-o=yaml").OutputToFile("ocp12036-dc.yaml") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9) Modify the deploymentconfig and create a new deployment.") exutil.ModifyYamlFileContent(deploymentConfigYaml, []exutil.YamlReplace{ { Path: "spec.template.spec.containers.0.imagePullPolicy", Value: "Always", }, { Path: "spec.template.spec.imagePullSecrets", Value: "- name: user1-dockercfg", }, }) err = oc.Run("create").Args("-f", deploymentConfigYaml).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("10) Check if pod is running with the expected status.") err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { podOutput, err := oc.Run("get").Args("pod").Output() if err == nil { matched, _ := regexp.MatchString("frontend-1-.*(ImagePullBackOff|ErrImagePull)", podOutput) if matched { e2e.Logf("Pod is running with expected status\n%s", podOutput) return true, nil } } return false, nil }) exutil.AssertWaitPollNoErr(err, "pod did not showed up with the expected status") })
test case
openshift/openshift-tests-private
9960df5b-baf6-4ed1-b29c-5e346f9cc150
Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11531-APIServer Can access both http and https pods and services via the API proxy [Serial]
['"fmt"', '"net/http"', '"os/exec"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11531-APIServer Can access both http and https pods and services via the API proxy [Serial]", func() { exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } // Case is failing on which cluster dns is not resolvable ... apiServerFQDN, _ := getApiServerFQDNandPort(oc, false) cmd := fmt.Sprintf(`nslookup %s`, apiServerFQDN) nsOutput, nsErr := exec.Command("bash", "-c", cmd).Output() if nsErr != nil { g.Skip(fmt.Sprintf("DNS resolution failed, case is not suitable for environment %s :: %s", nsOutput, nsErr)) } exutil.By("1) Create a new project required for this test execution") oc.SetupProject() projectNs := oc.Namespace() exutil.By("2. Get the clustername") clusterName, clusterErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("view", "-o", `jsonpath={.clusters[0].name}`).Output() o.Expect(clusterErr).NotTo(o.HaveOccurred()) e2e.Logf("Cluster Name :: %v", clusterName) exutil.By("3. 
Point to the API server referring the cluster name") apiserverName, apiErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("view", "-o", `jsonpath={.clusters[?(@.name=="`+clusterName+`")].cluster.server}`).Output() o.Expect(apiErr).NotTo(o.HaveOccurred()) e2e.Logf("Server Name :: %v", apiserverName) exutil.By("4) Get access token") token, err := oc.Run("whoami").Args("-t").Output() o.Expect(err).NotTo(o.HaveOccurred()) // Define the URL values urls := []struct { URL string Target string ExpectStr string }{ { URL: "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", Target: "hello-openshift", ExpectStr: "Hello OpenShift!", }, { URL: "quay.io/openshifttest/nginx-alpine@sha256:f78c5a93df8690a5a937a6803ef4554f5b6b1ef7af4f19a441383b8976304b4c", Target: "nginx-alpine", ExpectStr: "Hello-OpenShift nginx", }, } for i, u := range urls { exutil.By(fmt.Sprintf("%d.1) Build "+u.Target+" from external source", i+5)) appErr := oc.AsAdmin().WithoutNamespace().Run("new-app").Args(u.URL, "-n", projectNs, "--import-mode=PreserveOriginal").Execute() o.Expect(appErr).NotTo(o.HaveOccurred()) exutil.By(fmt.Sprintf("%d.2) Check if pod is properly running with expected status.", i+5)) podsList := getPodsListByLabel(oc.AsAdmin(), projectNs, "deployment="+u.Target) exutil.AssertPodToBeReady(oc, podsList[0], projectNs) exutil.By(fmt.Sprintf("%d.3) Perform the proxy GET request to resource REST endpoint with service", i+5)) curlUrl := fmt.Sprintf(`%s/api/v1/namespaces/%s/services/http:%s:8080-tcp/proxy/`, apiserverName, projectNs, u.Target) output := clientCurl(token, curlUrl) o.Expect(output).Should(o.ContainSubstring(u.ExpectStr)) exutil.By(fmt.Sprintf("%d.4) Perform the proxy GET request to resource REST endpoint with pod", i+5)) curlUrl = fmt.Sprintf(`%s/api/v1/namespaces/%s/pods/http:%s:8080/proxy`, apiserverName, projectNs, podsList[0]) output = clientCurl(token, curlUrl) 
o.Expect(output).Should(o.ContainSubstring(u.ExpectStr)) } })
test case
openshift/openshift-tests-private
189cab93-76f9-4581-a82b-8b99fb61b597
Author:dpunia-ROSA-ARO-OSD_CCS-High-12193-APIServer User can get node selector from a project
['"context"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-ROSA-ARO-OSD_CCS-High-12193-APIServer User can get node selector from a project", func() { var ( caseID = "ocp-12193" firstProject = "e2e-apiserver-first" + caseID + "-" + exutil.GetRandomString() secondProject = "e2e-apiserver-second" + caseID + "-" + exutil.GetRandomString() labelValue = "qa" + exutil.GetRandomString() ) oc.SetupProject() userName := oc.Username() exutil.By("Pre-requisities, capturing current-context from cluster.") origContxt, contxtErr := oc.Run("config").Args("current-context").Output() o.Expect(contxtErr).NotTo(o.HaveOccurred()) defer func() { useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute() o.Expect(useContxtErr).NotTo(o.HaveOccurred()) }() exutil.By("1) Create a project without node selector") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", firstProject).Execute() err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("new-project", firstProject, "--admin="+userName).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2) Create a project with node selector") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", secondProject).Execute() err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("new-project", secondProject, "--node-selector=env="+labelValue, "--description=testnodeselector", "--admin="+userName).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3) Check node selector field for above 2 projects") firstProjectOut, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("project", firstProject, "--as="+userName).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(firstProjectOut).Should(o.MatchRegexp("Node Selector:.*<none>")) secondProjectOut, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("project", secondProject, "--as="+userName).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(secondProjectOut).Should(o.MatchRegexp("Node Selector:.*env=" + labelValue)) })
test case
openshift/openshift-tests-private
30a27e8b-42e6-4903-b883-576be60d3b3a
Author:kewang-ROSA-ARO-OSD_CCS-HyperShiftMGMT-High-65924-Specifying non-existen secret for API namedCertificates renders inconsistent config [Disruptive]
['"encoding/json"', '"fmt"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-ROSA-ARO-OSD_CCS-HyperShiftMGMT-High-65924-Specifying non-existen secret for API namedCertificates renders inconsistent config [Disruptive]", func() { // Currently, there is one bug OCPBUGS-15853 on 4.13, after the related PRs are merged, consider back-porting the case to 4.13 var ( apiserver = "apiserver/cluster" kas = "openshift-kube-apiserver" kasOpExpectedStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} kasOpNewStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "True"} apiServerFQDN, _ = getApiServerFQDNandPort(oc, false) patch = fmt.Sprintf(`{"spec":{"servingCerts": {"namedCertificates": [{"names": ["%s"], "servingCertificate": {"name": "client-ca-cusom"}}]}}}`, apiServerFQDN) patchToRecover = `[{ "op": "remove", "path": "/spec/servingCerts" }]` ) defer func() { exutil.By(" Last) Check the kube-apiserver cluster operator after removed the non-existen secret for API namedCertificates .") err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(apiserver, "-p", patchToRecover, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = waitCoBecomes(oc, "kube-apiserver", 300, kasOpExpectedStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available") }() exutil.By("1) Get the current revision of openshift-kube-apiserver.") out, revisionChkErr := oc.AsAdmin().Run("get").Args("po", "-n", kas, "-l=apiserver", "-o", "jsonpath={.items[*].metadata.labels.revision}").Output() o.Expect(revisionChkErr).NotTo(o.HaveOccurred()) s := strings.Split(out, " ") preRevisionSum := 0 for _, valueStr := range s { valueInt, _ := strconv.Atoi(valueStr) preRevisionSum += valueInt } e2e.Logf("Current revisions of kube-apiservers: %v", out) exutil.By("2) Apply non-existen secret for API namedCertificates.") err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(apiserver, "-p", patch, "--type=merge").Execute() o.Expect(err).NotTo(o.HaveOccurred()) 
exutil.By("3) Wait for a while and check the status of kube-apiserver cluster operator.") errCo := waitCoBecomes(oc, "kube-apiserver", 300, kasOpNewStatus) exutil.AssertWaitPollNoErr(errCo, "kube-apiserver operator is not becomes degraded") output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "kube-apiserver").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("ConfigObservationDegraded")) exutil.By("4) Check that cluster does nothing and no kube-server pod crash-looping.") out, revisionChkErr = oc.AsAdmin().Run("get").Args("po", "-n", kas, "-l=apiserver", "-o", "jsonpath={.items[*].metadata.labels.revision}").Output() o.Expect(revisionChkErr).NotTo(o.HaveOccurred()) s1 := strings.Split(out, " ") postRevisionSum := 0 for _, valueStr := range s1 { valueInt, _ := strconv.Atoi(valueStr) postRevisionSum += valueInt } e2e.Logf("Revisions of kube-apiservers after patching: %v", out) o.Expect(postRevisionSum).Should(o.BeNumerically("==", preRevisionSum), "Validation failed as PostRevision value not equal to PreRevision") e2e.Logf("No changes on revisions of kube-apiservers.") kasPodsOutput := getResourceToBeReady(oc, asAdmin, withoutNamespace, "pods", "-l apiserver", "--no-headers", "-n", kas) o.Expect(kasPodsOutput).ShouldNot(o.ContainSubstring("CrashLoopBackOff")) e2e.Logf("Kube-apiservers didn't roll out as expected.") })
test case
openshift/openshift-tests-private
e4d7cc8d-489b-4d35-8074-4f941e134f51
Author:rgangwar-NonHyperShiftHOST-NonPreRelease-Longduration-ROSA-ARO-OSD_CCS-Medium-66921-1-APIServer LatencySensitive featureset must be removed [Slow][Disruptive]
['"encoding/json"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-NonHyperShiftHOST-NonPreRelease-Longduration-ROSA-ARO-OSD_CCS-Medium-66921-1-APIServer LatencySensitive featureset must be removed [Slow][Disruptive]", func() { const ( featurePatch = `[{"op": "replace", "path": "/spec/featureSet", "value": "LatencySensitive"}]` invalidFeatureGate = `[{"op": "replace", "path": "/spec/featureSet", "value": "unknown"}]` ) exutil.By("Checking feature gate") output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregates", "-o", `jsonpath={.items[0].spec.featureSet}`).Output() o.Expect(err).NotTo(o.HaveOccurred()) if output != "" { g.Skip("Skipping case as feature gate is already enabled, can't modify or undo feature gate.") } exutil.By("1. Set Invalid featuregate") output, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "--type=json", "-p", invalidFeatureGate).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(`The FeatureGate "cluster" is invalid`)) e2e.Logf("Error message :: %s", output) // It is removed in 4.17, detail see https://github.com/openshift/cluster-config-operator/pull/324 exutil.By("2. Set featuregate to LatencySensitive") output, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "--type=json", "-p", featurePatch).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(`The FeatureGate "cluster" is invalid`)) })
test case
openshift/openshift-tests-private
33cf6489-747d-431d-8241-3dfb098319c6
Author:rgangwar-NonHyperShiftHOST-NonPreRelease-Longduration-ROSA-ARO-OSD_CCS-Medium-66921-2-APIServer TechPreviewNoUpgrade featureset blocks upgrade [Slow][Disruptive]
['"encoding/json"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-NonHyperShiftHOST-NonPreRelease-Longduration-ROSA-ARO-OSD_CCS-Medium-66921-2-APIServer TechPreviewNoUpgrade featureset blocks upgrade [Slow][Disruptive]", func() { const ( featureTechPreview = `[{"op": "replace", "path": "/spec/featureSet", "value": "TechPreviewNoUpgrade"}]` featureCustomNoUpgrade = `[{"op": "replace", "path": "/spec/featureSet", "value": "CustomNoUpgrade"}]` ) exutil.By("1. Checking feature gate") g.By("Check if the cluster is TechPreviewNoUpgrade") if !isTechPreviewNoUpgrade(oc) { g.Skip("Skip for featuregate set as TechPreviewNoUpgrade") } exutil.By("2. Set featuregate to TechPreviewNoUpgrade again") output, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "--type=json", "-p", featureTechPreview).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(`featuregate.config.openshift.io/cluster patched (no change)`)) kasOpExpectedStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} err = waitCoBecomes(oc, "kube-apiserver", 300, kasOpExpectedStatus) exutil.AssertWaitPollNoErr(err, "changes of status have occurred to the kube-apiserver operator") exutil.By("3. Check featuregate after set to CustomNoUpgrade") output, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "--type=json", "-p", featureCustomNoUpgrade).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(`The FeatureGate "cluster" is invalid: spec.featureSet: Invalid value: "string": TechPreviewNoUpgrade may not be changed`)) })
test case
openshift/openshift-tests-private
1078e8e5-18d9-4d24-94fd-a702eb06d35f
Author:kewang-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11797-[Apiserver] Image with single or multiple layer(s) sumed up size slightly exceed the openshift.io/image-size will push failed
['"bufio"', '"context"', '"fmt"', '"net/http"', '"os"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-11797-[Apiserver] Image with single or multiple layer(s) sumed up size slightly exceed the openshift.io/image-size will push failed", func() { if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig") && isEnabledCapability(oc, "ImageRegistry")) { g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!") } exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } var ( imageLimitRangeYamlFile = tmpdir + "image-limit-range.yaml" imageLimitRangeYaml = fmt.Sprintf(`apiVersion: v1 kind: LimitRange metadata: name: openshift-resource-limits spec: limits: - type: openshift.io/Image max: storage: %s - type: openshift.io/ImageStream max: openshift.io/image-tags: 20 openshift.io/images: 30 `, "100Mi") ) exutil.By("1) Create new project required for this test execution") oc.SetupProject() namespace := oc.Namespace() exutil.By("2) Create a resource quota limit of the image") f, err := os.Create(imageLimitRangeYamlFile) o.Expect(err).NotTo(o.HaveOccurred()) defer f.Close() w := bufio.NewWriter(f) _, err = w.WriteString(imageLimitRangeYaml) w.Flush() o.Expect(err).NotTo(o.HaveOccurred()) defer oc.AsAdmin().Run("delete").Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute() quotaErr := oc.AsAdmin().Run("create").Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute() o.Expect(quotaErr).NotTo(o.HaveOccurred()) exutil.By(`3) Using "skopeo" tool to copy image from quay registry to the default internal registry of the cluster`) destRegistry := "docker://" + defaultRegistryServiceURL + "/" + namespace + "/mystream:latest" exutil.By(`3.1) Try copying multiple layers image to the default internal registry of the cluster`) publicImageUrl := "docker://" + 
"quay.io/openshifttest/mysql:1.2.0" var output string errPoll := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 200*time.Second, false, func(cxt context.Context) (bool, error) { output, err = copyImageToInternelRegistry(oc, namespace, publicImageUrl, destRegistry) if err != nil { if strings.Contains(output, "denied") { o.Expect(strings.Contains(output, "denied")).Should(o.BeTrue(), "Should deny copying"+publicImageUrl) return true, nil } } return false, nil }) if errPoll != nil { e2e.Logf("Failed to retrieve %v", output) exutil.AssertWaitPollNoErr(errPoll, "Failed to retrieve") } exutil.By(`3.2) Try copying single layer image to the default internal registry of the cluster`) publicImageUrl = "docker://" + "quay.io/openshifttest/singlelayer:latest" errPoll = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 200*time.Second, false, func(cxt context.Context) (bool, error) { output, err = copyImageToInternelRegistry(oc, namespace, publicImageUrl, destRegistry) if err != nil { if strings.Contains(output, "denied") { o.Expect(strings.Contains(output, "denied")).Should(o.BeTrue(), "Should deny copying"+publicImageUrl) return true, nil } } return false, nil }) if errPoll != nil { e2e.Logf("Failed to retrieve %v", output) exutil.AssertWaitPollNoErr(errPoll, "Failed to retrieve") } })
test case
openshift/openshift-tests-private
721a19de-3b0d-4465-b282-7049c2388ccd
Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-10865-[Apiserver] After Image Size Limit increment can push the image which previously over the limit
['"bufio"', '"context"', '"fmt"', '"net/http"', '"os"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-10865-[Apiserver] After Image Size Limit increment can push the image which previously over the limit", func() { if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig") && isEnabledCapability(oc, "ImageRegistry")) { g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!") } exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } imageLimitRangeYamlFile := tmpdir + "image-limit-range.yaml" exutil.By("1) Create new project required for this test execution") oc.SetupProject() namespace := oc.Namespace() defer oc.AsAdmin().Run("delete").Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute() for i, storage := range []string{"16Mi", "1Gi"} { // Use fmt.Sprintf to update the storage value dynamically imageLimitRangeYaml := fmt.Sprintf(`apiVersion: v1 kind: LimitRange metadata: name: openshift-resource-limits spec: limits: - type: openshift.io/Image max: storage: %s - type: openshift.io/ImageStream max: openshift.io/image-tags: 20 openshift.io/images: 30 `, storage) exutil.By(fmt.Sprintf("%d.1) Create a resource quota limit of the image with storage limit %s", i+1, storage)) f, err := os.Create(imageLimitRangeYamlFile) o.Expect(err).NotTo(o.HaveOccurred()) defer f.Close() w := bufio.NewWriter(f) _, err = w.WriteString(imageLimitRangeYaml) w.Flush() o.Expect(err).NotTo(o.HaveOccurred()) // Define the action (create or replace) based on the storage value var action string if storage == "16Mi" { action = "create" } else if storage == "1Gi" { action = "replace" } quotaErr := oc.AsAdmin().Run(action).Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute() o.Expect(quotaErr).NotTo(o.HaveOccurred()) exutil.By(fmt.Sprintf(`%d.2) Using "skopeo" tool to copy 
image from quay registry to the default internal registry of the cluster`, i+1)) destRegistry := "docker://" + defaultRegistryServiceURL + "/" + namespace + "/mystream:latest" exutil.By(fmt.Sprintf(`%d.3) Try copying image to the default internal registry of the cluster`, i+1)) publicImageUrl := "docker://quay.io/openshifttest/base-alpine@sha256:3126e4eed4a3ebd8bf972b2453fa838200988ee07c01b2251e3ea47e4b1f245c" var output string errPoll := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err = copyImageToInternelRegistry(oc, namespace, publicImageUrl, destRegistry) if err != nil { if strings.Contains(output, "denied") { o.Expect(strings.Contains(output, "denied")).Should(o.BeTrue(), "Should deny copying"+publicImageUrl) return true, nil } } else if err == nil { return true, nil } return false, nil }) if errPoll != nil { e2e.Logf("Failed to retrieve %v", output) exutil.AssertWaitPollNoErr(errPoll, "Failed to retrieve") } } })
test case
openshift/openshift-tests-private
d299e177-7815-4002-9232-4096d653fb04
Author:dpunia-NonHyperShiftHOST-NonPreRelease-Longduration-ROSA-ARO-OSD_CCS-ConnectedOnly-Low-24389-Verify the CR admission of the APIServer CRD [Slow][Disruptive]
['"crypto/tls"', '"net/http"', '"net/url"', '"os"', '"strings"', '"time"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-NonHyperShiftHOST-NonPreRelease-Longduration-ROSA-ARO-OSD_CCS-ConnectedOnly-Low-24389-Verify the CR admission of the APIServer CRD [Slow][Disruptive]", func() { var ( patchOut string patchJsonRevert = `{"spec": {"additionalCORSAllowedOrigins": null}}` patchJson = `{ "spec": { "additionalCORSAllowedOrigins": [ "(?i)//127\\.0\\.0\\.1(:|\\z)", "(?i)//localhost(:|\\z)", "(?i)//kubernetes\\.default(:|\\z)", "(?i)//kubernetes\\.default\\.svc\\.cluster\\.local(:|\\z)", "(?i)//kubernetes(:|\\z)", "(?i)//openshift\\.default(:|\\z)", "(?i)//openshift\\.default\\.svc(:|\\z)", "(?i)//openshift\\.default\\.svc\\.cluster\\.local(:|\\z)", "(?i)//kubernetes\\.default\\.svc(:|\\z)", "(?i)//openshift(:|\\z)" ]}}` ) exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } apiServerRecover := func() { errKASO := waitCoBecomes(oc, "kube-apiserver", 100, map[string]string{"Progressing": "True"}) exutil.AssertWaitPollNoErr(errKASO, "kube-apiserver operator is not start progressing in 100 seconds") e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds") errKASO = waitCoBecomes(oc, "kube-apiserver", 1500, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}) exutil.AssertWaitPollNoErr(errKASO, "openshift-kube-apiserver pods revisions recovery not completed") } defer func() { if strings.Contains(patchOut, "patched") { err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver", "cluster", "--type=merge", "-p", patchJsonRevert).Execute() o.Expect(err).NotTo(o.HaveOccurred()) // Wait for kube-apiserver recover apiServerRecover() } }() exutil.By("1) Update apiserver config(additionalCORSAllowedOrigins) with invalid config `no closing (parentheses`") patch := `{"spec": {"additionalCORSAllowedOrigins": ["no closing (parentheses"]}}` patchOut, err := 
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver", "cluster", "--type=merge", "-p", patch).Output() o.Expect(err).Should(o.HaveOccurred()) o.Expect(patchOut).Should(o.ContainSubstring(`"no closing (parentheses": not a valid regular expression`)) exutil.By("2) Update apiserver config(additionalCORSAllowedOrigins) with invalid string type") patch = `{"spec": {"additionalCORSAllowedOrigins": "some string"}}` patchOut, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver", "cluster", "--type=merge", "-p", patch).Output() o.Expect(err).Should(o.HaveOccurred()) o.Expect(patchOut).Should(o.ContainSubstring(`body must be of type array: "string"`)) exutil.By("3) Update apiserver config(additionalCORSAllowedOrigins) with valid config") patchOut, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver", "cluster", "--type=merge", "-p", patchJson).Output() o.Expect(err).ShouldNot(o.HaveOccurred()) o.Expect(patchOut).Should(o.ContainSubstring("patched")) // Wait for kube-apiserver recover apiServerRecover() exutil.By("4) Verifying the additionalCORSAllowedOrigins by inspecting the HTTP response headers") urlStr, err := oc.Run("whoami").Args("--show-server").Output() o.Expect(err).NotTo(o.HaveOccurred()) req, err := http.NewRequest("GET", urlStr, nil) o.Expect(err).NotTo(o.HaveOccurred()) req.Header.Set("Origin", "http://localhost") tr := &http.Transport{} if os.Getenv("HTTPS_PROXY") != "" || os.Getenv("https_proxy") != "" { httpsProxy, err := url.Parse(os.Getenv("https_proxy")) o.Expect(err).NotTo(o.HaveOccurred()) tr.Proxy = http.ProxyURL(httpsProxy) } tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{ Transport: tr, Timeout: time.Second * 30, // Set a timeout for the entire request } resp, err := client.Do(req) o.Expect(err).NotTo(o.HaveOccurred()) defer resp.Body.Close() o.Expect(resp.Header.Get("Access-Control-Allow-Origin")).To(o.Equal("http://localhost")) })
test case
openshift/openshift-tests-private
011f5b09-a8a0-4192-b0cc-ba7d8984b707
Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-12263-[Apiserver] When exceed openshift.io/images will ban to create image reference or push image to project
['"bufio"', '"context"', '"fmt"', '"net/http"', '"os"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'g "github.com/onsi/ginkgo/v2"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-12263-[Apiserver] When exceed openshift.io/images will ban to create image reference or push image to project", func() { if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig") && isEnabledCapability(oc, "ImageRegistry")) { g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!") } exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } var ( imageLimitRangeYamlFile = tmpdir + "image-limit-range.yaml" imageName1 = `quay.io/openshifttest/base-alpine@sha256:3126e4eed4a3ebd8bf972b2453fa838200988ee07c01b2251e3ea47e4b1f245c` imageName2 = `quay.io/openshifttest/hello-openshift:1.2.0` imageName3 = `quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83` imageStreamErr error ) exutil.By("1) Create new project required for this test execution") oc.SetupProject() namespace := oc.Namespace() defer oc.AsAdmin().Run("delete").Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute() imageLimitRangeYaml := `apiVersion: v1 kind: LimitRange metadata: name: openshift-resource-limits spec: limits: - type: openshift.io/Image max: storage: 1Gi - type: openshift.io/ImageStream max: openshift.io/image-tags: 20 openshift.io/images: 1 ` exutil.By("2) Create a resource quota limit of the image with images limit 1") f, err := os.Create(imageLimitRangeYamlFile) o.Expect(err).NotTo(o.HaveOccurred()) defer f.Close() w := bufio.NewWriter(f) _, err = w.WriteString(imageLimitRangeYaml) w.Flush() o.Expect(err).NotTo(o.HaveOccurred()) quotaErr := oc.AsAdmin().Run("create").Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute() o.Expect(quotaErr).NotTo(o.HaveOccurred()) exutil.By(fmt.Sprintf("3.) 
Applying a mystream:v1 image tag to %s in an image stream should succeed", imageName1)) tagErr := oc.AsAdmin().WithoutNamespace().Run("tag").Args(imageName1, "--source=docker", "mystream:v1", "-n", namespace).Execute() o.Expect(tagErr).NotTo(o.HaveOccurred()) // Inline steps will wait for tag 1 to get it imported successfully before adding tag 2 and this helps to avoid race-caused failure.Ref:OCPQE-7679. errImage := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { imageStreamOutput, imageStreamErr := oc.AsAdmin().WithoutNamespace().Run("describe").Args("imagestream", "mystream", "-n", namespace).Output() if imageStreamErr == nil { if strings.Contains(imageStreamOutput, imageName1) { e2e.Logf("Image is tag with v1 successfully\n%s", imageStreamOutput) return true, nil } } return false, nil }) exutil.AssertWaitPollNoErr(errImage, fmt.Sprintf("Image is tag with v1 is not successfull %s", imageStreamErr)) exutil.By(fmt.Sprintf("4.) Applying the mystream:v2 image tag to another %s in an image stream should fail due to the ImageStream max images limit", imageName2)) tagErr = oc.AsAdmin().WithoutNamespace().Run("tag").Args(imageName2, "--source=docker", "mystream:v2", "-n", namespace).Execute() o.Expect(tagErr).NotTo(o.HaveOccurred()) var imageStreamv2Err error errImageV2 := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { imageStreamv2Output, imageStreamv2Err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("imagestream", "mystream", "-n", namespace).Output() if imageStreamv2Err == nil { if strings.Contains(imageStreamv2Output, "Import failed") { e2e.Logf("Image is tag with v2 not successfull\n%s", imageStreamv2Output) return true, nil } } return false, nil }) exutil.AssertWaitPollNoErr(errImageV2, fmt.Sprintf("Image is tag with v2 is successfull %s", imageStreamv2Err)) exutil.By(`5.) 
Copying an image to the default internal registry of the cluster should be denied due to the max storage size limit for images`) destRegistry := "docker://" + defaultRegistryServiceURL + "/" + namespace + "/mystream:latest" publicImageUrl := "docker://" + imageName3 var output string errPoll := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err = copyImageToInternelRegistry(oc, namespace, publicImageUrl, destRegistry) if err != nil { if strings.Contains(output, "denied") { o.Expect(strings.Contains(output, "denied")).Should(o.BeTrue(), "Should deny copying"+publicImageUrl) return true, nil } } return false, nil }) if errPoll != nil { e2e.Logf("Failed to retrieve %v", output) exutil.AssertWaitPollNoErr(errPoll, "Failed to retrieve") } })
test case
openshift/openshift-tests-private
2d73efc4-a623-40c9-859d-e181f376a667
Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-10970-[Apiserver] Create service with multiports
['"encoding/json"', '"fmt"', '"math/rand"', '"net/http"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
// OCP-10970: verifies that a pod exposing two container ports (HTTP 8080, HTTPS 8443)
// can be reached through both a NodePort service (via the node's host IP) and a
// ClusterIP service (via the service's cluster IP), using a second in-cluster
// "ping" pod as the client.
g.It("Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-10970-[Apiserver] Create service with multiports", func() {
	var (
		filename  = "pod_with_multi_ports.json" // server pod exposing containerPorts 8080/8443
		filename1 = "pod-for-ping.json"         // client pod used to curl the service endpoints
		podName1  = "hello-openshift"
		podName2  = "pod-for-ping"
	)

	exutil.By("Check if it's a proxy cluster")
	httpProxy, httpsProxy, _ := getGlobalProxy(oc)
	if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") {
		g.Skip("Skip for proxy platform")
	}

	exutil.By("1) Create new project required for this test execution")
	oc.SetupProject()
	namespace := oc.Namespace()

	exutil.By(fmt.Sprintf("2) Create pod with resource file %s", filename))
	template := getTestDataFilePath(filename)
	err := oc.Run("create").Args("-f", template, "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("3) Wait for pod with name %s to be ready", podName1))
	exutil.AssertPodToBeReady(oc, podName1, namespace)

	exutil.By(fmt.Sprintf("4) Check host ip for pod %s", podName1))
	hostIP, err := oc.Run("get").Args("pods", podName1, "-o=jsonpath={.status.hostIP}", "-n", namespace).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(hostIP).NotTo(o.Equal(""))
	e2e.Logf("Get host ip %s", hostIP)

	exutil.By("5) Create nodeport service with random service port")
	// Ports are drawn from disjoint ranges ([6000,8999] and [9000,15000]) so the
	// two service ports can never collide with each other.
	servicePort1 := rand.Intn(3000) + 6000
	servicePort2 := rand.Intn(6001) + 9000
	serviceErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("service", "nodeport", podName1, fmt.Sprintf("--tcp=%d:8080,%d:8443", servicePort1, servicePort2), "-n", namespace).Execute()
	o.Expect(serviceErr).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("6) Check the service with the node port %s", podName1))
	// Look up the auto-assigned nodePort for each declared service port.
	nodePort1, err := oc.Run("get").Args("services", podName1, fmt.Sprintf("-o=jsonpath={.spec.ports[?(@.port==%d)].nodePort}", servicePort1)).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(nodePort1).NotTo(o.Equal(""))
	nodePort2, err := oc.Run("get").Args("services", podName1, fmt.Sprintf("-o=jsonpath={.spec.ports[?(@.port==%d)].nodePort}", servicePort2)).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(nodePort2).NotTo(o.Equal(""))
	e2e.Logf("Get node port %s :: %s", nodePort1, nodePort2)

	exutil.By(fmt.Sprintf("6.1) Create pod with resource file %s for checking network access", filename1))
	template = getTestDataFilePath(filename1)
	err = oc.Run("create").Args("-f", template, "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("6.2) Wait for pod with name %s to be ready", podName2))
	exutil.AssertPodToBeReady(oc, podName2, namespace)

	exutil.By(fmt.Sprintf("6.3) Check URL endpoint access"))
	// NodePort access: curl the node's host IP on the assigned node ports.
	checkURLEndpointAccess(oc, hostIP, nodePort1, podName2, "http", "hello-openshift http-8080")
	checkURLEndpointAccess(oc, hostIP, nodePort2, podName2, "https", "hello-openshift https-8443")

	exutil.By(fmt.Sprintf("6.4) Delete service %s", podName1))
	err = oc.Run("delete").Args("service", podName1).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("7) Create another service with random target ports %d :: %d", servicePort1, servicePort2))
	err1 := oc.Run("create").Args("service", "clusterip", podName1, fmt.Sprintf("--tcp=%d:8080,%d:8443", servicePort1, servicePort2)).Execute()
	o.Expect(err1).NotTo(o.HaveOccurred())
	defer oc.Run("delete").Args("service", podName1).Execute()

	exutil.By(fmt.Sprintf("7.1) Check cluster ip for pod %s", podName1))
	clusterIP, serviceErr := oc.Run("get").Args("services", podName1, "-o=jsonpath={.spec.clusterIP}", "-n", namespace).Output()
	o.Expect(serviceErr).NotTo(o.HaveOccurred())
	o.Expect(clusterIP).ShouldNot(o.BeEmpty())
	e2e.Logf("Get node clusterIP :: %s", clusterIP)

	exutil.By(fmt.Sprintf("7.2) Check URL endpoint access again"))
	// ClusterIP access: the service port itself is used (no node-port translation).
	checkURLEndpointAccess(oc, clusterIP, strconv.Itoa(servicePort1), podName2, "http", "hello-openshift http-8080")
	checkURLEndpointAccess(oc, clusterIP, strconv.Itoa(servicePort2), podName2, "https", "hello-openshift https-8443")
})
test case
openshift/openshift-tests-private
52f11ff2-bc7a-41b7-a67a-8b662cc9ee79
Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-12158-[Apiserver] Specify ResourceQuota on project
['"bufio"', '"context"', '"fmt"', '"net/http"', '"os"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
// OCP-12158: verifies that an openshift.io/imagestreams ResourceQuota of 1 on a
// project is enforced: the first image-stream tag succeeds, creating a second
// image stream is rejected with a quota error, and a direct image copy to the
// internal registry (which would create a third stream) is denied.
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-ConnectedOnly-Medium-12158-[Apiserver] Specify ResourceQuota on project", func() {
	// The test needs Build/DeploymentConfig/ImageRegistry capabilities when baseline caps are set.
	if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig") && isEnabledCapability(oc, "ImageRegistry")) {
		g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!")
	}

	exutil.By("Check if it's a proxy cluster")
	httpProxy, httpsProxy, _ := getGlobalProxy(oc)
	if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") {
		g.Skip("Skip for proxy platform")
	}

	var (
		imageLimitRangeYamlFile = tmpdir + "image-limit-range.yaml"
		imageName1              = `quay.io/openshifttest/base-alpine@sha256:3126e4eed4a3ebd8bf972b2453fa838200988ee07c01b2251e3ea47e4b1f245c`
		imageName2              = `quay.io/openshifttest/hello-openshift:1.2.0`
		imageName3              = `quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83`
		imageStreamErr          error
	)

	exutil.By("1) Create new project required for this test execution")
	oc.SetupProject()
	namespace := oc.Namespace()
	defer oc.AsAdmin().Run("delete").Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute()

	// Quota: at most one ImageStream object in this project.
	imageLimitRangeYaml := `apiVersion: v1
kind: ResourceQuota
metadata:
  name: openshift-object-counts
spec:
  hard:
    openshift.io/imagestreams: "1"
`

	exutil.By("2) Create a resource quota limit of the imagestream with limit 1")
	f, err := os.Create(imageLimitRangeYamlFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer f.Close()
	w := bufio.NewWriter(f)
	_, err = w.WriteString(imageLimitRangeYaml)
	w.Flush()
	o.Expect(err).NotTo(o.HaveOccurred())
	quotaErr := oc.AsAdmin().Run("create").Args("-f", imageLimitRangeYamlFile, "-n", namespace).Execute()
	o.Expect(quotaErr).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("3.) Applying a mystream:v1 image tag to %s in an image stream should succeed", imageName1))
	tagErr := oc.AsAdmin().WithoutNamespace().Run("tag").Args(imageName1, "--source=docker", "mystream:v1", "-n", namespace).Execute()
	o.Expect(tagErr).NotTo(o.HaveOccurred())
	// Inline steps will wait for tag 1 to get it imported successfully before adding tag 2 and this helps to avoid race-caused failure.Ref:OCPQE-7679.
	errImage := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
		imageStreamOutput, imageStreamErr := oc.AsAdmin().WithoutNamespace().Run("describe").Args("imagestream", "mystream", "-n", namespace).Output()
		if imageStreamErr == nil {
			if strings.Contains(imageStreamOutput, imageName1) {
				return true, nil
			}
		}
		return false, nil
	})
	exutil.AssertWaitPollNoErr(errImage, fmt.Sprintf("Image tagging with v1 is not successful %s", imageStreamErr))

	exutil.By(fmt.Sprintf("4.) Applying the mystream2:v1 image tag to another %s in an image stream should fail due to the ImageStream max limit", imageName2))
	output, tagErr := oc.AsAdmin().WithoutNamespace().Run("tag").Args(imageName2, "--source=docker", "mystream2:v1", "-n", namespace).Output()
	o.Expect(tagErr).To(o.HaveOccurred())
	// Server wording varies across releases ("forbidden: exceeded quota" vs "Exceeded").
	o.Expect(string(output)).To(o.MatchRegexp("forbidden: [Ee]xceeded quota"))

	exutil.By(`5.) Copying an image to the default internal registry of the cluster should be denied due to the max imagestream limit for images`)
	destRegistry := "docker://" + defaultRegistryServiceURL + "/" + namespace + "/mystream3"
	publicImageUrl := "docker://" + imageName3
	// Poll because quota enforcement on the registry side can lag slightly.
	errPoll := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) {
		output, err = copyImageToInternelRegistry(oc, namespace, publicImageUrl, destRegistry)
		if err != nil {
			if strings.Contains(output, "denied") {
				o.Expect(strings.Contains(output, "denied")).Should(o.BeTrue(), "Should deny copying"+publicImageUrl)
				return true, nil
			}
		}
		return false, nil
	})
	if errPoll != nil {
		e2e.Logf("Failed to retrieve %v", output)
		exutil.AssertWaitPollNoErr(errPoll, "Failed to retrieve")
	}
})
test case
openshift/openshift-tests-private
3d0e704f-2a68-48ea-8319-6e732492613f
Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Medium-68629-[Apiserver] Audit log files of apiservers should not have too permissive mode
['"fmt"', '"os/exec"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Medium-68629-[Apiserver] Audit log files of apiservers should not have too permissive mode", func() { directories := []string{ "/var/log/kube-apiserver/", "/var/log/openshift-apiserver/", "/var/log/oauth-apiserver/", } exutil.By("Get all master nodes.") masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master") o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred()) o.Expect(masterNodes).NotTo(o.BeEmpty()) for i, directory := range directories { exutil.By(fmt.Sprintf("%v) Checking permissions for directory: %s\n", i+1, directory)) // Skip checking of hidden files cmd := fmt.Sprintf(`find %s -type f ! -perm 600 ! -name ".*" -exec ls -l {} +`, directory) for _, masterNode := range masterNodes { e2e.Logf("Checking permissions for directory: %s on node %s", directory, masterNode) masterNodeOutput, checkFileErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=openshift-kube-apiserver"}, "bash", "-c", cmd) o.Expect(checkFileErr).NotTo(o.HaveOccurred()) // Filter out the specific warning from the output lines := strings.Split(string(masterNodeOutput), "\n") cleanedLines := make([]string, 0, len(lines)) for _, line := range lines { if !strings.Contains(line, "Warning: metadata.name: this is used in the Pod's hostname") { cleanedLine := strings.TrimSpace(line) if cleanedLine != "" { cleanedLines = append(cleanedLines, cleanedLine) } } } // Iterate through the cleaned lines to check file permissions for _, line := range cleanedLines { if strings.Contains(line, "-rw-------.") { e2e.Logf("Node %s has a file with valid permissions 600 in %s:\n %s\n", masterNode, directory, line) } else { e2e.Failf("Node %s has a file with invalid permissions in %s:\n %v", masterNode, directory, line) } } } } })
test case
openshift/openshift-tests-private
130c2fa6-7a7d-4c45-aab1-84384f97762a
Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Longduration-NonPreRelease-ConnectedOnly-Medium-68400-[Apiserver] Do not generate image pull secrets for internal registry when internal registry is disabled[Slow][Disruptive]
['"fmt"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
// OCP-68400: verifies that when the internal image registry is disabled
// (managementState Removed), newly created service accounts no longer get
// auto-generated token/dockercfg pull secrets, manually created secrets are
// left alone (and carry no SA references), and public-registry pulls still work.
// Disruptive: toggles the cluster image registry and waits for KAS rollouts.
g.It("Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-Longduration-NonPreRelease-ConnectedOnly-Medium-68400-[Apiserver] Do not generate image pull secrets for internal registry when internal registry is disabled[Slow][Disruptive]", func() {
	var (
		namespace    = "ocp-68400"
		secretOutput string
		dockerOutput string
		// Step counter; offset by +5 when the registry starts out enabled,
		// because the disable/verify steps only run in that branch.
		currentStep = 2
	)
	err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", namespace, "--ignore-not-found").Execute()

	exutil.By("1. Check Image registry's enabled")
	output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("configs.imageregistry.operator.openshift.io/cluster", "-o", `jsonpath='{.spec.managementState}'`).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	if strings.Contains(output, "Managed") {
		exutil.By(fmt.Sprintf("%v. Create serviceAccount test-a", currentStep))
		err = oc.WithoutNamespace().AsAdmin().Run("create").Args("sa", "test-a", "-n", namespace).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.By(fmt.Sprintf("%v. Check if Token and Dockercfg Secrets of SA test-a are created.", currentStep+1))
		// With the registry enabled, a dockercfg secret is auto-generated for the SA.
		secretOutput = getResourceToBeReady(oc, asAdmin, withoutNamespace, "secrets", "-n", namespace, "-o", "jsonpath='{range .items[*]}{.metadata.name}{\" \"}'")
		o.Expect(string(secretOutput)).To(o.ContainSubstring("test-a-dockercfg-"))

		exutil.By(fmt.Sprintf("%v. Disable the Internal Image Registry", currentStep+2))
		// Deferred: re-enable the registry and wait for KAS/image-registry to settle.
		defer func() {
			exutil.By("Recovering Internal image registry")
			output, err := oc.WithoutNamespace().AsAdmin().Run("patch").Args("configs.imageregistry/cluster", "-p", `{"spec":{"managementState":"Managed"}}`, "--type=merge").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			if strings.Contains(output, "patched (no change)") {
				e2e.Logf("No changes to the internal image registry.")
			} else {
				exutil.By("Waiting KAS and Image registry reboot after the Internal Image Registry was enabled")
				e2e.Logf("Checking kube-apiserver operator should be in Progressing in 100 seconds")
				expectedStatus := map[string]string{"Progressing": "True"}
				err = waitCoBecomes(oc, "kube-apiserver", 100, expectedStatus)
				exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds")
				e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds")
				expectedStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
				err = waitCoBecomes(oc, "kube-apiserver", 1500, expectedStatus)
				exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds")
				err = waitCoBecomes(oc, "image-registry", 100, expectedStatus)
				exutil.AssertWaitPollNoErr(err, "image-registry operator is not becomes available in 100 seconds")
			}
		}()
		err = oc.WithoutNamespace().AsAdmin().Run("patch").Args("configs.imageregistry/cluster", "-p", `{"spec":{"managementState":"Removed"}}`, "--type=merge").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.By(fmt.Sprintf("%v. Waiting KAS and Image registry reboot after the Internal Image Registry was disabled", currentStep+3))
		e2e.Logf("Checking kube-apiserver operator should be in Progressing in 100 seconds")
		expectedStatus := map[string]string{"Progressing": "True"}
		err = waitCoBecomes(oc, "kube-apiserver", 100, expectedStatus)
		exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds")
		e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds")
		expectedStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
		err = waitCoBecomes(oc, "kube-apiserver", 1500, expectedStatus)
		exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds")
		err = waitCoBecomes(oc, "image-registry", 100, expectedStatus)
		exutil.AssertWaitPollNoErr(err, "image-registry operator is not becomes available in 100 seconds")

		exutil.By(fmt.Sprintf("%v. Check if Token and Dockercfg Secrets of SA test-a are removed", currentStep+4))
		// Disabling the registry should garbage-collect the generated secrets.
		secretOutput, err = getResource(oc, asAdmin, withoutNamespace, "secrets", "-n", namespace, "-o", `jsonpath={range .items[*]}{.metadata.name}`)
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(secretOutput).Should(o.BeEmpty())
		dockerOutput, err = getResource(oc, asAdmin, withoutNamespace, "sa", "test-a", "-n", namespace, "-o", `jsonpath='{.secrets[*].name}'`)
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(dockerOutput).ShouldNot(o.ContainSubstring("dockercfg"))
		currentStep = currentStep + 5
	}

	exutil.By(fmt.Sprintf("%v. Create serviceAccount test-b", currentStep))
	err = oc.WithoutNamespace().AsAdmin().Run("create").Args("sa", "test-b", "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("%v. Check if Token and Dockercfg Secrets of SA test-b are created.", currentStep+1))
	// With the registry disabled, no secrets should be generated for the new SA.
	secretOutput, err = getResource(oc, asAdmin, withoutNamespace, "secrets", "-n", namespace, "-o", `jsonpath={range .items[*]}{.metadata.name}`)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(secretOutput).Should(o.BeEmpty())
	dockerOutput, err = getResource(oc, asAdmin, withoutNamespace, "sa", "test-b", "-n", namespace, "-o", `jsonpath='{.secrets[*].name}'`)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(dockerOutput).ShouldNot(o.ContainSubstring("dockercfg"))

	exutil.By(fmt.Sprintf("%v. Create new token and dockercfg secrets from any content for SA test-b", currentStep+2))
	newSecretErr := oc.Run("create").Args("-n", namespace, "secret", "generic", "test-b-dockercfg-ocp68400", "--from-literal=username=myuser", "--from-literal=password=mypassword").NotShowInfo().Execute()
	o.Expect(newSecretErr).NotTo(o.HaveOccurred())
	newSecretErr = oc.Run("create").Args("-n", namespace, "secret", "generic", "test-b-token-ocp68400", "--from-literal=username=myuser", "--from-literal=password=mypassword").NotShowInfo().Execute()
	o.Expect(newSecretErr).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("%v. Check if Token and Dockercfg Secrets of SA test-b are not removed", currentStep+3))
	// Manually created secrets must survive — only auto-generated ones are managed.
	secretOutput = getResourceToBeReady(oc, asAdmin, withoutNamespace, "secrets", "-n", namespace, "-o", "jsonpath='{range .items[*]}{.metadata.name}'")
	o.Expect(string(secretOutput)).To(o.ContainSubstring("test-b-dockercfg-ocp68400"))
	o.Expect(string(secretOutput)).To(o.ContainSubstring("test-b-token-ocp68400"))

	exutil.By(fmt.Sprintf("%v. Check if Token and Dockercfg Secrets of SA test-b should not have serviceAccount references", currentStep+4))
	secretOutput, err = getResource(oc, asAdmin, withoutNamespace, "secret", "test-b-token-ocp68400", "-n", namespace, "-o", `jsonpath={.metadata.annotations.kubernetes\.io/service-account\.name}`)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(secretOutput).Should(o.BeEmpty())
	secretOutput, err = getResource(oc, asAdmin, withoutNamespace, "secret", "test-b-dockercfg-ocp68400", "-n", namespace, "-o", `jsonpath={.metadata.annotations.kubernetes\.io/service-account\.name}`)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(secretOutput).Should(o.BeEmpty())

	exutil.By(fmt.Sprintf("%v. Pull image from public registry after disabling internal registry", currentStep+5))
	err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("registry.access.redhat.com/ubi8/httpd-24", "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	podName := getPodsList(oc.AsAdmin(), namespace)
	exutil.AssertPodToBeReady(oc, podName[0], namespace)
})
test case
openshift/openshift-tests-private
92023f81-28a5-4166-b6ea-dd1676ed2d99
Author:rgangwar-WRS-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-70020-V-CM.02-Add new custom certificate for the cluster API [Disruptive] [Slow]
['"crypto/tls"', '"encoding/base64"', '"fmt"', '"io/ioutil"', '"os"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
// OCP-70020: adds a custom serving certificate (with SAN for the API FQDN) to the
// cluster API server, updates the local kubeconfig to trust the new CA, waits for
// the kube-apiserver rollout, and validates the new cert is served while the old
// CA no longer verifies. Disruptive: patches apiserver/cluster and edits KUBECONFIG;
// cleanup restores both and waits for a stable cluster.
g.It("Author:rgangwar-WRS-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-70020-V-CM.02-Add new custom certificate for the cluster API [Disruptive] [Slow]", func() {
	var (
		patchToRecover      = `{"spec":{"servingCerts": {"namedCertificates": null}}}`
		originKubeconfigBkp = "kubeconfig.origin"
		originKubeconfig    = os.Getenv("KUBECONFIG")
		originCA            = tmpdir + "certificate-authority-data-origin.crt"
		newCA               = tmpdir + "certificate-authority-data-origin-new.crt"
		CN_BASE             = "kas-test-cert"
		caKeypem            = tmpdir + "/caKey.pem"
		caCertpem           = tmpdir + "/caCert.pem"
		serverKeypem        = tmpdir + "/serverKey.pem"
		serverconf          = tmpdir + "/server.conf"
		serverWithSANcsr    = tmpdir + "/serverWithSAN.csr"
		serverCertWithSAN   = tmpdir + "/serverCertWithSAN.pem"
		originKubeconfPath  string
	)

	// restoreCluster waits for a stable cluster and removes the custom cert
	// secret from openshift-config if it is still present.
	restoreCluster := func(oc *exutil.CLI) {
		err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", "openshift-config").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		if strings.Contains(output, "custom-api-cert") {
			err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "custom-api-cert", "-n", "openshift-config", "--ignore-not-found").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			e2e.Logf("Cluster openshift-config secret reset to default values")
		}
	}

	// updateKubeconfigWithConcatenatedCert writes newCA = caCert + originCert and
	// replaces certificate-authority-data in the kubeconfig with its base64 form,
	// so the client trusts both the new and the original CA during the rollout.
	updateKubeconfigWithConcatenatedCert := func(caCertPath, originCertPath, kubeconfigPath string, newCertPath string) error {
		caCert, err := ioutil.ReadFile(caCertPath)
		o.Expect(err).NotTo(o.HaveOccurred())
		originCert, err := ioutil.ReadFile(originCertPath)
		o.Expect(err).NotTo(o.HaveOccurred())
		concatenatedCert := append(caCert, originCert...)
		err = ioutil.WriteFile(newCertPath, concatenatedCert, 0644)
		o.Expect(err).NotTo(o.HaveOccurred())
		base64EncodedCert := base64.StdEncoding.EncodeToString(concatenatedCert)
		updateCmdKubeconfg := fmt.Sprintf(`sed -i "s/certificate-authority-data: .*/certificate-authority-data: %s/" %s`, base64EncodedCert, kubeconfigPath)
		_, err = exec.Command("bash", "-c", updateCmdKubeconfg).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		e2e.Logf("Kubeconfig file updated successfully.")
		return nil
	}

	defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "custom-api-cert", "-n", "openshift-config", "--ignore-not-found").Execute()
	defer func() {
		exutil.By("Restoring cluster")
		_, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=merge", "-p", patchToRecover).Output()
		e2e.Logf("Restore original kubeconfig")
		bkpCmdKubeConf := fmt.Sprintf(`cp %s %s`, originKubeconfPath, originKubeconfig)
		_, err := exec.Command("bash", "-c", bkpCmdKubeConf).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		restoreCluster(oc)
		e2e.Logf("Cluster recovered")
	}()

	fqdnName, port := getApiServerFQDNandPort(oc, false)

	//Taking backup of old kubeconfig to restore old kubeconfig
	exutil.By("1. Get the original kubeconfig backup")
	originKubeconfPath = CopyToFile(originKubeconfig, originKubeconfigBkp)

	exutil.By("2. Get the original CA")
	caCmd := fmt.Sprintf(`grep certificate-authority-data %s | grep -Eo "[^ ]+$" | base64 -d > %s`, originKubeconfig, originCA)
	_, err := exec.Command("bash", "-c", caCmd).Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By("3. Create certificates with SAN.")
	// Self-signed CA, then a server key + CSR with the API FQDN as subjectAltName,
	// then a CA-signed server certificate using the v3_req extensions.
	opensslCMD := fmt.Sprintf("openssl genrsa -out %v 2048", caKeypem)
	_, caKeyErr := exec.Command("bash", "-c", opensslCMD).Output()
	o.Expect(caKeyErr).NotTo(o.HaveOccurred())
	opensslCMD = fmt.Sprintf(`openssl req -x509 -new -nodes -key %v -days 100000 -out %v -subj "/CN=%s_ca"`, caKeypem, caCertpem, CN_BASE)
	_, caCertErr := exec.Command("bash", "-c", opensslCMD).Output()
	o.Expect(caCertErr).NotTo(o.HaveOccurred())
	opensslCMD = fmt.Sprintf("openssl genrsa -out %v 2048", serverKeypem)
	_, serverKeyErr := exec.Command("bash", "-c", opensslCMD).Output()
	o.Expect(serverKeyErr).NotTo(o.HaveOccurred())
	serverconfCMD := fmt.Sprintf(`cat > %v << EOF
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = %s
EOF`, serverconf, fqdnName)
	_, serverconfErr := exec.Command("bash", "-c", serverconfCMD).Output()
	o.Expect(serverconfErr).NotTo(o.HaveOccurred())
	serverWithSANCMD := fmt.Sprintf(`openssl req -new -key %v -out %v -subj "/CN=%s_server" -config %v`, serverKeypem, serverWithSANcsr, CN_BASE, serverconf)
	_, serverWithSANErr := exec.Command("bash", "-c", serverWithSANCMD).Output()
	o.Expect(serverWithSANErr).NotTo(o.HaveOccurred())
	serverCertWithSANCMD := fmt.Sprintf(`openssl x509 -req -in %v -CA %v -CAkey %v -CAcreateserial -out %v -days 100000 -extensions v3_req -extfile %s`, serverWithSANcsr, caCertpem, caKeypem, serverCertWithSAN, serverconf)
	_, serverCertWithSANErr := exec.Command("bash", "-c", serverCertWithSANCMD).Output()
	o.Expect(serverCertWithSANErr).NotTo(o.HaveOccurred())

	exutil.By("4. Creating custom secret using server certificate")
	err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "tls", "custom-api-cert", "--cert="+serverCertWithSAN, "--key="+serverKeypem, "-n", "openshift-config").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By("5. Add new certificate to apiserver")
	patchCmd := fmt.Sprintf(`{"spec":{"servingCerts": {"namedCertificates": [{"names": ["%s"], "servingCertificate": {"name": "custom-api-cert"}}]}}}`, fqdnName)
	err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=merge", "-p", patchCmd).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By("6. Add new certificates to kubeconfig")
	// To avoid error "Unable to connect to the server: tls: failed to verify certificate: x509: certificate signed by unknown authority." updating kubeconfig
	err = updateKubeconfigWithConcatenatedCert(caCertpem, originCA, originKubeconfig, newCA)
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By("7. Checking KAS operator should be in Progressing in 300 seconds")
	expectedStatus := map[string]string{"Progressing": "True"}
	// Increasing wait time for prow ci failures
	err = waitCoBecomes(oc, "kube-apiserver", 300, expectedStatus)
	exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 300 seconds")
	e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds")
	expectedStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
	err = waitCoBecomes(oc, "kube-apiserver", 1500, expectedStatus)
	exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds")

	exutil.By("8. Validate new certificates")
	returnValues := []string{"Subject", "Issuer"}
	certDetails, err := urlHealthCheck(fqdnName, port, caCertpem, returnValues)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(string(certDetails.Subject)).To(o.ContainSubstring("CN=kas-test-cert_server"))
	o.Expect(string(certDetails.Issuer)).To(o.ContainSubstring("CN=kas-test-cert_ca"))

	exutil.By("9. Validate old certificates should not work")
	certDetails, err = urlHealthCheck(fqdnName, port, originCA, returnValues)
	o.Expect(err).To(o.HaveOccurred())
})
test case
openshift/openshift-tests-private
6f109dcd-babf-46af-b3e1-39ee81095996
Author:rgangwar-ROSA-ARO-OSD_CCS-NonPreRelease-PstChkUpgrade-Medium-34223-[Apiserver] kube-apiserver and openshift-apiserver should have zero-disruption upgrade
['"regexp"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-ROSA-ARO-OSD_CCS-NonPreRelease-PstChkUpgrade-Medium-34223-[Apiserver] kube-apiserver and openshift-apiserver should have zero-disruption upgrade", func() { defer oc.AsAdmin().WithoutNamespace().Run("ns").Args("project", "ocp-34223-proj", "--ignore-not-found").Execute() cmExistsCmd, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "log", "-n", "ocp-34223-proj").Output() if strings.Contains(cmExistsCmd, "No resources found") || err != nil { g.Skip("Skipping case as ConfigMap ocp-34223 does not exist") } result, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/log", "-n", "ocp-34223-proj", "-o", "yaml").Output() o.Expect(err).NotTo(o.HaveOccurred()) // Check if the result contains any failure messages failures := regexp.MustCompile(`failed`).FindAllString(result, -1) // Verify if there are less than or equal to 1 failure message if len(failures) <= 1 { e2e.Logf("Test case paased: Zero-disruption upgrade") } else { e2e.Failf("Test case failed: Upgrade disruption detected::\n %v", failures) } })
test case
openshift/openshift-tests-private
e92fd849-4cb4-423e-ad52-43a1e8ee7efb
Author:kewang-LEVEL0-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-ConnectedOnly-Critical-10873-Access app througth secure service and regenerate service serving certs if it about to expire [Slow]
['"bytes"', '"crypto/tls"', '"encoding/json"', '"fmt"', '"net/url"', '"os"', '"path/filepath"', '"time"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
// OCP-10873: verifies access to an app through a secure service using the
// service-ca-signed serving certificate, then shortens the cert's expiry
// annotations so less than 1h remains and asserts the service-ca controller
// regenerates the secret — after which the app is still reachable with the
// regenerated cert.
g.It("Author:kewang-LEVEL0-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-ConnectedOnly-Critical-10873-Access app througth secure service and regenerate service serving certs if it about to expire [Slow]", func() {
	var (
		filename     = "aosqe-pod-for-ping.json" // client pod that curls the service
		podName      = "hello-pod"
		caseID       = "ocp10873"
		stepExecTime time.Time // marks when the service (and its cert) was created
	)

	exutil.By("1) Create new project for the test case.")
	oc.SetupProject()
	testNamespace := oc.Namespace()

	exutil.By("2) The appropriate pod security labels are applied to the new project.")
	applyLabel(oc, asAdmin, withoutNamespace, "ns", testNamespace, "security.openshift.io/scc.podSecurityLabelSync=false", "--overwrite")
	applyLabel(oc, asAdmin, withoutNamespace, "ns", testNamespace, "pod-security.kubernetes.io/warn=privileged", "--overwrite")
	applyLabel(oc, asAdmin, withoutNamespace, "ns", testNamespace, "pod-security.kubernetes.io/audit=privileged", "--overwrite")
	applyLabel(oc, asAdmin, withoutNamespace, "ns", testNamespace, "pod-security.kubernetes.io/enforce=privileged", "--overwrite")

	exutil.By("3) Add SCC privileged to the project.")
	err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+testNamespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By("4) Create a service.")
	template := getTestDataFilePath(caseID + "-svc.json")
	svcErr := oc.Run("create").Args("-f", template).Execute()
	o.Expect(svcErr).NotTo(o.HaveOccurred())
	stepExecTime = time.Now()

	exutil.By("5) Create a nginx webserver app with deploymnet.")
	template = getTestDataFilePath(caseID + "-dc.yaml")
	dcErr := oc.Run("create").Args("-f", template).Execute()
	o.Expect(dcErr).NotTo(o.HaveOccurred())
	appPodName := getPodsListByLabel(oc.AsAdmin(), testNamespace, "name=web-server-rc")[0]
	exutil.AssertPodToBeReady(oc, appPodName, testNamespace)
	cmName, err := getResource(oc, asAdmin, withoutNamespace, "configmaps", "nginx-config", "-n", testNamespace, "-o=jsonpath={.metadata.name}")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(cmName).ShouldNot(o.BeEmpty(), "The ConfigMap 'nginx-config' name should not be empty")

	exutil.By(fmt.Sprintf("6.1) Create pod with resource file %s.", filename))
	template = getTestDataFilePath(filename)
	err = oc.Run("create").Args("-f", template).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	exutil.By(fmt.Sprintf("6.2) Wait for pod with name %s to be ready.", podName))
	exutil.AssertPodToBeReady(oc, podName, testNamespace)
	// Curl the service over TLS, trusting the injected service-ca bundle.
	url := fmt.Sprintf("https://hello.%s.svc:443", testNamespace)
	execCmd := fmt.Sprintf("curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt %s", url)
	curlCmdOutput := ExecCommandOnPod(oc, podName, testNamespace, execCmd)
	o.Expect(curlCmdOutput).Should(o.ContainSubstring("Hello-OpenShift"))

	exutil.By("7) Extract the cert and key from secret ssl-key.")
	err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("-n", testNamespace, "secret/ssl-key", "--to", tmpdir).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	tlsCrtFile := filepath.Join(tmpdir, "tls.crt")
	tlsCrt, err := os.ReadFile(tlsCrtFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(tlsCrt).ShouldNot(o.BeEmpty())

	// Set the new expiry(1 hour + 1 minute) after the time of the secret ssl-key was created
	exutil.By("8) Set the new expiry annotations to the secret ssl-key.")
	tlsCrtCreation, err := getResource(oc, asAdmin, withoutNamespace, "secret", "ssl-key", "-n", testNamespace, "-o=jsonpath={.metadata.creationTimestamp}")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(tlsCrtCreation).ShouldNot(o.BeEmpty())
	e2e.Logf("created time:%s", tlsCrtCreation)
	tlsCrtCreationTime, err := time.Parse(time.RFC3339, tlsCrtCreation)
	o.Expect(err).NotTo(o.HaveOccurred())
	// New expiry lands ~61 minutes from "now", so it soon crosses the <1h
	// regeneration threshold while the Eventually below is polling.
	newExpiry := tlsCrtCreationTime.Add(time.Since(stepExecTime) + 1*time.Hour + 60*time.Second)
	newExpiryStr := fmt.Sprintf(`"%s"`, newExpiry.Format(time.RFC3339))
	logger.Debugf("The new expiry of the secret ssl-key is %s", newExpiryStr)
	annotationPatch := fmt.Sprintf(`{"metadata":{"annotations": {"service.alpha.openshift.io/expiry": %s, "service.beta.openshift.io/expiry": %s}}}`, newExpiryStr, newExpiryStr)
	errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("secret", "ssl-key", "-n", testNamespace, "--type=merge", "-p", annotationPatch).Execute()
	o.Expect(errPatch).NotTo(o.HaveOccurred())

	exutil.By("9) Check secret ssl-key again and shouldn't change When the expiry time is great than 1h.")
	// Poll for up to 25m: the cert must stay unchanged while >1h remains and be
	// regenerated (bytes differ) once less than 1h is left.
	o.Eventually(func() bool {
		err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("-n", testNamespace, "secret/ssl-key", "--to", tmpdir, "--confirm=true").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		tlsCrt1, err := os.ReadFile(tlsCrtFile)
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(tlsCrt1).ShouldNot(o.BeEmpty())
		if !bytes.Equal(tlsCrt, tlsCrt1) {
			logger.Infof("When the expiry time has less than 1h left, the cert has been regenerated")
			return true
		}
		logger.Infof("When the expiry time has more than 1h left, the cert will not regenerate")
		return false
	}, "25m", "60s").Should(o.Equal(true), "Failed to regenerate the new secret ssl-key When the expiry time is greater than 1h")

	exutil.By(fmt.Sprintf("10) Using the regenerated secret ssl-key to access web app in pod %s without error.", podName))
	exutil.AssertPodToBeReady(oc, podName, testNamespace)
	curlCmdOutput = ExecCommandOnPod(oc, podName, testNamespace, execCmd)
	o.Expect(curlCmdOutput).Should(o.ContainSubstring("Hello-OpenShift"))
})
test case
openshift/openshift-tests-private
1b00ea45-fd73-4819-a0e2-91b0fd083651
Author:kewang-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-73410-V-BR.22-V-BR.33-V-BR.39-Support customRules list for by-group with none profile to the audit configuration [Disruptive][Slow]
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
// OCP-73410: verifies apiserver audit customRules for the by-group
// system:authenticated:oauth — with profile "None" a user login produces zero
// audit events; with profile "Default" login produces at least one.
// Disruptive: patches apiserver/cluster audit config and creates temp users;
// cleanup restores the admin context, the default audit profile, and the users.
g.It("Author:kewang-WRS-NonHyperShiftHOST-NonPreRelease-ROSA-ARO-OSD_CCS-Longduration-High-73410-V-BR.22-V-BR.33-V-BR.39-Support customRules list for by-group with none profile to the audit configuration [Disruptive][Slow]", func() {
	var (
		patchCustomRules string
		auditEventCount  int
		users            []User
		usersHTpassFile  string
		htPassSecret     string
	)

	defer func() {
		// Switch back to the admin context (user logins during the test changed it).
		contextErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", "admin").Execute()
		o.Expect(contextErr).NotTo(o.HaveOccurred())
		contextOutput, contextErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-context").Output()
		o.Expect(contextErr).NotTo(o.HaveOccurred())
		e2e.Logf("Context after rollback :: %v", contextOutput)
		//Reset customRules profile to default one.
		output := setAuditProfile(oc, "apiserver/cluster", `[{"op": "remove", "path": "/spec/audit"}]`)
		if strings.Contains(output, "patched (no change)") {
			e2e.Logf("Apiserver/cluster's audit profile not changed from the default values")
		}
		userCleanup(oc, users, usersHTpassFile, htPassSecret)
	}()

	// Get user detail used by the test and cleanup after execution.
	users, usersHTpassFile, htPassSecret = getNewUser(oc, 2)

	exutil.By("1. Configure audit config for customRules system:authenticated:oauth profile as None and audit profile as Default")
	patchCustomRules = `[{"op": "replace", "path": "/spec/audit", "value": {"customRules": [ {"group": "system:authenticated:oauth","profile": "None"}],"profile": "Default"}}]`
	setAuditProfile(oc, "apiserver/cluster", patchCustomRules)

	exutil.By("2. Check audit events should be zero after login operation")
	auditEventLog, auditEventCount := checkUserAuditLog(oc, "system:authenticated:oauth", users[0].Username, users[0].Password)
	if auditEventCount > 0 {
		// Dump the unexpected events before the assertion below fails.
		e2e.Logf("Event Logs :: %v", auditEventLog)
	}
	o.Expect(auditEventCount).To(o.BeNumerically("==", 0))

	exutil.By("3. Configure audit config for customRules system:authenticated:oauth profile as Default and audit profile as Default")
	patchCustomRules = `[{"op": "replace", "path": "/spec/audit", "value": {"customRules": [ {"group": "system:authenticated:oauth","profile": "Default"}],"profile": "Default"}}]`
	setAuditProfile(oc, "apiserver/cluster", patchCustomRules)

	exutil.By("4. Check audit events should be greater than zero after login operation")
	// Poll: audit events may take a short while to show up after login.
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) {
		_, auditEventCount = checkUserAuditLog(oc, "system:authenticated:oauth", users[1].Username, users[1].Password)
		if auditEventCount > 0 {
			return true, nil
		}
		return false, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Test Case failed :: Audit events count is not greater than zero after login operation :: %v", auditEventCount))
})
test case
openshift/openshift-tests-private
bf56e1aa-b774-4150-814b-4fc048776d3b
Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-ConnectedOnly-Medium-70369-[Apiserver] Use bound service account tokens when generating pull secrets.
['"fmt"', '"strings"', '"time"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-ConnectedOnly-Medium-70369-[Apiserver] Use bound service account tokens when generating pull secrets.", func() { var ( secretOutput string randomSaAcc = "test-" + exutil.GetRandomString() ) oc.SetupProject() namespace := oc.Namespace() exutil.By("1. Check if Image registry is enabled") output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("configs.imageregistry.operator.openshift.io/cluster", "-o", `jsonpath='{.spec.managementState}'`).Output() o.Expect(err).NotTo(o.HaveOccurred()) if !strings.Contains(output, "Managed") { g.Skip("Skipping case as registry is not enabled") } exutil.By("2. Create serviceAccount " + randomSaAcc) err = oc.WithoutNamespace().AsAdmin().Run("create").Args("sa", randomSaAcc, "-n", namespace).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Check if Token Secrets of SA " + randomSaAcc + " are created.") secretOutput = getResourceToBeReady(oc, asAdmin, withoutNamespace, "secrets", "-n", namespace, "-o", `jsonpath={range .items[*]}{.metadata.name}{" "}{end}`) o.Expect(secretOutput).ShouldNot(o.BeEmpty()) o.Expect(secretOutput).ShouldNot(o.ContainSubstring("token")) o.Expect(secretOutput).Should(o.ContainSubstring("dockercfg")) exutil.By("4. Create a deployment that uses an image from the internal registry") podTemplate := getTestDataFilePath("ocp-70369.yaml") params := []string{"-n", namespace, "-f", podTemplate, "-p", fmt.Sprintf("NAMESPACE=%s", namespace), "SERVICE_ACCOUNT_NAME=" + randomSaAcc} configFile := exutil.ProcessTemplate(oc, params...) err = oc.AsAdmin().Run("create").Args("-f", configFile, "-n", namespace).Execute() o.Expect(err).NotTo(o.HaveOccurred()) podName := getPodsList(oc.AsAdmin(), namespace) o.Expect(podName).NotTo(o.BeEmpty()) exutil.AssertPodToBeReady(oc, podName[0], namespace) exutil.By("5. 
Verify the `openshift.io/internal-registry-pull-secret-ref` annotation in the ServiceAccount") serviceCaOutput := getResourceToBeReady(oc, asAdmin, withoutNamespace, "pod", podName[0], "-n", namespace, "-o", `jsonpath={.spec.serviceAccount}`) o.Expect(serviceCaOutput).Should(o.ContainSubstring(randomSaAcc)) imageSecretOutput := getResourceToBeReady(oc, asAdmin, withoutNamespace, "pod", podName[0], "-n", namespace, "-o", `jsonpath={.spec.imagePullSecrets[*].name}`) o.Expect(imageSecretOutput).Should(o.ContainSubstring(randomSaAcc + "-dockercfg")) imageSaOutput := getResourceToBeReady(oc, asAdmin, withoutNamespace, "sa", randomSaAcc, "-n", namespace, "-o", `jsonpath={.metadata.annotations.openshift\.io/internal-registry-pull-secret-ref}`) o.Expect(imageSaOutput).Should(o.ContainSubstring(randomSaAcc + "-dockercfg")) // Adding this step related to bug https://issues.redhat.com/browse/OCPBUGS-36833 exutil.By("6. Verify no reconciliation loops cause unbounded dockercfg secret creation") saName := "my-test-sa" // Define the ServiceAccount in YAML format saYAML := fmt.Sprintf(`apiVersion: v1 kind: ServiceAccount metadata: name: %s `, saName) // Create or replace the ServiceAccount multiple times for i := 0; i < 10; i++ { output, err := oc.WithoutNamespace().AsAdmin().Run("create").Args("-n", namespace, "-f", "-").InputString(saYAML).Output() if err != nil { if !strings.Contains(output, "AlreadyExists") { e2e.Failf("Failed to create ServiceAccount: %v", err.Error()) } else { // Replace the ServiceAccount if it already exists err = oc.WithoutNamespace().AsAdmin().Run("replace").Args("-n", namespace, "-f", "-").InputString(saYAML).Execute() if err != nil { e2e.Failf("Failed to replace ServiceAccount: %v", err) } e2e.Logf("ServiceAccount %s replaced\n", saName) } } else { e2e.Logf("ServiceAccount %s created\n", saName) } time.Sleep(2 * time.Second) // Sleep to ensure secrets generation } // List ServiceAccounts and secrets saList := getResourceToBeReady(oc, true, true, "-n", 
namespace, "sa", saName, "-o=jsonpath={.metadata.name}") if saList == "" { e2e.Failf("ServiceAccount %s not found", saName) } e2e.Logf("ServiceAccount found: %s", saName) saNameSecretTypes, err := getResource(oc, true, true, "-n", namespace, "secrets", `-o`, `jsonpath={range .items[?(@.metadata.ownerReferences[0].name=="`+saName+`")]}{.type}{"\n"}{end}`) if err != nil { e2e.Failf("Failed to get secrets: %v", err) } secretTypes := strings.Split(saNameSecretTypes, "\n") // Count the values dockerCfgCount := 0 serviceAccountTokenCount := 0 for _, secretType := range secretTypes { switch secretType { case "kubernetes.io/dockercfg": dockerCfgCount++ case "kubernetes.io/service-account-token": serviceAccountTokenCount++ } } if dockerCfgCount != 1 || serviceAccountTokenCount != 0 { e2e.Failf("Expected 1 dockercfg secret and 0 token secret, but found %d dockercfg secrets and %d token secrets", dockerCfgCount, serviceAccountTokenCount) } e2e.Logf("Correct number of secrets found, there is no reconciliation loops causing unbounded dockercfg secret creation") })
test case
openshift/openshift-tests-private
fbb36662-0b8c-4847-a7ac-b991515d5c02
Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73853-[Apiserver] Update existing alert KubeAPIErrorBudgetBurn [Slow] [Disruptive]
['"context"', '"fmt"', '"net/url"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"github.com/tidwall/gjson"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73853-[Apiserver] Update existing alert KubeAPIErrorBudgetBurn [Slow] [Disruptive]", func() { if isSNOCluster(oc) { g.Skip("This is a SNO cluster, skip.") } var ( alertBudget = "KubeAPIErrorBudgetBurn" runbookBudgetURL = "https://github.com/openshift/runbooks/blob/master/alerts/cluster-kube-apiserver-operator/KubeAPIErrorBudgetBurn.md" alertTimeWarning = "2m" alertTimeCritical = "15m" alertTimeWarningExt = "1h" alertTimeCriticalExt = "3h" severity = []string{"critical", "critical"} severityExtended = []string{"warning", "warning"} timeSleep = 900 ) exutil.By("1. Check cluster with the following changes for existing alerts " + alertBudget + " have been applied.") output, alertBasicErr := getResource(oc, asAdmin, withoutNamespace, "prometheusrule/kube-apiserver-slos-basic", "-n", "openshift-kube-apiserver", "-o", `jsonpath='{.spec.groups[?(@.name=="kube-apiserver-slos-basic")].rules[?(@.alert=="`+alertBudget+`")].labels.severity}'`) o.Expect(alertBasicErr).NotTo(o.HaveOccurred()) chkStr := fmt.Sprintf("%s %s", severity[0], severity[1]) o.Expect(output).Should(o.ContainSubstring(chkStr), fmt.Sprintf("Not have new alert %s with severity :: %s : %s", alertBudget, severity[0], severity[1])) e2e.Logf("Have new alert %s with severity :: %s : %s", alertBudget, severity[0], severity[1]) outputExt, alertExtErr := getResource(oc, asAdmin, withoutNamespace, "prometheusrule/kube-apiserver-slos-extended", "-n", "openshift-kube-apiserver", "-o", `jsonpath='{.spec.groups[?(@.name=="kube-apiserver-slos-extended")].rules[?(@.alert=="`+alertBudget+`")].labels.severity}'`) o.Expect(alertExtErr).NotTo(o.HaveOccurred()) chkExtStr := fmt.Sprintf("%s %s", severityExtended[0], severityExtended[1]) o.Expect(outputExt).Should(o.ContainSubstring(chkExtStr), fmt.Sprintf("Not have new alert %s with severity :: %s : %s", alertBudget, severityExtended[0], severityExtended[1])) e2e.Logf("Have new alert %s with 
severity :: %s : %s", alertBudget, severityExtended[0], severityExtended[1]) e2e.Logf("Check reduce severity to %s and %s for :: %s : %s", severity[0], severity[1], alertTimeWarning, alertTimeCritical) output, sevBasicErr := getResource(oc, asAdmin, withoutNamespace, "prometheusrule/kube-apiserver-slos-basic", "-n", "openshift-kube-apiserver", "-o", `jsonpath='{.spec.groups[?(@.name=="kube-apiserver-slos-basic")].rules[?(@.alert=="`+alertBudget+`")].for}'`) o.Expect(sevBasicErr).NotTo(o.HaveOccurred()) chkStr = fmt.Sprintf("%s %s", alertTimeWarning, alertTimeCritical) o.Expect(output).Should(o.ContainSubstring(chkStr), fmt.Sprintf("Not Have reduce severity to %s and %s for :: %s : %s", severity[0], severity[1], alertTimeWarning, alertTimeCritical)) e2e.Logf("Have reduce severity to %s and %s for :: %s : %s", severity[0], severity[1], alertTimeWarning, alertTimeCritical) e2e.Logf("Check reduce severity to %s and %s for :: %s : %s", severityExtended[0], severityExtended[1], alertTimeWarningExt, alertTimeCriticalExt) outputExtn, sevExtErr := getResource(oc, asAdmin, withoutNamespace, "prometheusrule/kube-apiserver-slos-extended", "-n", "openshift-kube-apiserver", "-o", `jsonpath='{.spec.groups[?(@.name=="kube-apiserver-slos-extended")].rules[?(@.alert=="`+alertBudget+`")].for}'`) o.Expect(sevExtErr).NotTo(o.HaveOccurred()) chkStr = fmt.Sprintf("%s %s", alertTimeWarningExt, alertTimeCriticalExt) o.Expect(outputExtn).Should(o.ContainSubstring(chkStr), fmt.Sprintf("Not Have reduce severity to %s and %s for :: %s : %s", severityExtended[0], severityExtended[1], alertTimeWarningExt, alertTimeCriticalExt)) e2e.Logf("Have reduce severity to %s and %s for :: %s : %s", severityExtended[0], severityExtended[1], alertTimeWarningExt, alertTimeCriticalExt) e2e.Logf("Check a run book url for %s", alertBudget) output = getResourceToBeReady(oc, asAdmin, withoutNamespace, "prometheusrule/kube-apiserver-slos-basic", "-n", "openshift-kube-apiserver", "-o", 
`jsonpath='{.spec.groups[?(@.name=="kube-apiserver-slos-basic")].rules[?(@.alert=="`+alertBudget+`")].annotations.runbook_url}'`) o.Expect(output).Should(o.ContainSubstring(runbookBudgetURL), fmt.Sprintf("%s Runbook url not found :: %s", alertBudget, runbookBudgetURL)) e2e.Logf("Have a run book url for %s :: %s", alertBudget, runbookBudgetURL) exutil.By("2. Test the " + alertBudget + "alert firing/pending") e2e.Logf("Checking for available network interfaces on the master node") masterNode, masterErr := exutil.GetFirstMasterNode(oc) o.Expect(masterErr).NotTo(o.HaveOccurred()) e2e.Logf("Master node is %v : ", masterNode) cmd := `for iface in $(ls /sys/class/net | grep -oP '^(env|ens|eth|enp)\w+'); do ip link show $iface | grep -q 'master' && echo "$iface" || true; done` ethName, ethErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=openshift-kube-apiserver"}, "bash", "-c", cmd) o.Expect(ethErr).NotTo(o.HaveOccurred()) ethName = strings.TrimSpace(ethName) o.Expect(ethName).ShouldNot(o.BeEmpty()) e2e.Logf("Found Ethernet :: %v", ethName) e2e.Logf(`Simulating network conditions: "50%% packet loss on the master node"`) channel := make(chan string) go func() { defer g.GinkgoRecover() cmdStr := fmt.Sprintf(`tc qdisc add dev %s root netem loss 50%%; sleep %v; tc qdisc del dev %s root`, ethName, timeSleep, ethName) output, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", fmt.Sprintf("nodes/%s", masterNode), "--", "chroot", "/host", "/bin/bash", "-c", cmdStr).Output() e2e.Logf("Output:%s", output) channel <- output }() defer func() { receivedMsg := <-channel e2e.Logf("ReceivedMsg:%s", receivedMsg) }() e2e.Logf("Check alert " + alertBudget + " firing/pending") errWatcher := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeSleep)*time.Second, false, func(cxt context.Context) (bool, error) { alertOutput, _ := GetAlertsByName(oc, alertBudget) alertName := 
gjson.Parse(alertOutput).String() alertOutputWarning1 := gjson.Get(alertName, `data.alerts.#(labels.alertname=="`+alertBudget+`")#`).String() alertOutputWarning2 := gjson.Get(alertOutputWarning1, `#(labels.severity=="`+severityExtended[0]+`").state`).String() if strings.Contains(string(alertOutputWarning2), "pending") || strings.Contains(string(alertOutputWarning2), "firing") { e2e.Logf("%s with %s is pending/firing", alertBudget, severityExtended[0]) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(errWatcher, fmt.Sprintf("%s with %s is not firing or pending", alertBudget, severityExtended[0])) })
test case
openshift/openshift-tests-private
0e7fae02-8a56-4972-91ba-5c0ea70dc4dd
Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73949-[Apiserver] Update existing alert AuditLogError [Slow] [Disruptive]
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"github.com/tidwall/gjson"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73949-[Apiserver] Update existing alert AuditLogError [Slow] [Disruptive]", func() { var ( alertBudget = "AuditLogError" alertTimeWarning = "1m" severity = "warning" namespace = "openshift-kube-apiserver" lockCmd = `sudo chattr +i /var/log/audit; \ sudo chattr +i /var/log/audit/*; \ sudo chattr +i /var/log/openshift-apiserver; \ sudo chattr +i /var/log/openshift-apiserver/*; \ sudo chattr +i /var/log/kube-apiserver; \ sudo chattr +i /var/log/kube-apiserver/*` unlockCmd = `sudo chattr -i /var/log/audit; \ sudo chattr -i /var/log/audit/*; \ sudo chattr -i /var/log/openshift-apiserver; \ sudo chattr -i /var/log/openshift-apiserver/*; \ sudo chattr -i /var/log/kube-apiserver; \ sudo chattr -i /var/log/kube-apiserver/*` ) exutil.By("1. Check if the following changes for existing alerts " + alertBudget + " have been applied to the cluster.") output, alertBasicErr := getResource(oc, asAdmin, withoutNamespace, "prometheusrule/audit-errors", "-n", namespace, "-o", `jsonpath='{.spec.groups[?(@.name=="apiserver-audit")].rules[?(@.alert=="`+alertBudget+`")].labels.severity}'`) o.Expect(alertBasicErr).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(severity), fmt.Sprintf("New alert %s with severity :: %s does not exist", alertBudget, severity)) e2e.Logf("Have new alert %s with severity :: %s", alertBudget, severity) e2e.Logf("Check reduce severity to %s for :: %s", severity, alertTimeWarning) output, sevBasicErr := getResource(oc, asAdmin, withoutNamespace, "prometheusrule/audit-errors", "-n", namespace, "-o", `jsonpath='{.spec.groups[?(@.name=="apiserver-audit")].rules[?(@.alert=="`+alertBudget+`")].for}'`) o.Expect(sevBasicErr).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(alertTimeWarning), fmt.Sprintf("Not Have reduce severity to %s for :: %s", severity, alertTimeWarning)) e2e.Logf("Have reduce severity to %s for :: %s", severity, 
alertTimeWarning) exutil.By("2. Test the " + alertBudget + "alert firing/pending") masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master") o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred()) o.Expect(masterNodes).NotTo(o.BeEmpty()) defer func() { for _, masterNode := range masterNodes { e2e.Logf("Rollback permissions of auditLogs on the node :: %s", masterNode) _, debugErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=" + namespace}, "bash", "-c", unlockCmd) o.Expect(debugErr).NotTo(o.HaveOccurred()) } errWatcher := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 600*time.Second, false, func(cxt context.Context) (bool, error) { alertOutput, _ := GetAlertsByName(oc, alertBudget) alertName := gjson.Parse(alertOutput).String() alertOutputWarning1 := gjson.Get(alertName, `data.alerts.#(labels.alertname=="`+alertBudget+`")#`).String() alertOutputWarning2 := gjson.Get(alertOutputWarning1, `#(labels.severity=="`+severity+`").state`).String() if !strings.Contains(string(alertOutputWarning2), "pending") && !strings.Contains(string(alertOutputWarning2), "firing") { e2e.Logf("Alert %s is resolved and it is not pending/firing", alertBudget) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(errWatcher, fmt.Sprintf("%s with %s is still firing or pending after issue resolved", alertBudget, severity)) }() for _, masterNode := range masterNodes { e2e.Logf("Changing permissions of auditLogs on the node :: %s", masterNode) _, debugErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=" + namespace}, "bash", "-c", lockCmd) o.Expect(debugErr).NotTo(o.HaveOccurred()) } e2e.Logf("Check if alert " + alertBudget + " is firing/pending") errWatcher := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 1500*time.Second, false, func(cxt context.Context) (bool, error) { 
oc.AsAdmin().WithoutNamespace().Run("new-project").Args("test-profile-cm-ocp73949", "--skip-config-write").Execute() oc.WithoutNamespace().Run("delete").Args("project", "test-profile-cm-ocp73949", "--ignore-not-found").Execute() for _, masterNode := range masterNodes { exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=" + namespace}, "bash", "-c", `for i in {0..20}; do sudo echo 'test' >> /var/log/audit/audit.log;echo 'test';done`) } alertOutput, _ := GetAlertsByName(oc, alertBudget) alertName := gjson.Parse(alertOutput).String() alertOutputWarning1 := gjson.Get(alertName, `data.alerts.#(labels.alertname=="`+alertBudget+`")#`).String() alertOutputWarning2 := gjson.Get(alertOutputWarning1, `#(labels.severity=="`+severity+`").state`).String() if strings.Contains(string(alertOutputWarning2), "pending") || strings.Contains(string(alertOutputWarning2), "firing") { e2e.Logf("%s with %s is pending/firing", alertBudget, severity) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(errWatcher, fmt.Sprintf("%s with %s is not firing or pending", alertBudget, severity)) })
test case
openshift/openshift-tests-private
c592e603-d6cd-4315-b7a7-ad979dd9007e
Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73880-[Apiserver] Alert KubeAggregatedAPIErrors [Slow] [Disruptive]
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"github.com/tidwall/gjson"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73880-[Apiserver] Alert KubeAggregatedAPIErrors [Slow] [Disruptive]", func() { var ( kubeAlert1 = "KubeAggregatedAPIErrors" kubeAlert2 = "KubeAggregatedAPIDown" alertSeverity = "warning" timeSleep = 720 isAlert1Firing bool isAlert1Pending bool isAlert2Firing bool isAlert2Pending bool ) exutil.By("1. Set network latency to simulate network failure in one master node") e2e.Logf("Checking one available network interface on the master node") masterNode, masterErr := exutil.GetFirstMasterNode(oc) o.Expect(masterErr).NotTo(o.HaveOccurred()) e2e.Logf("Master node is %v : ", masterNode) cmd := `for iface in $(ls /sys/class/net | grep -oP '^(env|ens|eth|enp)\w+'); do ip link show $iface | grep -q 'master' && echo "$iface" || true; done` ethName, ethErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=openshift-kube-apiserver"}, "bash", "-c", cmd) o.Expect(ethErr).NotTo(o.HaveOccurred()) ethName = strings.TrimSpace(ethName) o.Expect(ethName).ShouldNot(o.BeEmpty()) e2e.Logf("Found Ethernet :: %v", ethName) e2e.Logf("Add latency to network on the master node") channel := make(chan string) go func() { defer g.GinkgoRecover() cmdStr := fmt.Sprintf(`tc qdisc add dev %s root netem delay 2000ms; sleep %v; tc qdisc del dev %s root`, ethName, timeSleep, ethName) output, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", fmt.Sprintf("nodes/%s", masterNode), "--", "chroot", "/host", "/bin/bash", "-c", cmdStr).Output() e2e.Logf("Output:%s", output) channel <- output }() defer func() { receivedMsg := <-channel e2e.Logf("ReceivedMsg:%s", receivedMsg) }() exutil.By("2. 
Check if alerts " + kubeAlert1 + " and " + kubeAlert2 + " are firing/pending") checkAlert := func(alertData, alertName, alertState string) bool { alertPath := `data.alerts.#(labels.alertname=="` + alertName + `" and .state =="` + alertState + `")#` return gjson.Get(alertData, alertPath).Exists() } watchAlerts := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeSleep)*time.Second, false, func(cxt context.Context) (bool, error) { alertOutput, _ := GetAlertsByName(oc, kubeAlert1) if alertOutput == "" { return false, nil } alertData := gjson.Parse(alertOutput).String() if !isAlert1Pending && checkAlert(alertData, kubeAlert1, "pending") { isAlert1Pending = true e2e.Logf("%s with %s is pending", kubeAlert1, alertSeverity) } if checkAlert(alertData, kubeAlert1, "firing") { isAlert1Firing = true e2e.Logf("%s with %s is firing", kubeAlert1, alertSeverity) } if !isAlert2Pending && checkAlert(alertData, kubeAlert2, "pending") { isAlert2Pending = true e2e.Logf("%s with %s is pending", kubeAlert2, alertSeverity) } if checkAlert(alertData, kubeAlert2, "firing") { isAlert2Firing = true e2e.Logf("%s with %s is firing", kubeAlert2, alertSeverity) } if isAlert1Firing && isAlert2Firing { e2e.Logf("%s and %s with %s both are firing", kubeAlert1, kubeAlert2, alertSeverity) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(watchAlerts, fmt.Sprintf("%s and %s with %s are not firing or pending", kubeAlert1, kubeAlert2, alertSeverity)) })
test case
openshift/openshift-tests-private
58260221-fe62-4611-a2b1-35869c6bde27
Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73879-[Apiserver] Alert KubeAPIDown [Slow] [Disruptive]
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"github.com/tidwall/gjson"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-High-73879-[Apiserver] Alert KubeAPIDown [Slow] [Disruptive]", func() { if isSNOCluster(oc) { g.Skip("This is a SNO cluster, skip.") } var ( kubeAlert = "KubeAPIDown" alertSeverity = "critical" timeSleep = 300 ) exutil.By("1. Drop tcp packet to 6443 port to simulate network failure in all master nodes") masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master") o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred()) o.Expect(masterNodes).NotTo(o.BeEmpty()) cmdStr := fmt.Sprintf( `iptables -A OUTPUT -p tcp --dport 6443 -j DROP; iptables -A INPUT -p tcp --dport 6443 -j DROP; sleep %v; iptables -D INPUT -p tcp --dport 6443 -j DROP; iptables -D OUTPUT -p tcp --dport 6443 -j DROP`, timeSleep, ) for _, masterNode := range masterNodes { cmdDump, _, _, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", fmt.Sprintf("nodes/%s", masterNode), "--", "chroot", "/host", "/bin/bash", "-c", cmdStr).Background() o.Expect(err).NotTo(o.HaveOccurred()) defer cmdDump.Process.Kill() } exutil.By("2. 
Check if the alert " + kubeAlert + " is pending") time.Sleep(30 * time.Second) watchAlert := wait.PollUntilContextTimeout(context.Background(), 6*time.Second, 300*time.Second, true, func(cxt context.Context) (bool, error) { alertOutput, err := GetAlertsByName(oc, kubeAlert) if err != nil || len(alertOutput) == 0 { return false, nil } alertData := gjson.Parse(alertOutput).String() alertItem := gjson.Get(alertData, `data.alerts.#(labels.alertname=="`+kubeAlert+`")#`).String() if len(alertItem) == 0 { return false, nil } e2e.Logf("Alert %s info::%s", kubeAlert, alertItem) alertState := gjson.Get(alertItem, `#(labels.severity=="`+alertSeverity+`").state`).String() if alertState == "pending" { e2e.Logf("State of the alert %s with Severity %s::%s", kubeAlert, alertSeverity, alertState) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(watchAlert, fmt.Sprintf("%s with %s is not firing or pending", kubeAlert, alertSeverity)) // Wait for the cluster to automatically recover and do health check err := clusterOperatorHealthcheck(oc, 360, tmpdir) if err != nil { e2e.Logf("Cluster operators health check failed. Abnormality found in cluster operators.") } })
test case
openshift/openshift-tests-private
8ba10797-3d82-4e84-ba59-1b13d0715162
Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-High-74460-[Apiserver] Enabling TechPreviewNoUpgrade featureset cannot be undone
['"encoding/json"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:kewang-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-High-74460-[Apiserver] Enabling TechPreviewNoUpgrade featureset cannot be undone", func() { const ( featurePatch1 = `[{"op": "replace", "path": "/spec/featureSet", "value": "TechPreviewNoUpgrade"}]` featurePatch2 = `[{"op": "replace", "path": "/spec/featureSet", "value": "CustomNoUpgrade"}]` invalidFeatureGate = `[{"op": "remove", "path": "/spec/featureSet"}]` ) exutil.By("1. Check if the TechPreviewNoUpgrade feature set is already enabled") featureTech, err := getResource(oc, asAdmin, withoutNamespace, "featuregate/cluster", "-o", `jsonpath='{.spec.featureSet}'`) o.Expect(err).NotTo(o.HaveOccurred()) if featureTech != `'TechPreviewNoUpgrade'` { g.Skip("The TechPreviewNoUpgrade feature set of the cluster is not enabled, skip execution!") } e2e.Logf("The %s feature set has been enabled!", featureTech) exutil.By("2. Try to change the feature set value, it should cannot be changed") out, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "--type=json", "-p", featurePatch1).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring(`no change`), "Expected no change when patching with TechPreviewNoUpgrade") out, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "--type=json", "-p", featurePatch2).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring("invalid"), "Expected 'invalid' in output when patching with CustomNoUpgrade") out, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "--type=json", "-p", invalidFeatureGate).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring("invalid"), "Expected 'invalid' in output when removing the featuregate") })
test case
openshift/openshift-tests-private
b775e4cb-52a2-4939-a16f-cbb4fe9b759f
Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-ConnectedOnly-High-53230-[Apiserver] CVE Security Test Kubernetes Validating Admission Webhook Bypass [Serial]
['"encoding/base64"', '"fmt"', '"net/http"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:dpunia-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-ConnectedOnly-High-53230-[Apiserver] CVE Security Test Kubernetes Validating Admission Webhook Bypass [Serial]", func() { exutil.By("Check if it's a proxy cluster") httpProxy, httpsProxy, _ := getGlobalProxy(oc) if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") { g.Skip("Skip for proxy platform") } exutil.By("Get a node name required by test") nodeName, getNodeErr := exutil.GetFirstMasterNode(oc) o.Expect(getNodeErr).NotTo(o.HaveOccurred()) o.Expect(nodeName).NotTo(o.Equal("")) exutil.By("1. Create custom webhook & service") webhookDeployTemplate := getTestDataFilePath("webhook-deploy.yaml") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", webhookDeployTemplate).Execute() err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", webhookDeployTemplate).Execute() o.Expect(err).NotTo(o.HaveOccurred()) podName := getPodsList(oc.AsAdmin(), "validationwebhook") o.Expect(podName).NotTo(o.BeEmpty()) exutil.AssertPodToBeReady(oc, podName[0], "validationwebhook") //Get caBundle used by register webhook. caBundle := ExecCommandOnPod(oc, podName[0], "validationwebhook", `cat /usr/src/app/ca.crt | base64 | tr -d "\n"`) o.Expect(caBundle).NotTo(o.BeEmpty()) exutil.By("2. Register the above created webhook") webhookRegistrationTemplate := getTestDataFilePath("webhook-registration.yaml") params := []string{"-n", "validationwebhook", "-f", webhookRegistrationTemplate, "-p", "NAME=validationwebhook.validationwebhook.svc", "NAMESPACE=validationwebhook", "CABUNDLE=" + caBundle} webhookRegistrationConfigFile := exutil.ProcessTemplate(oc, params...) 
defer func() { err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", webhookRegistrationConfigFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) }() err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", webhookRegistrationConfigFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) parameters := []string{ `{"changeAllowed": "false"}`, `{"changeAllowed": "true"}`, } for index, param := range parameters { exutil.By(fmt.Sprintf("3.%v Node Label Addition Fails Due to Validation Webhook Denial", index+1)) out, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("node", nodeName, "-p", fmt.Sprintf(`{"metadata": {"labels": %s}}`, param)).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring("denied the request: Validation failed"), fmt.Sprintf("admission webhook \"validationwebhook.validationwebhook.svc\" denied the request: Validation failed with changeAllowed: %s", param)) } })
test case
openshift/openshift-tests-private
56e71ef7-5b50-44f5-a8f0-c1d90db40c39
Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-High-70396-[Apiserver] Add users with different client certificates to access the API Server as cluster-admin [Disruptive]
['"context"', '"encoding/base64"', '"encoding/json"', '"fmt"', '"io/ioutil"', '"os"', '"os/exec"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
g.It("Author:rgangwar-NonHyperShiftHOST-ROSA-ARO-OSD_CCS-NonPreRelease-High-70396-[Apiserver] Add users with different client certificates to access the API Server as cluster-admin [Disruptive]", func() { var ( dirname = "/tmp/-OCP-70396-ca/" csrNameDev = "ocpdev-access" fqdnName, port = getApiServerFQDNandPort(oc, false) apiserverCrt = dirname + "ocp-apiserver-cert.crt" customerCustomCas = dirname + "customer-custom-cas.crt" patch = `[{"op": "add", "path": "/spec/clientCA", "value":{"name":"customer-cas-custom"}}]` patchToRecover = `[{"op": "replace", "path": "/spec/clientCA", "value":}]` users = map[string]struct { username string cert string key string csr string customerKey string customerCrt string newKubeconfig string }{ "dev": {"ocpdev", dirname + "ocpdev.crt", dirname + "ocpdev.key", dirname + "ocpdev.csr", "", "", dirname + "ocpdev"}, "tw": {"ocptw", dirname + "ocptw.crt", dirname + "ocptw.key", dirname + "ocptw.csr", dirname + "customer-ca-ocptw.key", dirname + "customer-ca-ocptw.crt", dirname + "ocptw"}, "qe": {"ocpqe", dirname + "ocpqe.crt", dirname + "ocpqe.key", dirname + "ocpqe.csr", dirname + "customer-ca-ocpqe.key", dirname + "customer-ca-ocpqe.crt", dirname + "ocpqe"}, } ) defer os.RemoveAll(dirname) err := os.MkdirAll(dirname, 0755) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("1. 
Creating the client certificate for ocpdev using the internal OpenShift CA") exutil.By("1.1 Creating a CSR for the client certificate using the openssl client") userDetails, _ := users["dev"] opensslCmd := fmt.Sprintf(`openssl req -nodes -newkey rsa:4096 -keyout %s -subj "/O=system:admin/CN=%s" -out %s`, userDetails.key, userDetails.username, userDetails.csr) _, err = exec.Command("bash", "-c", opensslCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("1.2 Read the CSR file and encode it in base64") csrData, err := os.ReadFile(userDetails.csr) if err != nil { e2e.Failf("Failed to read CSR file: %v", err) } csrBase64 := base64.StdEncoding.EncodeToString(csrData) exutil.By("1.3 Submit the CSR to OpenShift in order to sign it with the internal CA") csrYAML := fmt.Sprintf(`apiVersion: certificates.k8s.io/v1 kind: CertificateSigningRequest metadata: name: ocpdev-access spec: signerName: kubernetes.io/kube-apiserver-client groups: - system:authenticated request: %s usages: - client auth `, csrBase64) defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("-n", "default", "-f", "-").InputString(csrYAML).Output() _, submitCsrErr := oc.WithoutNamespace().NotShowInfo().AsAdmin().Run("create").Args("-n", "default", "-f", "-").InputString(csrYAML).Output() o.Expect(submitCsrErr).NotTo(o.HaveOccurred()) exutil.By("1.4 Approve the certificate pending request") getCsr, getCsrErr := getPendingCSRs(oc) o.Expect(getCsrErr).NotTo(o.HaveOccurred()) appCsrErr := oc.WithoutNamespace().AsAdmin().Run("adm").Args("certificate", "approve", getCsr[0]).Execute() o.Expect(appCsrErr).NotTo(o.HaveOccurred()) exutil.By("1.5 Get CSR certificate after approved") certBase := getResourceToBeReady(oc, asAdmin, withoutNamespace, "csr", csrNameDev, `-o=jsonpath={.status.certificate}`) o.Expect(certBase).NotTo(o.BeEmpty()) // Decode the base64 encoded certificate certDecoded, certDecodedErr := base64.StdEncoding.DecodeString(string(certBase)) o.Expect(certDecodedErr).NotTo(o.HaveOccurred()) 
// Write the decoded certificate to a file csrDevCrtErr := os.WriteFile(userDetails.cert, certDecoded, 0644) o.Expect(csrDevCrtErr).NotTo(o.HaveOccurred()) e2e.Logf("Certificate saved to %s\n", userDetails.cert) exutil.By("2. Creating the client certificate for user ocptw using the customer-signer-custom self-signed CA") exutil.By("2.1 Create one self-signed CA using the openssl client") userDetails, _ = users["tw"] opensslOcptwCmd := fmt.Sprintf(`openssl genrsa -out %s 4096`, userDetails.customerKey) _, err = exec.Command("bash", "-c", opensslOcptwCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) opensslOcptwCmd = fmt.Sprintf(`openssl req -x509 -new -nodes -key %s -sha256 -days 9999 -out %s -subj "/OU=openshift/CN=customer-signer-custom"`, userDetails.customerKey, userDetails.customerCrt) _, err = exec.Command("bash", "-c", opensslOcptwCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2.2 Create CSR for ocptw's client cert") opensslOcptwCmd = fmt.Sprintf(`openssl req -nodes -newkey rsa:4096 -keyout %s -subj "/O=system:admin/CN=%s" -out %s`, userDetails.key, userDetails.username, userDetails.csr) _, err = exec.Command("bash", "-c", opensslOcptwCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2.3 Sign CSR for ocptw") opensslOcptwCmd = fmt.Sprintf(`openssl x509 -extfile <(printf "extendedKeyUsage = clientAuth") -req -in %s -CA %s -CAkey %s -CAcreateserial -out %s -days 9999 -sha256`, userDetails.csr, userDetails.customerCrt, userDetails.customerKey, userDetails.cert) _, err = exec.Command("bash", "-c", opensslOcptwCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By(`3. 
Creating the client certificate for ocpqe using the customer-signer-custom-2 self-signed CA and using group “system:admin” for username “ocpqe”.`) exutil.By("3.1 Create one self-signed CA using the openssl client for user ocpqe") userDetails, _ = users["qe"] opensslOcpqeCmd := fmt.Sprintf(`openssl genrsa -out %s 4096`, userDetails.customerKey) _, err = exec.Command("bash", "-c", opensslOcpqeCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) opensslOcpqeCmd = fmt.Sprintf(`openssl req -x509 -new -nodes -key %s -sha256 -days 9999 -out %s -subj "/OU=openshift/CN=customer-signer-custom-2"`, userDetails.customerKey, userDetails.customerCrt) _, err = exec.Command("bash", "-c", opensslOcpqeCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3.2 Create CSR for ocpqe's client cert") opensslOcpqeCmd = fmt.Sprintf(`openssl req -nodes -newkey rsa:4096 -keyout %s -subj "/O=system:admin/CN=ocpqe" -out %s`, userDetails.key, userDetails.csr) _, err = exec.Command("bash", "-c", opensslOcpqeCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3.3 Sign CSR for ocpqe") opensslOcpqeCmd = fmt.Sprintf(`openssl x509 -extfile <(printf "extendedKeyUsage = clientAuth") -req -in %s -CA %s -CAkey %s -CAcreateserial -out %s -days 9999 -sha256`, userDetails.csr, userDetails.customerCrt, userDetails.customerKey, userDetails.cert) _, err = exec.Command("bash", "-c", opensslOcpqeCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("4. 
Creating the kubeconfig files for ocpdev, ocptw and ocpqe") endpointUrl := fmt.Sprintf("https://%s:%s", fqdnName, port) pemCert, err := fetchOpenShiftAPIServerCert(endpointUrl) if err != nil { e2e.Failf("Failed to fetch certificate: %v", err) } else { // Write the PEM-encoded certificate to the output file if err := ioutil.WriteFile(apiserverCrt, pemCert, 0644); err != nil { e2e.Failf("Error writing certificate to file: %v", err) } else { e2e.Logf("Certificate written to %s\n", apiserverCrt) } } i := 1 for _, userDetails := range users { exutil.By(fmt.Sprintf("4.%d Create kubeconfig for user %s", i, userDetails.username)) err = oc.AsAdmin().WithoutNamespace().WithoutKubeconf().Run("--kubeconfig").Args(userDetails.newKubeconfig, "config", "set-credentials", userDetails.username, "--client-certificate="+userDetails.cert, "--client-key="+userDetails.key, "--embed-certs=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.AsAdmin().WithoutNamespace().WithoutKubeconf().Run("--kubeconfig").Args(userDetails.newKubeconfig, "config", "set-cluster", "openshift-cluster-dev", "--certificate-authority="+apiserverCrt, "--embed-certs=true", "--server=https://"+fqdnName+":"+port).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.AsAdmin().WithoutNamespace().WithoutKubeconf().Run("--kubeconfig").Args(userDetails.newKubeconfig, "config", "set-context", "openshift-dev", "--cluster=openshift-cluster-dev", "--namespace=default", "--user="+userDetails.username).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.AsAdmin().WithoutNamespace().WithoutKubeconf().Run("--kubeconfig").Args(userDetails.newKubeconfig, "config", "use-context", "openshift-dev").Execute() o.Expect(err).NotTo(o.HaveOccurred()) i = i + 1 exutil.By(fmt.Sprintf("4.%d Accessing the cluster with the new kubeconfig files for user %s", i, userDetails.username)) if userDetails.username == "ocpdev" { _, err = getResourceWithKubeconfig(oc, userDetails.newKubeconfig, true, "whoami") 
o.Expect(err).NotTo(o.HaveOccurred()) } else { _, err = getResourceWithKubeconfig(oc, userDetails.newKubeconfig, false, "whoami") o.Expect(err).To(o.HaveOccurred()) } i = i + 1 } exutil.By("5. Create the client-ca ConfigMap") caCmd := fmt.Sprintf(`cat %s %s > %s`, users["tw"].customerCrt, users["qe"].customerCrt, customerCustomCas) _, err = exec.Command("bash", "-c", caCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "customer-cas-custom", "-n", "openshift-config").Execute() err = oc.AsAdmin().WithoutNamespace().Run("create").Args("configmap", "customer-cas-custom", "-n", "openshift-config", fmt.Sprintf(`--from-file=ca-bundle.crt=%s`, customerCustomCas)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. Patching apiserver") defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patchToRecover).Execute() err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute() o.Expect(err).NotTo(o.HaveOccurred()) i = 1 for _, userDetails := range users { exutil.By(fmt.Sprintf("7.%d Accessing the cluster again with the new kubeconfig files for user %s", i, userDetails.username)) output, err := getResourceWithKubeconfig(oc, userDetails.newKubeconfig, true, "whoami") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(userDetails.username)) i = i + 1 //Try to do other stuff like listing pods, nodes, etc. we will see that we don’t have access to that. That’s expected since in a default OCP installation we don’t have RBAC rules for the system:admin group. 
exutil.By(fmt.Sprintf("7.%d, Try to do other stuff like listing pods, nodes before applying RBAC policy", i)) _, err = getResourceWithKubeconfig(oc, userDetails.newKubeconfig, false, "get", "pods") o.Expect(err).To(o.HaveOccurred()) _, err = getResourceWithKubeconfig(oc, userDetails.newKubeconfig, false, "get", "nodes") o.Expect(err).To(o.HaveOccurred()) i = i + 1 } exutil.By("8. Configure users in the system:admin group as cluster admins") defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-admin", "system:admin").Execute() err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-admin", "system:admin").Execute() o.Expect(err).NotTo(o.HaveOccurred()) i = 1 for _, userDetails := range users { exutil.By(fmt.Sprintf("8.%d, Try again stuff like listing pods, nodes after applying RBAC policy", i)) _, err = getResourceWithKubeconfig(oc, userDetails.newKubeconfig, true, "get", "pod", "-n", "openshift-apiserver") o.Expect(err).NotTo(o.HaveOccurred()) _, err = getResourceWithKubeconfig(oc, userDetails.newKubeconfig, true, "get", "nodes") o.Expect(err).NotTo(o.HaveOccurred()) i = i + 1 } })
test case
openshift/openshift-tests-private
772b6ab5-223a-4930-bd80-8eae38cacc08
Author:rgangwar-LEVEL0-ROSA-ARO-OSD_CCS-ConnectedOnly-Critical-77919-[Apiserver] HPA/oc scale and DeploymenConfig Should be working [Disruptive]
['"context"', '"crypto/tls"', '"encoding/base64"', '"fmt"', '"net/http"', '"os/exec"', '"regexp"', '"strings"', '"time"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver.go
// OCP-77919: verifies that HPA / `oc scale` and DeploymentConfig keep working while an
// OPA validating admission webhook (backed by a SAN certificate) is installed.
// Disruptive: creates/deletes the "opa" namespace, a ValidatingWebhookConfiguration,
// a clusterrolebinding and a temporary test namespace.
g.It("Author:rgangwar-LEVEL0-ROSA-ARO-OSD_CCS-ConnectedOnly-Critical-77919-[Apiserver] HPA/oc scale and DeploymenConfig Should be working [Disruptive]", func() {
	// Both Build and DeploymentConfig capabilities are required when baseline caps are set.
	if isBaselineCapsSet(oc) && !(isEnabledCapability(oc, "Build") && isEnabledCapability(oc, "DeploymentConfig")) {
		g.Skip("Skipping the test as baselinecaps have been set and some of API capabilities are not enabled!")
	}
	// Remove any leftover "opa" namespace from a previous run before starting.
	errNS := oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", "opa", "--ignore-not-found").Execute()
	o.Expect(errNS).NotTo(o.HaveOccurred())
	var (
		// Scratch file paths for the CA / server key material generated below.
		caKeypem          = tmpdir + "/caKey.pem"
		caCertpem         = tmpdir + "/caCert.pem"
		serverKeypem      = tmpdir + "/serverKey.pem"
		serverconf        = tmpdir + "/server.conf"
		serverWithSANcsr  = tmpdir + "/serverWithSAN.csr"
		serverCertWithSAN = tmpdir + "/serverCertWithSAN.pem"
		randomStr         = exutil.GetRandomString()
	)
	// Cleanup of everything this test creates, regardless of pass/fail.
	defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", "opa", "--ignore-not-found").Execute()
	defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", "test-ns"+randomStr, "--ignore-not-found").Execute()
	defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ValidatingWebhookConfiguration", "opa-validating-webhook", "--ignore-not-found").Execute()
	defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrolebinding.rbac.authorization.k8s.io/opa-viewer", "--ignore-not-found").Execute()
	// Skipped case on arm64 and proxy cluster with techpreview
	exutil.By("Check if it's a proxy cluster with techpreview")
	featureTech, err := getResource(oc, asAdmin, withoutNamespace, "featuregate", "cluster", "-o=jsonpath={.spec.featureSet}")
	o.Expect(err).NotTo(o.HaveOccurred())
	httpProxy, _, _ := getGlobalProxy(oc)
	if (strings.Contains(httpProxy, "http") && strings.Contains(featureTech, "TechPreview")) || checkDisconnect(oc) {
		g.Skip("Skip for proxy platform with techpreview or disconnected env")
	}
	architecture.SkipNonAmd64SingleArch(oc)
	exutil.By("1. Create certificates with SAN.")
	// Generate a throwaway CA, then a server key/cert whose SAN covers the
	// in-cluster webhook service DNS name (opa.opa.svc) and 127.0.0.1.
	opensslCMD := fmt.Sprintf("openssl genrsa -out %v 2048", caKeypem)
	_, caKeyErr := exec.Command("bash", "-c", opensslCMD).Output()
	o.Expect(caKeyErr).NotTo(o.HaveOccurred())
	opensslCMD = fmt.Sprintf(`openssl req -x509 -new -nodes -key %v -days 100000 -out %v -subj "/CN=wb_ca"`, caKeypem, caCertpem)
	_, caCertErr := exec.Command("bash", "-c", opensslCMD).Output()
	o.Expect(caCertErr).NotTo(o.HaveOccurred())
	opensslCMD = fmt.Sprintf("openssl genrsa -out %v 2048", serverKeypem)
	_, serverKeyErr := exec.Command("bash", "-c", opensslCMD).Output()
	o.Expect(serverKeyErr).NotTo(o.HaveOccurred())
	// openssl config with the v3_req extension carrying the SAN entries.
	serverconfCMD := fmt.Sprintf(`cat > %v << EOF
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
subjectAltName = @alt_names
[alt_names]
IP.1 = 127.0.0.1
DNS.1 = opa.opa.svc
EOF`, serverconf)
	_, serverconfErr := exec.Command("bash", "-c", serverconfCMD).Output()
	o.Expect(serverconfErr).NotTo(o.HaveOccurred())
	serverWithSANCMD := fmt.Sprintf(`openssl req -new -key %v -out %v -subj "/CN=opa.opa.svc" -config %v`, serverKeypem, serverWithSANcsr, serverconf)
	_, serverWithSANErr := exec.Command("bash", "-c", serverWithSANCMD).Output()
	o.Expect(serverWithSANErr).NotTo(o.HaveOccurred())
	serverCertWithSANCMD := fmt.Sprintf(`openssl x509 -req -in %v -CA %v -CAkey %v -CAcreateserial -out %v -days 100000 -extensions v3_req -extfile %s`, serverWithSANcsr, caCertpem, caKeypem, serverCertWithSAN, serverconf)
	_, serverCertWithSANErr := exec.Command("bash", "-c", serverCertWithSANCMD).Output()
	o.Expect(serverCertWithSANErr).NotTo(o.HaveOccurred())
	e2e.Logf("1. Step passed: SAN certificate has been generated")
	exutil.By("2. Create new secret with SAN cert.")
	opaOutput, opaerr := oc.Run("create").Args("namespace", "opa").Output()
	o.Expect(opaerr).NotTo(o.HaveOccurred())
	o.Expect(opaOutput).Should(o.ContainSubstring("namespace/opa created"), "namespace/opa not created...")
	opasecretOutput, opaerr := oc.Run("create").Args("secret", "tls", "opa-server", "--cert="+serverCertWithSAN, "--key="+serverKeypem, "-n", "opa").Output()
	o.Expect(opaerr).NotTo(o.HaveOccurred())
	o.Expect(opasecretOutput).Should(o.ContainSubstring("secret/opa-server created"), "secret/opa-server not created...")
	e2e.Logf("2. Step passed: %v with SAN certificate", opasecretOutput)
	exutil.By("3. Create admission webhook")
	// The OPA deployment's default SA needs the privileged SCC to schedule.
	policyOutput, policyerr := oc.WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "privileged", "-z", "default", "-n", "opa").Output()
	o.Expect(policyerr).NotTo(o.HaveOccurred())
	o.Expect(policyOutput).Should(o.ContainSubstring(`clusterrole.rbac.authorization.k8s.io/system:openshift:scc:privileged added: "default"`), "Policy scc privileged not default")
	admissionTemplate := getTestDataFilePath("ocp55494-admission-controller.yaml")
	admissionOutput, admissionerr := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", admissionTemplate).Output()
	o.Expect(admissionerr).NotTo(o.HaveOccurred())
	// Flatten the multi-line apply output so one regexp can assert all created objects in order.
	admissionOutput1 := regexp.MustCompile(`\n`).ReplaceAllString(string(admissionOutput), "")
	admissionOutput2 := `clusterrolebinding.rbac.authorization.k8s.io/opa-viewer.*role.rbac.authorization.k8s.io/configmap-modifier.*rolebinding.rbac.authorization.k8s.io/opa-configmap-modifier.*service/opa.*deployment.apps/opa.*configmap/opa-default-system-main`
	o.Expect(admissionOutput1).Should(o.MatchRegexp(admissionOutput2), "3. Step failed: Admission controller not created as expected")
	e2e.Logf("3. Step passed: Admission controller webhook ::\n %v", admissionOutput)
	exutil.By("4. Create webhook with certificates with SAN.")
	// Base64-encode the SAN cert so it can be injected as the webhook caBundle.
	csrpemcmd := `cat ` + serverCertWithSAN + ` | base64 | tr -d '\n'`
	csrpemcert, csrpemErr := exec.Command("bash", "-c", csrpemcmd).Output()
	o.Expect(csrpemErr).NotTo(o.HaveOccurred())
	webhookTemplate := getTestDataFilePath("ocp77919-webhook-configuration.yaml")
	exutil.CreateClusterResourceFromTemplate(oc.NotShowInfo(), "--ignore-unknown-parameters=true", "-f", webhookTemplate, "-n", "opa", "-p", `SERVERCERT=`+string(csrpemcert))
	e2e.Logf("4. Step passed: opa-validating-webhook created with SAN certificate")
	exutil.By("5. Check rollout latest deploymentconfig.")
	// The test namespace is labeled so the OPA webhook ignores it; the point is
	// that DC operations must still work with the webhook configured cluster-wide.
	tmpnsOutput, tmpnserr := oc.Run("create").Args("ns", "test-ns"+randomStr).Output()
	o.Expect(tmpnserr).NotTo(o.HaveOccurred())
	o.Expect(tmpnsOutput).Should(o.ContainSubstring(fmt.Sprintf("namespace/test-ns%v created", randomStr)), fmt.Sprintf("namespace/test-ns%v not created", randomStr))
	e2e.Logf("namespace/test-ns%v created", randomStr)
	tmplabelOutput, tmplabelErr := oc.Run("label").Args("ns", "test-ns"+randomStr, "openpolicyagent.org/webhook=ignore").Output()
	o.Expect(tmplabelErr).NotTo(o.HaveOccurred())
	o.Expect(tmplabelOutput).Should(o.ContainSubstring(fmt.Sprintf("namespace/test-ns%v labeled", randomStr)), fmt.Sprintf("namespace/test-ns%v not labeled", randomStr))
	e2e.Logf("namespace/test-ns%v labeled", randomStr)
	var (
		deployErr    error
		deployOutput string
	)
	// Retry DC creation: the API may briefly reject requests right after the webhook lands.
	deployConfigErr := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
		deployOutput, deployErr = oc.WithoutNamespace().AsAdmin().Run("create").Args("deploymentconfig", "mydc", "--image", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", "test-ns"+randomStr).Output()
		if deployErr != nil {
			return false, nil
		}
		o.Expect(deployOutput).Should(o.ContainSubstring("deploymentconfig.apps.openshift.io/mydc created"), "deploymentconfig.apps.openshift.io/mydc not created")
		e2e.Logf("deploymentconfig.apps.openshift.io/mydc created")
		return true, nil
	})
	exutil.AssertWaitPollNoErr(deployConfigErr, fmt.Sprintf("Not able to create mydc deploymentconfig :: %v", deployErr))
	// Poll until `oc rollout latest` reports the DC as rolled out.
	waiterrRollout := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
		rollOutput, _ := oc.WithoutNamespace().AsAdmin().Run("rollout").Args("latest", "dc/mydc", "-n", "test-ns"+randomStr).Output()
		if strings.Contains(rollOutput, "rolled out") {
			o.Expect(rollOutput).Should(o.ContainSubstring("deploymentconfig.apps.openshift.io/mydc rolled out"))
			e2e.Logf("5. Step passed: deploymentconfig.apps.openshift.io/mydc rolled out latest deploymentconfig.")
			return true, nil
		}
		return false, nil
	})
	exutil.AssertWaitPollNoErr(waiterrRollout, "5. Step failed: deploymentconfig.apps.openshift.io/mydc not rolled out")
	exutil.By("6. Try to scale deployment config, oc scale should work without error")
	// Core assertion of the bug: `oc scale` must succeed with the webhook in place.
	waitScaleErr := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) {
		scaleErr := oc.WithoutNamespace().AsAdmin().Run("scale").Args("dc/mydc", "--replicas=10", "-n", "test-ns"+randomStr).Execute()
		if scaleErr == nil {
			return true, nil
		}
		return false, nil
	})
	exutil.AssertWaitPollNoErr(waitScaleErr, "5. Step failed: deploymentconfig.apps.openshift.io/mydc not scaled out")
})
file
openshift/openshift-tests-private
f21f7fb1-a7a1-4aab-8bdc-f18863397d3f
apiserver_util
import ( "bufio" "context" "crypto/tls" "crypto/x509" "encoding/pem" "errors" "fmt" "io" "io/ioutil" "math/rand" "net" "net/http" "net/url" "os" "os/exec" "path/filepath" "reflect" "regexp" "strconv" "strings" "time" "k8s.io/apimachinery/pkg/util/wait" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" "github.com/tidwall/gjson" configv1 "github.com/openshift/api/config/v1" "github.com/openshift/openshift-tests-private/test/extended/util" exutil "github.com/openshift/openshift-tests-private/test/extended/util" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" e2e "k8s.io/kubernetes/test/e2e/framework" )
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
package apiserverauth

import (
	"bufio"
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"regexp"
	"strconv"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	g "github.com/onsi/ginkgo/v2"
	o "github.com/onsi/gomega"
	"github.com/tidwall/gjson"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/openshift-tests-private/test/extended/util"
	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	e2e "k8s.io/kubernetes/test/e2e/framework"
)

// fixturePathCache to store fixture path mapping, key: dir name under testdata, value: fixture path
var fixturePathCache = make(map[string]string)

// admissionWebhook holds the template parameters used to instantiate an
// admission webhook (and its backing CRD identifiers) from a fixture template.
// Populated by test cases and consumed by createAdmissionWebhookFromTemplate.
type admissionWebhook struct {
	name             string // metadata.name of the webhook configuration
	webhookname      string // name of the individual webhook entry
	servicenamespace string // namespace of the service backing the webhook
	servicename      string // service that serves the webhook endpoint
	namespace        string // namespace scoping for the template resources
	apigroups        string // rule: API groups the webhook intercepts
	apiversions      string // rule: API versions the webhook intercepts
	operations       string // rule: operations (CREATE/UPDATE/...) intercepted
	resources        string // rule: resources intercepted
	version          string // CRD served version
	pluralname       string // CRD plural name
	singularname     string // CRD singular name
	kind             string // CRD kind
	shortname        string // CRD short name
	template         string // path to the fixture template file
}

// service holds the template parameters for creating a Service from a fixture
// template; consumed by createServiceFromTemplate.
type service struct {
	name      string // metadata.name of the service
	clusterip string // spec.clusterIP to assign
	namespace string // namespace to create the service in
	template  string // path to the fixture template file
}

// Readability constants passed positionally to helpers such as getResource,
// so call sites read e.g. getResource(oc, asAdmin, withoutNamespace, ...).
const (
	asAdmin                   = true
	withoutNamespace          = true
	contain                   = false
	ok                        = true
	defaultRegistryServiceURL = "image-registry.openshift-image-registry.svc:5000"
)

// User is a simple username/password credential pair used by tests.
type User struct {
	Username string
	Password string
}

// createAdmissionWebhookFromTemplate : Used for creating different admission hooks from pre-existing template.
func (admissionHook *admissionWebhook) createAdmissionWebhookFromTemplate(oc *exutil.CLI) { exutil.CreateClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", admissionHook.template, "-p", "NAME="+admissionHook.name, "WEBHOOKNAME="+admissionHook.webhookname, "SERVICENAMESPACE="+admissionHook.servicenamespace, "SERVICENAME="+admissionHook.servicename, "NAMESPACE="+admissionHook.namespace, "APIGROUPS="+admissionHook.apigroups, "APIVERSIONS="+admissionHook.apiversions, "OPERATIONS="+admissionHook.operations, "RESOURCES="+admissionHook.resources, "KIND="+admissionHook.kind, "SHORTNAME="+admissionHook.shortname, "SINGULARNAME="+admissionHook.singularname, "PLURALNAME="+admissionHook.pluralname, "VERSION="+admissionHook.version) } func (service *service) createServiceFromTemplate(oc *exutil.CLI) { exutil.CreateClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", service.template, "-p", "NAME="+service.name, "CLUSTERIP="+service.clusterip, "NAMESPACE="+service.namespace) } func compareAPIServerWebhookConditions(oc *exutil.CLI, conditionReason interface{}, conditionStatus string, conditionTypes []string) { for _, webHookErrorConditionType := range conditionTypes { // increase wait time for prow ci failures err := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { webhookError, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiserver/cluster", "-o", `jsonpath='{.status.conditions[?(@.type=="`+webHookErrorConditionType+`")]}'`).Output() o.Expect(err).NotTo(o.HaveOccurred()) //Inline conditional statement for evaluating 1) reason and status together,2) only status. 
webhookConditionStatus := gjson.Get(webhookError, `status`).String() // If webhook errors from the created flowcollectorconversionwebhook by case OCP-73539, // the webhook condition status will be "True", not the expected "False" if strings.Contains(webhookError, "flows.netobserv.io: dial tcp") { conditionStatus = "True" } isWebhookConditionMet := containsAnyWebHookReason(webhookError, conditionReason) && webhookConditionStatus == conditionStatus if isWebhookConditionMet { e2e.Logf("kube-apiserver admission webhook errors as \n %s ::: %s ::: %s ::: %s", conditionStatus, webhookError, webHookErrorConditionType, conditionReason) o.Expect(webhookError).Should(o.MatchRegexp(`"type":"%s"`, webHookErrorConditionType), "Mismatch in 'type' of admission errors reported") o.Expect(webhookError).Should(o.MatchRegexp(`"status":"%s"`, conditionStatus), "Mismatch in 'status' of admission errors reported") return true, nil } // Adding logging for more debug e2e.Logf("Retrying for expected kube-apiserver admission webhook error ::: %s ::: %s ::: %s ::: %s", conditionStatus, webhookError, webHookErrorConditionType, conditionReason) return false, nil }) if err != nil { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ValidatingWebhookConfiguration").Output() e2e.Logf("#### Debug #### List all ValidatingWebhookConfiguration when the case runs into failures:%s\n", output) exutil.AssertWaitPollNoErr(err, "Test Fail: Expected Kube-apiserver admissionwebhook errors not present.") } } } // GetEncryptionPrefix : func GetEncryptionPrefix(oc *exutil.CLI, key string) (string, error) { var etcdPodName string encryptionType, err1 := oc.WithoutNamespace().Run("get").Args("apiserver/cluster", "-o=jsonpath={.spec.encryption.type}").Output() o.Expect(err1).NotTo(o.HaveOccurred()) if encryptionType != "aesabc" && encryptionType != "aesgcm" { e2e.Logf("The etcd is not encrypted on!") } err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, 
func(cxt context.Context) (bool, error) { podName, err := oc.WithoutNamespace().Run("get").Args("pods", "-n", "openshift-etcd", "-l=etcd", "-o=jsonpath={.items[0].metadata.name}").Output() if err != nil { e2e.Logf("Fail to get etcd pod, error: %s. Trying again", err) return false, nil } etcdPodName = podName return true, nil }) if err != nil { return "", err } var encryptionPrefix string err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) { prefix, err := oc.WithoutNamespace().Run("rsh").Args("-n", "openshift-etcd", "-c", "etcd", etcdPodName, "bash", "-c", `etcdctl get `+key+` --prefix -w fields | grep -e "Value" | grep -o k8s:enc:`+encryptionType+`:v1:[^:]*: | head -n 1`).Output() if err != nil { e2e.Logf("Fail to rsh into etcd pod, error: %s. Trying again", err) return false, nil } encryptionPrefix = prefix return true, nil }) if err != nil { return "", err } return encryptionPrefix, nil } // GetEncryptionKeyNumber : func GetEncryptionKeyNumber(oc *exutil.CLI, patten string) (int, error) { secretNames, err := oc.WithoutNamespace().Run("get").Args("secrets", "-n", "openshift-config-managed", `-o=jsonpath={.items[*].metadata.name}`, "--sort-by=metadata.creationTimestamp").Output() if err != nil { e2e.Logf("Fail to get secret, error: %s", err) return 0, nil } rePattern := regexp.MustCompile(patten) locs := rePattern.FindAllStringIndex(secretNames, -1) i, j := locs[len(locs)-1][0], locs[len(locs)-1][1] maxSecretName := secretNames[i:j] strSlice := strings.Split(maxSecretName, "-") var number int number, err = strconv.Atoi(strSlice[len(strSlice)-1]) if err != nil { e2e.Logf("Fail to get secret, error: %s", err) return 0, nil } return number, nil } // WaitEncryptionKeyMigration : func WaitEncryptionKeyMigration(oc *exutil.CLI, secret string) (bool, error) { var pattern string var waitTime time.Duration if strings.Contains(secret, "openshift-apiserver") { pattern = `migrated-resources: 
.*route.openshift.io.*routes` waitTime = 15 * time.Minute } else if strings.Contains(secret, "openshift-kube-apiserver") { pattern = `migrated-resources: .*configmaps.*secrets.*` waitTime = 30 * time.Minute // see below explanation } else { return false, errors.New("Unknown key " + secret) } rePattern := regexp.MustCompile(pattern) // In observation, the waiting time in max can take 25 mins if it is kube-apiserver, // and 12 mins if it is openshift-apiserver, so the Poll parameters are long. err := wait.PollUntilContextTimeout(context.Background(), 1*time.Minute, waitTime, false, func(cxt context.Context) (bool, error) { output, err := oc.WithoutNamespace().Run("get").Args("secrets", secret, "-n", "openshift-config-managed", "-o=yaml").Output() if err != nil { e2e.Logf("Fail to get the encryption key secret %s, error: %s. Trying again", secret, err) return false, nil } matchedStr := rePattern.FindString(output) if matchedStr == "" { e2e.Logf("Not yet see migrated-resources. Trying again") return false, nil } e2e.Logf("Saw all migrated-resources:\n%s", matchedStr) return true, nil }) if err != nil { return false, err } return true, nil } // CheckIfResourceAvailable : func CheckIfResourceAvailable(oc *exutil.CLI, resource string, resourceNames []string, namespace ...string) (string, bool) { args := append([]string{resource}, resourceNames...) 
if len(namespace) == 1 { args = append(args, "-n", namespace[0]) // HACK: implement no namespace input } out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output() if err == nil { for _, resourceName := range resourceNames { o.Expect(out).Should(o.ContainSubstring(resourceName)) return out, true } } else { e2e.Logf("Debug logs :: Resource '%s' not found :: %s :: %s\n", resource, out, err.Error()) return out, false } return "", true } func waitCoBecomes(oc *exutil.CLI, coName string, waitTime int, expectedStatus map[string]string) error { errCo := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) { gottenStatus := getCoStatus(oc, coName, expectedStatus) eq := reflect.DeepEqual(expectedStatus, gottenStatus) if eq { eq := reflect.DeepEqual(expectedStatus, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}) if eq { // For True False False, we want to wait some bit more time and double check, to ensure it is stably healthy time.Sleep(100 * time.Second) gottenStatus := getCoStatus(oc, coName, expectedStatus) eq := reflect.DeepEqual(expectedStatus, gottenStatus) if eq { e2e.Logf("Given operator %s becomes available/non-progressing/non-degraded", coName) return true, nil } } else { e2e.Logf("Given operator %s becomes %s", coName, gottenStatus) return true, nil } } return false, nil }) if errCo != nil { err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } return errCo } func getCoStatus(oc *exutil.CLI, coName string, statusToCompare map[string]string) map[string]string { newStatusToCompare := make(map[string]string) for key := range statusToCompare { args := fmt.Sprintf(`-o=jsonpath={.status.conditions[?(.type == '%s')].status}`, key) status, _ := getResource(oc, asAdmin, withoutNamespace, "co", coName, args) newStatusToCompare[key] = status } return 
newStatusToCompare } // Check ciphers for authentication operator cliconfig, openshiftapiservers.operator.openshift.io and kubeapiservers.operator.openshift.io: func verifyCiphers(oc *exutil.CLI, expectedCipher string, operator string) error { return wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { switch operator { case "openshift-authentication": e2e.Logf("Get the ciphers for openshift-authentication:") getadminoutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-authentication", "v4-0-config-system-cliconfig", "-o=jsonpath='{.data.v4-0-config-system-cliconfig}'").Output() if err == nil { // Use jqCMD to call jq because .servingInfo part JSON comming in string format jqCMD := fmt.Sprintf(`echo %s | jq -cr '.servingInfo | "\(.cipherSuites) \(.minTLSVersion)"'|tr -d '\n'`, getadminoutput) output, err := exec.Command("bash", "-c", jqCMD).Output() o.Expect(err).NotTo(o.HaveOccurred()) gottenCipher := string(output) e2e.Logf("Comparing the ciphers: %s with %s", expectedCipher, gottenCipher) if expectedCipher == gottenCipher { e2e.Logf("Ciphers are matched: %s", gottenCipher) return true, nil } e2e.Logf("Ciphers are not matched: %s", gottenCipher) return false, nil } return false, nil case "openshiftapiservers.operator", "kubeapiservers.operator": e2e.Logf("Get the ciphers for %s:", operator) getadminoutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(operator, "cluster", "-o=jsonpath={.spec.observedConfig.servingInfo['cipherSuites', 'minTLSVersion']}").Output() if err == nil { e2e.Logf("Comparing the ciphers: %s with %s", expectedCipher, getadminoutput) if expectedCipher == getadminoutput { e2e.Logf("Ciphers are matched: %s", getadminoutput) return true, nil } e2e.Logf("Ciphers are not matched: %s", getadminoutput) return false, nil } return false, nil default: e2e.Logf("Operators parameters not correct..") } return false, nil }) } func 
restoreClusterOcp41899(oc *exutil.CLI) { e2e.Logf("Checking openshift-controller-manager operator should be Available") expectedStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} err := waitCoBecomes(oc, "openshift-controller-manager", 500, expectedStatus) exutil.AssertWaitPollNoErr(err, "openshift-controller-manager operator is not becomes available") output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", "openshift-config").Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(output, "client-ca-custom") { configmapErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "client-ca-custom", "-n", "openshift-config").Execute() o.Expect(configmapErr).NotTo(o.HaveOccurred()) e2e.Logf("Cluster configmap reset to default values") } else { e2e.Logf("Cluster configmap not changed from default values") } } func checkClusterLoad(oc *exutil.CLI, nodeType, dirname string) (int, int) { var tmpPath string var errAdm error errAdmNode := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { tmpPath, errAdm = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "nodes", "-l", "node-role.kubernetes.io/"+nodeType, "--no-headers").OutputToFile(dirname) if errAdm != nil { return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(errAdmNode, fmt.Sprintf("Not able to run adm top command :: %v", errAdm)) cmd := fmt.Sprintf(`cat %v | grep -v 'protocol-buffers' | awk '{print $3}'|awk -F '%%' '{ sum += $1 } END { print(sum / NR) }'|cut -d "." -f1`, tmpPath) cpuAvg, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) cmd = fmt.Sprintf(`cat %v | grep -v 'protocol-buffers' | awk '{print $5}'|awk -F'%%' '{ sum += $1 } END { print(sum / NR) }'|cut -d "." 
-f1`, tmpPath) memAvg, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) re, _ := regexp.Compile(`[^\w]`) cpuAvgs := string(cpuAvg) memAvgs := string(memAvg) cpuAvgs = re.ReplaceAllString(cpuAvgs, "") memAvgs = re.ReplaceAllString(memAvgs, "") cpuAvgVal, _ := strconv.Atoi(cpuAvgs) memAvgVal, _ := strconv.Atoi(memAvgs) return cpuAvgVal, memAvgVal } func checkResources(oc *exutil.CLI, dirname string) map[string]string { resUsedDet := make(map[string]string) resUsed := []string{"secrets", "deployments", "namespaces", "pods"} for _, key := range resUsed { tmpPath, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(key, "-A", "--no-headers").OutputToFile(dirname) o.Expect(err).NotTo(o.HaveOccurred()) cmd := fmt.Sprintf(`cat %v | wc -l | awk '{print $1}'`, tmpPath) output, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) resUsedDet[key] = string(output) } return resUsedDet } func getTestDataFilePath(filename string) string { // returns the file path of the testdata files with respect to apiserverauth subteam. apiDirName := "apiserverauth" apiBaseDir := "" if apiBaseDir = fixturePathCache[apiDirName]; len(apiBaseDir) == 0 { e2e.Logf("apiserver fixture dir is not initialized, start to create") apiBaseDir = exutil.FixturePath("testdata", apiDirName) fixturePathCache[apiDirName] = apiBaseDir e2e.Logf("apiserver fixture dir is initialized: %s", apiBaseDir) } else { apiBaseDir = fixturePathCache[apiDirName] e2e.Logf("apiserver fixture dir found in cache: %s", apiBaseDir) } return filepath.Join(apiBaseDir, filename) } func checkCoStatus(oc *exutil.CLI, coName string, statusToCompare map[string]string) { // Check ,compare and assert the current cluster operator status against the expected status given. 
currentCoStatus := getCoStatus(oc, coName, statusToCompare) o.Expect(reflect.DeepEqual(currentCoStatus, statusToCompare)).To(o.Equal(true), "Wrong %s CO status reported, actual status : %s", coName, currentCoStatus) } func getNodePortRange(oc *exutil.CLI) (int, int) { // Follow the steps in https://docs.openshift.com/container-platform/4.11/networking/configuring-node-port-service-range.html output, err := oc.AsAdmin().Run("get").Args("configmaps", "-n", "openshift-kube-apiserver", "config", `-o=jsonpath="{.data['config\.yaml']}"`).Output() o.Expect(err).NotTo(o.HaveOccurred()) rgx := regexp.MustCompile(`"service-node-port-range":\["([0-9]*)-([0-9]*)"\]`) rs := rgx.FindSubmatch([]byte(output)) o.Expect(rs).To(o.HaveLen(3)) leftBound, err := strconv.Atoi(string(rs[1])) o.Expect(err).NotTo(o.HaveOccurred()) rightBound, err := strconv.Atoi(string(rs[2])) o.Expect(err).NotTo(o.HaveOccurred()) return leftBound, rightBound } // Get a random number of int32 type [m,n], n > m func getRandomNum(m int32, n int32) int32 { rand.Seed(time.Now().UnixNano()) return rand.Int31n(n-m+1) + m } func countResource(oc *exutil.CLI, resource string, namespace string) (int, error) { output, err := oc.Run("get").Args(resource, "-n", namespace, "-o", "jsonpath='{.items[*].metadata.name}'").Output() output = strings.Trim(strings.Trim(output, " "), "'") if output == "" { return 0, err } resources := strings.Split(output, " ") return len(resources), err } // GetAlertsByName get all the alerts func GetAlertsByName(oc *exutil.CLI, alertName string) (string, error) { mon, monErr := exutil.NewPrometheusMonitor(oc.AsAdmin()) if monErr != nil { return "", monErr } allAlerts, allAlertErr := mon.GetAlerts() if allAlertErr != nil { return "", allAlertErr } return allAlerts, nil } func isSNOCluster(oc *exutil.CLI) bool { //Only 1 master, 1 worker node and with the same hostname. 
masterNodes, _ := exutil.GetClusterNodesBy(oc, "master") workerNodes, _ := exutil.GetClusterNodesBy(oc, "worker") if len(masterNodes) == 1 && len(workerNodes) == 1 && masterNodes[0] == workerNodes[0] { return true } return false } // LoadCPUMemWorkload load cpu and memory workload func LoadCPUMemWorkload(oc *exutil.CLI, workLoadtime int) { var ( workerCPUtopstr string workerCPUtopint int workerMEMtopstr string workerMEMtopint int n int m int r int dn int cpuMetric = 800 memMetric = 700 reserveCPUP = 50 reserveMemP = 50 snoPodCapacity = 250 reservePodCapacity = 120 ) workerCPUtopall := []int{} workerMEMtopall := []int{} randomStr := exutil.GetRandomString() dirname := fmt.Sprintf("/tmp/-load-cpu-mem_%s/", randomStr) defer os.RemoveAll(dirname) os.MkdirAll(dirname, 0755) workerNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/master", "--no-headers").OutputToFile("load-cpu-mem_" + randomStr + "-log") o.Expect(err).NotTo(o.HaveOccurred()) cmd := fmt.Sprintf(`cat %v |head -1 | awk '{print $1}'`, workerNode) cmdOut, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) worker1 := strings.Replace(string(cmdOut), "\n", "", 1) // Check if there is an node.metrics on node err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodemetrics", worker1).Execute() var workerTop string if err == nil { workerTop, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "node", worker1, "--no-headers=true").Output() o.Expect(err).NotTo(o.HaveOccurred()) } cpuUsageCmd := fmt.Sprintf(`echo "%v" | awk '{print $2}'`, workerTop) cpuUsage, err := exec.Command("bash", "-c", cpuUsageCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) cpu1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cpuUsage), "") cpu, _ := strconv.Atoi(cpu1) cpuUsageCmdP := fmt.Sprintf(`echo "%v" | awk '{print $3}'`, workerTop) cpuUsageP, err := exec.Command("bash", "-c", cpuUsageCmdP).Output() 
o.Expect(err).NotTo(o.HaveOccurred()) cpuP1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cpuUsageP), "") cpuP, _ := strconv.Atoi(cpuP1) totalCPU := int(float64(cpu) / (float64(cpuP) / 100)) cmd = fmt.Sprintf(`cat %v | awk '{print $1}'`, workerNode) workerCPU1, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) workerCPU := strings.Fields(string(workerCPU1)) workerNodeCount := len(workerCPU) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < len(workerCPU); i++ { // Check if there is node.metrics on node err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodemetrics", workerCPU[i]).Execute() var workerCPUtop string if err == nil { workerCPUtop, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "node", workerCPU[i], "--no-headers=true").OutputToFile("load-cpu-mem_" + randomStr + "-log") o.Expect(err).NotTo(o.HaveOccurred()) } workerCPUtopcmd := fmt.Sprintf(`cat %v | awk '{print $3}'`, workerCPUtop) workerCPUUsage, err := exec.Command("bash", "-c", workerCPUtopcmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) workerCPUtopstr = regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(workerCPUUsage), "") workerCPUtopint, _ = strconv.Atoi(workerCPUtopstr) workerCPUtopall = append(workerCPUtopall, workerCPUtopint) } for j := 1; j < len(workerCPU); j++ { if workerCPUtopall[0] < workerCPUtopall[j] { workerCPUtopall[0] = workerCPUtopall[j] } } cpuMax := workerCPUtopall[0] availableCPU := int(float64(totalCPU) * (100 - float64(reserveCPUP) - float64(cpuMax)) / 100) e2e.Logf("----> Cluster has total CPU, Reserved CPU percentage, Max CPU of node :%v,%v,%v", totalCPU, reserveCPUP, cpuMax) n = int(availableCPU / int(cpuMetric)) if n <= 0 { e2e.Logf("No more CPU resource is available, no load will be added!") } else { if workerNodeCount == 1 { dn = 1 r = 2 } else { dn = 2 if n > workerNodeCount { r = 3 } else { r = workerNodeCount } } // Get the available pods of worker nodes, based on this, the upper limit for a 
namespace is calculated cmd1 := fmt.Sprintf(`oc describe node/%s | grep 'Non-terminated Pods' | grep -oP "[0-9]+"`, worker1) cmdOut1, err := exec.Command("bash", "-c", cmd1).Output() o.Expect(err).NotTo(o.HaveOccurred()) usedPods, err := strconv.Atoi(regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cmdOut1), "")) o.Expect(err).NotTo(o.HaveOccurred()) availablePods := snoPodCapacity - usedPods - reservePodCapacity if workerNodeCount > 1 { availablePods = availablePods * workerNodeCount } nsMax := int(availablePods / dn / r) if nsMax > 0 { if n > nsMax { n = nsMax } } else { n = 1 r = 1 dn = 1 } e2e.Logf("Start CPU load ...") cpuloadCmd := fmt.Sprintf(`clusterbuster --basename=cpuload --workload=cpusoaker --namespaces=%v --processes=1 --deployments=%v --node-selector=node-role.kubernetes.io/master --tolerate=node-role.kubernetes.io/master:Equal:NoSchedule --workloadruntime=7200 --report=none > %v &`, n, dn, dirname+"clusterbuster-cpu-log") e2e.Logf("%v", cpuloadCmd) cmd := exec.Command("bash", "-c", cpuloadCmd) cmdErr := cmd.Start() o.Expect(cmdErr).NotTo(o.HaveOccurred()) // Wait for 3 mins(this time is based on many tests), when the load starts, it will reach a peak within a few minutes, then falls back. 
time.Sleep(180 * time.Second) e2e.Logf("----> Created cpuload related pods: %v", n*r*dn) } memUsageCmd := fmt.Sprintf(`echo "%v" | awk '{print $4}'`, workerTop) memUsage, err := exec.Command("bash", "-c", memUsageCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) mem1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(memUsage), "") mem, _ := strconv.Atoi(mem1) memUsageCmdP := fmt.Sprintf(`echo "%v" | awk '{print $5}'`, workerTop) memUsageP, err := exec.Command("bash", "-c", memUsageCmdP).Output() o.Expect(err).NotTo(o.HaveOccurred()) memP1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(memUsageP), "") memP, _ := strconv.Atoi(memP1) totalMem := int(float64(mem) / (float64(memP) / 100)) for i := 0; i < len(workerCPU); i++ { // Check if there is node.metrics on node err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodemetrics", workerCPU[i]).Execute() var workerMEMtop string if err == nil { workerMEMtop, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "node", workerCPU[i], "--no-headers=true").OutputToFile("load-cpu-mem_" + randomStr + "-log") o.Expect(err).NotTo(o.HaveOccurred()) } workerMEMtopcmd := fmt.Sprintf(`cat %v | awk '{print $5}'`, workerMEMtop) workerMEMUsage, err := exec.Command("bash", "-c", workerMEMtopcmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) workerMEMtopstr = regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(workerMEMUsage), "") workerMEMtopint, _ = strconv.Atoi(workerMEMtopstr) workerMEMtopall = append(workerMEMtopall, workerMEMtopint) } for j := 1; j < len(workerCPU); j++ { if workerMEMtopall[0] < workerMEMtopall[j] { workerMEMtopall[0] = workerMEMtopall[j] } } memMax := workerMEMtopall[0] availableMem := int(float64(totalMem) * (100 - float64(reserveMemP) - float64(memMax)) / 100) m = int(availableMem / int(memMetric)) e2e.Logf("----> Cluster has total Mem, Reserved Mem percentage, Max memory of node :%v,%v,%v", totalMem, reserveMemP, memMax) if m <= 0 { e2e.Logf("No more memory resource is 
available, no load will be added!") } else { if workerNodeCount == 1 { dn = 1 r = 2 } else { r = workerNodeCount if m > workerNodeCount { dn = m } else { dn = workerNodeCount } } // Get the available pods of worker nodes, based on this, the upper limit for a namespace is calculated cmd1 := fmt.Sprintf(`oc describe node/%v | grep 'Non-terminated Pods' | grep -oP "[0-9]+"`, worker1) cmdOut1, err := exec.Command("bash", "-c", cmd1).Output() o.Expect(err).NotTo(o.HaveOccurred()) usedPods, err := strconv.Atoi(regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cmdOut1), "")) o.Expect(err).NotTo(o.HaveOccurred()) availablePods := snoPodCapacity - usedPods - reservePodCapacity if workerNodeCount > 1 { availablePods = availablePods * workerNodeCount // Reduce the number pods in which workers create memory loads concurrently, avoid kubelet crash if availablePods > 200 { availablePods = int(availablePods / 2) } } nsMax := int(availablePods / dn / r) if nsMax > 0 { if m > nsMax { m = nsMax } } else { m = 1 r = 1 dn = 1 } e2e.Logf("Start Memory load ...") memloadCmd := fmt.Sprintf(`clusterbuster --basename=memload --workload=memory --namespaces=%v --processes=1 --deployments=%v --node-selector=node-role.kubernetes.io/master --tolerate=node-role.kubernetes.io/master:Equal:NoSchedule --workloadruntime=7200 --report=none> %v &`, m, dn, dirname+"clusterbuster-mem-log") e2e.Logf("%v", memloadCmd) cmd := exec.Command("bash", "-c", memloadCmd) cmdErr := cmd.Start() o.Expect(cmdErr).NotTo(o.HaveOccurred()) // Wait for 5 mins, ensure that all load pods are strated up. 
time.Sleep(300 * time.Second) e2e.Logf("----> Created memload related pods: %v", m*r*dn) } // If load are landed, will do some checking with logs if n > 0 || m > 0 { keywords := "body: net/http: request canceled (Client.Timeout|panic" bustercmd := fmt.Sprintf(`cat %v | grep -iE '%s' || true`, dirname+"clusterbuster*", keywords) busterLogs, err := exec.Command("bash", "-c", bustercmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) if len(busterLogs) > 0 { e2e.Logf("%s", busterLogs) e2e.Logf("Found some panic or timeout errors, if errors are potential bug then file a bug.") } else { e2e.Logf("No errors found in clusterbuster logs") } } else { e2e.Logf("No more CPU and memory resource, no any load is added.") } } // CopyToFile copy a given file into a temp folder with given file name func CopyToFile(fromPath string, toFilename string) string { // check if source file is regular file srcFileStat, err := os.Stat(fromPath) if err != nil { e2e.Failf("get source file %s stat failed: %v", fromPath, err) } if !srcFileStat.Mode().IsRegular() { e2e.Failf("source file %s is not a regular file", fromPath) } // open source file source, err := os.Open(fromPath) if err != nil { e2e.Failf("open source file %s failed: %v", fromPath, err) } defer source.Close() // open dest file saveTo := filepath.Join(e2e.TestContext.OutputDir, toFilename) dest, err := os.Create(saveTo) if err != nil { e2e.Failf("open destination file %s failed: %v", saveTo, err) } defer dest.Close() // copy from source to dest _, err = io.Copy(dest, source) if err != nil { e2e.Failf("copy file from %s to %s failed: %v", fromPath, saveTo, err) } return saveTo } func ExecCommandOnPod(oc *exutil.CLI, podname string, namespace string, command string) string { var podOutput string var execpodErr error errExec := wait.PollUntilContextTimeout(context.Background(), 15*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { podOutput, execpodErr = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", 
namespace, podname, "--", "/bin/sh", "-c", command).Output() podOutput = strings.TrimSpace(podOutput) e2e.Logf("Attempting to execute command on pod %v. Output: %v, Error: %v", podname, podOutput, execpodErr) if execpodErr != nil { // Check for TLS internal error and handle CSR approval if detected, https://access.redhat.com/solutions/4307511 matchTLS, _ := regexp.MatchString(`(?i)tls.*internal error`, podOutput) if matchTLS { e2e.Logf("Detected TLS error in output for pod %v: %v", podname, podOutput) // Attempt to approve any pending CSRs getCsr, getCsrErr := getPendingCSRs(oc) if getCsrErr != nil { e2e.Logf("Error retrieving pending CSRs: %v", getCsrErr) return false, nil } for _, csr := range getCsr { e2e.Logf("Approving CSR: %v", csr) appCsrErr := oc.WithoutNamespace().AsAdmin().Run("adm").Args("certificate", "approve", csr).Execute() if appCsrErr != nil { e2e.Logf("Error approving CSR %v: %v", csr, appCsrErr) return false, nil } } e2e.Logf("Pending CSRs approved. Retrying command on pod %v...", podname) return false, nil } else { e2e.Logf("Command execution error on pod %v: %v", podname, execpodErr) return false, nil } } else if podOutput != "" { e2e.Logf("Successfully retrieved non-empty output from pod %v: %v", podname, podOutput) return true, nil } else { e2e.Logf("Received empty output from pod %v. Retrying...", podname) return false, nil } }) exutil.AssertWaitPollNoErr(errExec, fmt.Sprintf("Unable to run command on pod %v :: %v :: Output: %v :: Error: %v", podname, command, podOutput, execpodErr)) return podOutput } // clusterHealthcheck do cluster health check like pod, node and operators func clusterHealthcheck(oc *exutil.CLI, dirname string) error { err := clusterNodesHealthcheck(oc, 600, dirname) if err != nil { return fmt.Errorf("Cluster nodes health check failed. Abnormality found in nodes.") } err = clusterOperatorHealthcheck(oc, 1500, dirname) if err != nil { return fmt.Errorf("Cluster operators health check failed. 
Abnormality found in cluster operators.") } err = clusterPodsHealthcheck(oc, 600, dirname) if err != nil { return fmt.Errorf("Cluster pods health check failed. Abnormality found in pods.") } return nil } // clusterOperatorHealthcheck check abnormal operators func clusterOperatorHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error { e2e.Logf("Check the abnormal operators") errCo := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) { coLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "--no-headers").OutputToFile(dirname) if err == nil { cmd := fmt.Sprintf(`cat %v | grep -v '.True.*False.*False' || true`, coLogFile) coLogs, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) if len(coLogs) > 0 { return false, nil } } else { return false, nil } err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("No abnormality found in cluster operators...") return true, nil }) if errCo != nil { err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } return errCo } // clusterPodsHealthcheck check abnormal pods. 
// clusterPodsHealthcheck polls `oc get pods -A` until no pod is in an abnormal
// state (anything other than Running/Completed, ignoring lines matching
// 'namespace' and 'installer'), or until waitTime seconds elapse.
// The raw listing is written to a file under dirname; on timeout the last
// abnormal pod lines are logged and the poll error is returned.
func clusterPodsHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
	e2e.Logf("Check the abnormal pods")
	var podLogs []byte
	errPod := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
		podLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-A").OutputToFile(dirname)
		if err == nil {
			// Filter out healthy states; '|| true' keeps the pipeline exit code 0 when grep finds nothing.
			cmd := fmt.Sprintf(`cat %v | grep -ivE 'Running|Completed|namespace|installer' || true`, podLogFile)
			podLogs, err = exec.Command("bash", "-c", cmd).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			if len(podLogs) > 0 {
				// Some pods are still abnormal; keep polling.
				return false, nil
			}
		} else {
			return false, nil
		}
		e2e.Logf("No abnormality found in pods...")
		return true, nil
	})
	if errPod != nil {
		// Surface the offending pod lines captured on the last attempt.
		e2e.Logf("%s", podLogs)
	}
	return errPod
}

// clusterNodesHealthcheck polls the node list until no node reports NotReady
// or SchedulingDisabled, or until waitTime seconds elapse. The dirname
// parameter is accepted for signature symmetry with the sibling healthchecks
// but is not used here. On timeout the node table is printed for diagnosis.
func clusterNodesHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
	errNode := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
		if err == nil {
			if strings.Contains(output, "NotReady") || strings.Contains(output, "SchedulingDisabled") {
				return false, nil
			}
		} else {
			return false, nil
		}
		e2e.Logf("Nodes are normal...")
		// Print the healthy node table into the test log for the record.
		err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		return true, nil
	})
	if errNode != nil {
		err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	return errNode
}

// apiserverReadinessProbe is used for MicroShift to confirm the apiserver
// rejects requests while it is still initializing. It sends GET <apiserverName>/apis
// with the given bearer token and the X-OpenShift-Internal-If-Not-Ready: reject
// header, polling for up to 300s until a 429 response contains the
// "hasn't been fully initialized" message; that body text is returned.
// TLS verification is skipped (test-only client).
func apiserverReadinessProbe(tokenValue string, apiserverName string) string {
	timeoutDuration := 3 * time.Second
	var bodyString string
	url := fmt.Sprintf(`%s/apis`, apiserverName)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		e2e.Failf("error creating request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+tokenValue)
	req.Header.Set("X-OpenShift-Internal-If-Not-Ready", "reject")
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	client := &http.Client{
		Transport: transport,
		Timeout:   timeoutDuration,
	}
	errCurl := wait.PollImmediate(1*time.Second, 300*time.Second, func() (bool, error) {
		resp, err := client.Do(req)
		if err != nil {
			e2e.Logf("Error while making curl request :: %v", err)
			return false, nil
		}
		defer resp.Body.Close()
		if resp.StatusCode == 429 {
			// 429 while initializing carries the "not fully initialized" message we probe for.
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			bodyString = string(bodyBytes)
			return strings.Contains(bodyString, "The apiserver hasn't been fully initialized yet, please try again later"), nil
		}
		return false, nil
	})
	exutil.AssertWaitPollNoErr(errCurl, fmt.Sprintf("error waiting for API server readiness: %v", errCurl))
	return bodyString
}

// getServiceIP returns a service IP in the cluster service network that is not
// currently assigned to any Service. Starting from clusterIP it randomizes the
// last octet (IPv4) or last byte (IPv6) and checks the candidate against all
// existing .spec.clusterIP values, retrying for up to 60s (30 attempts).
func getServiceIP(oc *exutil.CLI, clusterIP string) net.IP {
	var serviceIP net.IP
	err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) {
		randomServiceIP := net.ParseIP(clusterIP).To4()
		if randomServiceIP != nil {
			// NOTE(review): byte addition may wrap past 255 for high base octets — presumably acceptable for test use; confirm.
			randomServiceIP[3] += byte(rand.Intn(254 - 1))
		} else {
			randomServiceIP = net.ParseIP(clusterIP).To16()
			randomServiceIP[len(randomServiceIP)-1] = byte(rand.Intn(254 - 1))
		}
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-A", `-o=jsonpath={.items[*].spec.clusterIP}`).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		if matched, _ := regexp.MatchString(randomServiceIP.String(), output); matched {
			e2e.Logf("IP %v has been used!", randomServiceIP)
			return false, nil
		}
		serviceIP = randomServiceIP
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "Failed to get one available service IP!")
	return serviceIP
}

// doAction (below) runs an arbitrary oc verb with the requested admin/namespace context.
func doAction(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) { if asAdmin && withoutNamespace { return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output() } if asAdmin && !withoutNamespace { return oc.AsAdmin().Run(action).Args(parameters...).Output() } if !asAdmin && withoutNamespace { return oc.WithoutNamespace().Run(action).Args(parameters...).Output() } if !asAdmin && !withoutNamespace { return oc.Run(action).Args(parameters...).Output() } return "", nil } // Get something existing resource func getResource(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) { return doAction(oc, "get", asAdmin, withoutNamespace, parameters...) } // Get something resource to be ready func getResourceToBeReady(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) string { var result string var err error errPoll := wait.PollUntilContextTimeout(context.Background(), 6*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { result, err = doAction(oc, "get", asAdmin, withoutNamespace, parameters...) 
if err != nil || len(result) == 0 { e2e.Logf("Unable to retrieve the expected resource, retrying...") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(errPoll, fmt.Sprintf("Failed to retrieve %v", parameters)) e2e.Logf("The resource returned:\n%v", result) return result } func getGlobalProxy(oc *exutil.CLI) (string, string, string) { httpProxy, err := getResource(oc, asAdmin, withoutNamespace, "proxy", "cluster", "-o=jsonpath={.status.httpProxy}") o.Expect(err).NotTo(o.HaveOccurred()) httpsProxy, err := getResource(oc, asAdmin, withoutNamespace, "proxy", "cluster", "-o=jsonpath={.status.httpsProxy}") o.Expect(err).NotTo(o.HaveOccurred()) noProxy, err := getResource(oc, asAdmin, withoutNamespace, "proxy", "cluster", "-o=jsonpath={.status.noProxy}") o.Expect(err).NotTo(o.HaveOccurred()) return httpProxy, httpsProxy, noProxy } // Get the pods List by label func getPodsListByLabel(oc *exutil.CLI, namespace string, selectorLabel string) []string { podsOp := getResourceToBeReady(oc, asAdmin, withoutNamespace, "pod", "-n", namespace, "-l", selectorLabel, "-o=jsonpath={.items[*].metadata.name}") o.Expect(podsOp).NotTo(o.BeEmpty()) return strings.Split(podsOp, " ") } func checkApiserversAuditPolicies(oc *exutil.CLI, auditPolicyName string) { e2e.Logf("Checking the current " + auditPolicyName + " audit policy of cluster") defaultProfile := getResourceToBeReady(oc, asAdmin, withoutNamespace, "apiserver/cluster", `-o=jsonpath={.spec.audit.profile}`) o.Expect(defaultProfile).Should(o.ContainSubstring(auditPolicyName), "current audit policy of cluster is not default :: "+defaultProfile) e2e.Logf("Checking the audit config file of kube-apiserver currently in use.") podsList := getPodsListByLabel(oc.AsAdmin(), "openshift-kube-apiserver", "app=openshift-kube-apiserver") execKasOuptut := ExecCommandOnPod(oc, podsList[0], "openshift-kube-apiserver", "ls /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-audit-policies/") re := 
regexp.MustCompile(`policy.yaml`) matches := re.FindAllString(execKasOuptut, -1) if len(matches) == 0 { e2e.Failf("Audit config file of kube-apiserver is wrong :: %s", execKasOuptut) } e2e.Logf("Audit config file of kube-apiserver :: %s", execKasOuptut) e2e.Logf("Checking the audit config file of openshif-apiserver currently in use.") podsList = getPodsListByLabel(oc.AsAdmin(), "openshift-apiserver", "app=openshift-apiserver-a") execOasOuptut := ExecCommandOnPod(oc, podsList[0], "openshift-apiserver", "cat /var/run/configmaps/config/config.yaml") re = regexp.MustCompile(`/var/run/configmaps/audit/policy.yaml`) matches = re.FindAllString(execOasOuptut, -1) if len(matches) == 0 { e2e.Failf("Audit config file of openshift-apiserver is wrong :: %s", execOasOuptut) } e2e.Logf("Audit config file of openshift-apiserver :: %v", matches) e2e.Logf("Checking the audit config file of openshif-oauth-apiserver currently in use.") podsList = getPodsListByLabel(oc.AsAdmin(), "openshift-oauth-apiserver", "app=openshift-oauth-apiserver") execAuthOuptut := ExecCommandOnPod(oc, podsList[0], "openshift-oauth-apiserver", "ls /var/run/configmaps/audit/") re = regexp.MustCompile(`policy.yaml`) matches = re.FindAllString(execAuthOuptut, -1) if len(matches) == 0 { e2e.Failf("Audit config file of openshift-oauth-apiserver is wrong :: %s", execAuthOuptut) } e2e.Logf("Audit config file of openshift-oauth-apiserver :: %v", execAuthOuptut) } func checkAuditLogs(oc *exutil.CLI, script string, masterNode string, namespace string) (string, int) { g.By(fmt.Sprintf("Get audit log file from %s", masterNode)) masterNodeLogs, checkLogFileErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=" + namespace}, "bash", "-c", script) o.Expect(checkLogFileErr).NotTo(o.HaveOccurred()) errCount := len(strings.TrimSpace(masterNodeLogs)) return masterNodeLogs, errCount } func setAuditProfile(oc *exutil.CLI, patchNamespace string, patch string) string { 
expectedProgCoStatus := map[string]string{"Progressing": "True"} expectedCoStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} coOps := []string{"authentication", "openshift-apiserver"} patchOutput, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchNamespace, "--type=json", "-p", patch).Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(patchOutput, "patched") { e2e.Logf("Checking KAS, OAS, Auththentication operators should be in Progressing and Available after audit profile change") g.By("Checking kube-apiserver operator should be in Progressing in 100 seconds") err = waitCoBecomes(oc, "kube-apiserver", 100, expectedProgCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds") e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds") err = waitCoBecomes(oc, "kube-apiserver", 1500, expectedCoStatus) exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds") // Using 60s because KAS takes long time, when KAS finished rotation, OAS and Auth should have already finished. for _, ops := range coOps { e2e.Logf("Checking %s should be Available in 60 seconds", ops) err = waitCoBecomes(oc, ops, 60, expectedCoStatus) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%v operator is not becomes available in 60 seconds", ops)) } e2e.Logf("Post audit profile set. 
KAS, OAS and Auth operator are available after rollout") return patchOutput } return patchOutput } func getNewUser(oc *exutil.CLI, count int) ([]User, string, string) { command := "htpasswd" _, err := exec.LookPath("command") if err != nil { e2e.Failf("Command '%s' not found in PATH, exit execution!", command) } usersDirPath := "/tmp/" + exutil.GetRandomString() usersHTpassFile := usersDirPath + "/htpasswd" err = os.MkdirAll(usersDirPath, 0o755) o.Expect(err).NotTo(o.HaveOccurred()) htPassSecret, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("oauth/cluster", "-o", "jsonpath={.spec.identityProviders[0].htpasswd.fileData.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if htPassSecret == "" { htPassSecret = "htpass-secret" os.Create(usersHTpassFile) err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", "openshift-config", "secret", "generic", htPassSecret, "--from-file", "htpasswd="+usersHTpassFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("--type=json", "-p", `[{"op": "add", "path": "/spec/identityProviders", "value": [{"htpasswd": {"fileData": {"name": "htpass-secret"}}, "mappingMethod": "claim", "name": "htpasswd", "type": "HTPasswd"}]}]`, "oauth/cluster").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } else { err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("-n", "openshift-config", "secret/"+htPassSecret, "--to", usersDirPath, "--confirm").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } users := make([]User, count) for i := 0; i < count; i++ { // Generate new username and password users[i].Username = fmt.Sprintf("testuser-%v-%v", i, exutil.GetRandomString()) users[i].Password = exutil.GetRandomString() // Add new user to htpasswd file in the temp directory cmd := fmt.Sprintf("htpasswd -b %v %v %v", usersHTpassFile, users[i].Username, users[i].Password) err := exec.Command("bash", "-c", cmd).Run() o.Expect(err).NotTo(o.HaveOccurred()) } // Update htpass-secret 
with the modified htpasswd file err = oc.AsAdmin().WithoutNamespace().Run("set").Args("-n", "openshift-config", "data", "secret/"+htPassSecret, "--from-file", "htpasswd="+usersHTpassFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Checking authentication operator should be in Progressing in 180 seconds") err = waitCoBecomes(oc, "authentication", 180, map[string]string{"Progressing": "True"}) exutil.AssertWaitPollNoErr(err, "authentication operator is not start progressing in 180 seconds") e2e.Logf("Checking authentication operator should be Available in 600 seconds") err = waitCoBecomes(oc, "authentication", 600, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}) exutil.AssertWaitPollNoErr(err, "authentication operator is not becomes available in 600 seconds") return users, usersHTpassFile, htPassSecret } func userCleanup(oc *exutil.CLI, users []User, usersHTpassFile string, htPassSecret string) { defer os.RemoveAll(usersHTpassFile) for _, user := range users { // Add new user to htpasswd file in the temp directory cmd := fmt.Sprintf("htpasswd -D %v %v", usersHTpassFile, user.Username) err := exec.Command("bash", "-c", cmd).Run() o.Expect(err).NotTo(o.HaveOccurred()) } // Update htpass-secret with the modified htpasswd file err := oc.AsAdmin().WithoutNamespace().Run("set").Args("-n", "openshift-config", "data", "secret/"+htPassSecret, "--from-file", "htpasswd="+usersHTpassFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Checking authentication operator should be in Progressing in 180 seconds") err = waitCoBecomes(oc, "authentication", 180, map[string]string{"Progressing": "True"}) exutil.AssertWaitPollNoErr(err, "authentication operator is not start progressing in 180 seconds") e2e.Logf("Checking authentication operator should be Available in 600 seconds") err = waitCoBecomes(oc, "authentication", 600, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}) 
exutil.AssertWaitPollNoErr(err, "authentication operator is not becomes available in 600 seconds") } func isConnectedInternet(oc *exutil.CLI) bool { masterNode, masterErr := exutil.GetFirstMasterNode(oc) o.Expect(masterErr).NotTo(o.HaveOccurred()) cmd := `timeout 9 curl -k https://github.com/openshift/ruby-hello-world/ > /dev/null;[ $? -eq 0 ] && echo "connected"` output, _ := exutil.DebugNodeWithChroot(oc, masterNode, "bash", "-c", cmd) if matched, _ := regexp.MatchString("connected", output); !matched { // Failed to access to the internet in the cluster. return false } return true } func restartMicroshift(nodename string) error { // Try restarting microshift three times var restartErr error for i := 0; i < 3; i++ { // Execute the command _, restartErr = runSSHCommand(nodename, "redhat", "sudo systemctl restart microshift") if restartErr != nil { e2e.Logf("Error restarting microshift :: %v", restartErr) time.Sleep(time.Second * 5) // Wait for 5 seconds before retrying continue } // If successful, break out of the loop break } if restartErr != nil { return fmt.Errorf("Failed to restart Microshift server: %v", restartErr) } var output string var err error pollErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) { output, err = runSSHCommand(nodename, "redhat", "sudo systemctl is-active microshift") if err != nil { return false, nil // Retry } return strings.TrimSpace(output) == "active", nil }) if pollErr != nil { return fmt.Errorf("Failed to perform action: %v", pollErr) } e2e.Logf("Microshift restarted successfully") return nil } func replacePatternInfile(microshiftFilePathYaml string, oldPattern string, newPattern string) { content, err := ioutil.ReadFile(microshiftFilePathYaml) o.Expect(err).NotTo(o.HaveOccurred()) re := regexp.MustCompile(oldPattern) newContent := re.ReplaceAll(content, []byte(newPattern)) err = ioutil.WriteFile(microshiftFilePathYaml, newContent, 0644) 
o.Expect(err).NotTo(o.HaveOccurred()) } // Get the pods List by label func getPodsList(oc *exutil.CLI, namespace string) []string { podsOp := getResourceToBeReady(oc, asAdmin, withoutNamespace, "pod", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}") podNames := strings.Split(strings.TrimSpace(podsOp), " ") e2e.Logf("Namespace %s pods are: %s", namespace, string(podsOp)) return podNames } func changeMicroshiftConfig(configStr string, nodeName string, configPath string) { etcConfigCMD := fmt.Sprintf(`' configfile=%v cat > $configfile << EOF %v EOF'`, configPath, configStr) _, mchgConfigErr := runSSHCommand(nodeName, "redhat", "sudo bash -c", etcConfigCMD) o.Expect(mchgConfigErr).NotTo(o.HaveOccurred()) } func addKustomizationToMicroshift(nodeName string, kustomizationFiles map[string][]string) { for key, file := range kustomizationFiles { tmpFileName := getTestDataFilePath(file[0]) replacePatternInfile(tmpFileName, file[2], file[3]) fileOutput, err := exec.Command("bash", "-c", fmt.Sprintf(`cat %s`, tmpFileName)).Output() o.Expect(err).NotTo(o.HaveOccurred()) destFile := filepath.Join(file[1], strings.Split(key, ".")[0]+".yaml") fileCmd := fmt.Sprintf(`'cat > %s << EOF %s EOF'`, destFile, string(fileOutput)) _, mchgConfigErr := runSSHCommand(nodeName, "redhat", "sudo bash -c", fileCmd) o.Expect(mchgConfigErr).NotTo(o.HaveOccurred()) } } // Check ciphers of configmap of kube-apiservers, openshift-apiservers and oauth-openshift-apiservers are using. 
func verifyHypershiftCiphers(oc *exutil.CLI, expectedCipher string, ns string) error { var ( cipherStr string randomStr = exutil.GetRandomString() tmpDir = fmt.Sprintf("/tmp/-api-%s/", randomStr) ) defer os.RemoveAll(tmpDir) os.MkdirAll(tmpDir, 0755) for _, item := range []string{"kube-apiserver", "openshift-apiserver", "oauth-openshift"} { e2e.Logf("#### Checking the ciphers of %s:", item) if item == "kube-apiserver" { out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", ns, "kas-config", `-o=jsonpath='{.data.config\.json}'`).Output() o.Expect(err).NotTo(o.HaveOccurred()) // Use jq command line to extrack .servingInfo part JSON comming in string format jqCmd := fmt.Sprintf(`echo %s | jq -cr '.servingInfo | "\(.cipherSuites) \(.minTLSVersion)"'|tr -d '\n'`, out) outJQ, err := exec.Command("bash", "-c", jqCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) cipherStr = string(outJQ) } else { jsonOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", ns, item, `-ojson`).OutputToFile("api-" + randomStr + "." 
+ item) o.Expect(err).NotTo(o.HaveOccurred()) jqCmd := fmt.Sprintf(`cat %v | jq -r '.data."config.yaml"'`, jsonOut) yamlConfig, err := exec.Command("bash", "-c", jqCmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) jsonConfig, errJson := util.Yaml2Json(string(yamlConfig)) o.Expect(errJson).NotTo(o.HaveOccurred()) jsonFile := tmpDir + item + "config.json" f, err := os.Create(jsonFile) o.Expect(err).NotTo(o.HaveOccurred()) defer f.Close() w := bufio.NewWriter(f) _, err = fmt.Fprintf(w, "%s", jsonConfig) w.Flush() o.Expect(err).NotTo(o.HaveOccurred()) jqCmd1 := fmt.Sprintf(`jq -cr '.servingInfo | "\(.cipherSuites) \(.minTLSVersion)"' %s |tr -d '\n'`, jsonFile) jsonOut1, err := exec.Command("bash", "-c", jqCmd1).Output() o.Expect(err).NotTo(o.HaveOccurred()) cipherStr = string(jsonOut1) } e2e.Logf("#### Checking if the ciphers has been changed as the expected: %s", expectedCipher) if expectedCipher != cipherStr { e2e.Logf("#### Ciphers of %s are: %s", item, cipherStr) return fmt.Errorf("Ciphers not matched") } e2e.Logf("#### Ciphers are matched.") } return nil } // Waiting for apiservers restart func waitApiserverRestartOfHypershift(oc *exutil.CLI, appLabel string, ns string, waitTime int) error { re, err := regexp.Compile(`(0/[0-9]|Pending|Terminating|Init)`) o.Expect(err).NotTo(o.HaveOccurred()) errKas := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) { out, _ := getResource(oc, asAdmin, withoutNamespace, "pods", "-l", "app="+appLabel, "--no-headers", "-n", ns) if matched := re.MatchString(out); matched { e2e.Logf("#### %s was restarting ...", appLabel) return false, nil } // Recheck status of pods and to do further confirm , avoid false restarts for i := 1; i <= 3; i++ { time.Sleep(10 * time.Second) out, _ = getResource(oc, asAdmin, withoutNamespace, "pods", "-l", "app="+appLabel, "--no-headers", "-n", ns) if matchedAgain := re.MatchString(out); matchedAgain { 
e2e.Logf("#### %s was restarting ...", appLabel) return false, nil } } e2e.Logf("#### %s have been restarted!", appLabel) return true, nil }) exutil.AssertWaitPollNoErr(errKas, "Failed to complete the restart within the expected time, please check the cluster status!") return errKas } func containsAnyWebHookReason(webhookError string, conditionReasons interface{}) bool { switch reasons := conditionReasons.(type) { case string: return strings.Contains(webhookError, reasons) case []string: for _, reason := range reasons { if strings.Contains(webhookError, reason) { return true } } return false default: return false } } func clientCurl(tokenValue string, url string) string { timeoutDuration := 3 * time.Second var bodyString string proxyURL := getProxyURL() req, err := http.NewRequest("GET", url, nil) if err != nil { e2e.Failf("error creating request: %v", err) } req.Header.Set("Authorization", "Bearer "+tokenValue) transport := &http.Transport{ Proxy: http.ProxyURL(proxyURL), TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, }, } client := &http.Client{ Transport: transport, Timeout: timeoutDuration, } errCurl := wait.PollImmediate(10*time.Second, 300*time.Second, func() (bool, error) { resp, err := client.Do(req) if err != nil { return false, nil } defer resp.Body.Close() if resp.StatusCode == 200 { bodyBytes, _ := ioutil.ReadAll(resp.Body) bodyString = string(bodyBytes) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(errCurl, fmt.Sprintf("error waiting for curl request output: %v", errCurl)) return bodyString } // Return the API server FQDN and port. 
format is like api.$clustername.$basedomain func getApiServerFQDNandPort(oc *exutil.CLI, hypershiftCluster bool) (string, string) { var ( apiServerURL string configErr error ) if !hypershiftCluster { apiServerURL, configErr = oc.AsAdmin().WithoutNamespace().Run("config").Args("view", "-ojsonpath={.clusters[0].cluster.server}").Output() } else { apiServerURL, configErr = oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("config").Args("view", "-ojsonpath={.clusters[0].cluster.server}").Output() } o.Expect(configErr).NotTo(o.HaveOccurred()) fqdnName, parseErr := url.Parse(apiServerURL) o.Expect(parseErr).NotTo(o.HaveOccurred()) return fqdnName.Hostname(), fqdnName.Port() } // isTechPreviewNoUpgrade checks if a cluster is a TechPreviewNoUpgrade cluster func isTechPreviewNoUpgrade(oc *exutil.CLI) bool { featureGate, err := oc.AdminConfigClient().ConfigV1().FeatureGates().Get(context.Background(), "cluster", metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return false } e2e.Failf("could not retrieve feature-gate: %v", err) } return featureGate.Spec.FeatureSet == configv1.TechPreviewNoUpgrade } // IsIPv4 check if the string is an IPv4 address. func isIPv4(str string) bool { ip := net.ParseIP(str) return ip != nil && strings.Contains(str, ".") } // IsIPv6 check if the string is an IPv6 address. 
func isIPv6(str string) bool { ip := net.ParseIP(str) return ip != nil && strings.Contains(str, ":") } // Copy one public image to the internel image registry of OCP cluster func copyImageToInternelRegistry(oc *exutil.CLI, namespace string, source string, dest string) (string, error) { var ( podName string appName = "skopeo" err error ) podName, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", "name="+appName, "-o", `jsonpath={.items[*].metadata.name}`).Output() // If the skopeo pod doesn't exist, create it if len(podName) == 0 { template := getTestDataFilePath("skopeo-deployment.json") err = oc.Run("create").Args("-f", template, "-n", namespace).Execute() o.Expect(err).NotTo(o.HaveOccurred()) podName = getPodsListByLabel(oc.AsAdmin(), namespace, "name="+appName)[0] exutil.AssertPodToBeReady(oc, podName, namespace) } else { output, err := oc.AsAdmin().Run("get").Args("pod", podName, "-n", namespace, "-o", "jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("True"), appName+" pod is not ready!") } token, err := getSAToken(oc, "builder", namespace) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(token).NotTo(o.BeEmpty()) command := []string{podName, "-n", namespace, "--", appName, "--insecure-policy", "--src-tls-verify=false", "--dest-tls-verify=false", "copy", "--dcreds", "dnm:" + token, source, dest} results, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(command...).Output() return results, err } // Check if BaselineCapabilities have been set func isBaselineCapsSet(oc *exutil.CLI) bool { baselineCapabilitySet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.spec.capabilities.baselineCapabilitySet}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("baselineCapabilitySet parameters: %v\n", baselineCapabilitySet) return len(baselineCapabilitySet) != 0 } // Check if 
component is listed in clusterversion.status.capabilities.enabledCapabilities func isEnabledCapability(oc *exutil.CLI, component string) bool { enabledCapabilities, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].status.capabilities.enabledCapabilities}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("Cluster enabled capability parameters: %v\n", enabledCapabilities) return strings.Contains(enabledCapabilities, component) } func checkURLEndpointAccess(oc *exutil.CLI, hostIP, nodePort, podName, portCommand, status string) { var url string var curlOutput string var curlErr error if isIPv6(hostIP) { url = fmt.Sprintf("[%s]:%s", hostIP, nodePort) } else { url = fmt.Sprintf("%s:%s", hostIP, nodePort) } // Construct the full command with the specified command and URL var fullCommand string if portCommand == "https" { fullCommand = fmt.Sprintf("curl -k https://%s", url) } else { fullCommand = fmt.Sprintf("curl %s", url) } e2e.Logf("Command: %v", fullCommand) e2e.Logf("Checking if the specified URL endpoint %s is accessible", url) err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 6*time.Second, false, func(cxt context.Context) (bool, error) { curlOutput, curlErr = oc.Run("exec").Args(podName, "-i", "--", "sh", "-c", fullCommand).Output() if curlErr != nil { return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Unable to access %s", url)) o.Expect(curlOutput).To(o.ContainSubstring(status)) } type CertificateDetails struct { CurlResponse string Subject string Issuer string NotBefore string NotAfter string SubjectAltName []string SerialNumber string } // urlHealthCheck performs a health check on the given FQDN name and port func urlHealthCheck(fqdnName string, port string, certPath string, returnValues []string) (*CertificateDetails, error) { proxyURL := getProxyURL() caCert, err := ioutil.ReadFile(certPath) if err != nil { return nil, fmt.Errorf("Error reading CA 
certificate: %s", err) } // Create a CertPool and add the CA certificate caCertPool := x509.NewCertPool() if !caCertPool.AppendCertsFromPEM(caCert) { return nil, fmt.Errorf("Failed to append CA certificate") } // Create a custom transport with the CA certificate transport := &http.Transport{ Proxy: http.ProxyURL(proxyURL), TLSClientConfig: &tls.Config{ RootCAs: caCertPool, }, } client := &http.Client{ Transport: transport, } url := fmt.Sprintf("https://%s:%s/healthz", fqdnName, port) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() var certDetails *CertificateDetails err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) { resp, err := client.Get(url) if err != nil { e2e.Logf("Error performing HTTP request: %s, retrying...\n", err) return false, nil } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return false, fmt.Errorf("Error reading response body: %s", err) } certDetails = &CertificateDetails{} if resp.TLS != nil && len(resp.TLS.PeerCertificates) > 0 { cert := resp.TLS.PeerCertificates[0] for _, value := range returnValues { switch value { case "CurlResponse": certDetails.CurlResponse = string(body) case "Subject": certDetails.Subject = cert.Subject.String() case "Issuer": certDetails.Issuer = cert.Issuer.String() case "NotBefore": certDetails.NotBefore = cert.NotBefore.Format(time.RFC3339) case "NotAfter": certDetails.NotAfter = cert.NotAfter.Format(time.RFC3339) case "SubjectAltName": certDetails.SubjectAltName = cert.DNSNames case "SerialNumber": certDetails.SerialNumber = cert.SerialNumber.String() } } } return true, nil }) if err != nil { return nil, fmt.Errorf("Error performing HTTP request: %s", err) } return certDetails, nil } func runSSHCommand(server, user string, commands ...string) (string, error) { // Combine commands into a single string fullCommand := strings.Join(commands, " ") sshkey, err := exutil.GetPrivateKey() 
o.Expect(err).NotTo(o.HaveOccurred()) sshClient := exutil.SshClient{User: user, Host: server, Port: 22, PrivateKey: sshkey} return sshClient.RunOutput(fullCommand) } func getProxyURL() *url.URL { // Prefer https_proxy, fallback to http_proxy proxyURLString := os.Getenv("https_proxy") if proxyURLString == "" { proxyURLString = os.Getenv("http_proxy") } if proxyURLString == "" { return nil } proxyURL, err := url.Parse(proxyURLString) if err != nil { e2e.Failf("error parsing proxy URL: %v", err) } return proxyURL } func getMicroshiftHostname(oc *exutil.CLI) string { microShiftURL, err := oc.AsAdmin().WithoutNamespace().Run("config").Args("view", "-ojsonpath={.clusters[0].cluster.server}").Output() o.Expect(err).NotTo(o.HaveOccurred()) fqdnName, err := url.Parse(microShiftURL) o.Expect(err).NotTo(o.HaveOccurred()) return fqdnName.Hostname() } func applyLabel(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) { _, err := doAction(oc, "label", asAdmin, withoutNamespace, parameters...) o.Expect(err).NotTo(o.HaveOccurred(), "Adding label to the namespace failed") } // Function to get audit event logs for user login. 
func checkUserAuditLog(oc *exutil.CLI, logGroup string, user string, pass string) (string, int) { var ( eventLogs string eventCount = 0 n int now = time.Now().UTC().Unix() ) errUser := oc.AsAdmin().WithoutNamespace().Run("login").Args("-u", user, "-p", pass).NotShowInfo().Execute() o.Expect(errUser).NotTo(o.HaveOccurred()) whoami, err := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("whoami: %s", whoami) err = oc.AsAdmin().WithoutKubeconf().WithoutNamespace().Run("logout").Args().Execute() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The user %s logged out successfully", user) script := fmt.Sprintf(`rm -if /tmp/audit-test-*.json; for logpath in kube-apiserver oauth-apiserver openshift-apiserver;do grep -h "%s" /var/log/${logpath}/audit*.log | jq -c 'select (.requestReceivedTimestamp | .[0:19] + "Z" | fromdateiso8601 > %v)' >> /tmp/audit-test-$logpath.json; done; cat /tmp/audit-test-*.json`, logGroup, now) contextErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", "admin").Execute() o.Expect(contextErr).NotTo(o.HaveOccurred()) e2e.Logf("Get all master nodes.") masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master") o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred()) o.Expect(masterNodes).NotTo(o.BeEmpty()) for _, masterNode := range masterNodes { eventLogs, n = checkAuditLogs(oc, script, masterNode, "openshift-kube-apiserver") e2e.Logf("event logs count:%v", n) eventCount += n } return eventLogs, eventCount } func verifyMicroshiftLogs(nodename string, cmd string, cmp string) (string, int, error) { var ( output string err error ) mstatusErr := wait.PollUntilContextTimeout(context.Background(), 6*time.Second, 200*time.Second, false, func(cxt context.Context) (bool, error) { output, err = runSSHCommand(nodename, "redhat", cmd) if err != nil { return false, err } count := len(strings.TrimSpace(output)) switch cmp { case "==": if count == 0 { return true, nil } 
case ">": if count > 0 { return true, nil } case "<": if count < 0 { return true, nil } default: return false, fmt.Errorf("invalid comparison operator") } return false, nil }) return output, len(strings.TrimSpace(output)), mstatusErr } func getMicroshiftConfig(nodeName string, cmd string, keyValue string) (string, error) { var strValue string mstatusErr := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { // Run SSH command to get the YAML configuration yamlData, err := runSSHCommand(nodeName, "redhat", cmd) if err == nil && yamlData != "" { yamlToJson, yamlErr := exutil.Yaml2Json(yamlData) if yamlErr == nil && yamlToJson != "" { // Parse YAML data yamlJson := gjson.Parse(yamlToJson).String() if yamlJson != "" { // Get value from JSON using provided key strValue = gjson.Get(yamlJson, keyValue).String() if strValue != "" { e2e.Logf("Config values : %s", strValue) return true, nil } } } } return false, nil }) return strValue, mstatusErr } func gatherSosreports(fqdnName string, user string, sosReportCmd string, tmpdir string) string { sosreportStatus, sosErr := runSSHCommand(fqdnName, user, sosReportCmd) o.Expect(sosErr).NotTo(o.HaveOccurred()) e2e.Logf("SOS Report :: %v", sosreportStatus) o.Expect(strings.Contains(sosreportStatus, "Your sos report has been generated and saved in")).To(o.BeTrue()) o.Expect(strings.Contains(sosreportStatus, tmpdir+"/sosreport")).To(o.BeTrue()) return sosreportStatus } func clusterSanityCheck(oc *exutil.CLI) error { var ( project_ns = exutil.GetRandomString() errCreateProj error ) statusNode, errNode := getResource(oc, asAdmin, withoutNamespace, "node") if errNode != nil { e2e.Logf("Error fetching Node Status: %s :: %s", statusNode, errNode.Error()) if strings.ContainsAny(errNode.Error(), "Unable to connect to the server: net/http: TLS handshake timeout") { e2e.Failf("Cluster Not accessible, may be env issue issue or network disruption") } } statusCO, 
errCO := getResource(oc, asAdmin, withoutNamespace, "co") if errCO != nil { e2e.Logf("Error fetching Cluster Operators Status: %s :: %s", statusCO, errCO.Error()) if strings.ContainsAny(errCO.Error(), "Unable to connect to the server: tls: failed to verify certificate: x509: certificate signed by unknown authority") { status, _ := getResource(oc, asAdmin, withoutNamespace, "co", "--insecure-skip-tls-verify") e2e.Logf("cluster Operators Status :: %s", status) statusKAS, _ := getResource(oc, asAdmin, withoutNamespace, "co", "kube-apiserver", "-o", "yaml", "--insecure-skip-tls-verify") e2e.Logf("KAS Operators Status :: %s", statusKAS) } } // retry to create new project to avoid transient ServiceUnavailable of openshift-apiserver o.Eventually(func() bool { errCreateProj = oc.AsAdmin().WithoutNamespace().Run("new-project").Args(project_ns, "--skip-config-write").Execute() return errCreateProj == nil }, 9*time.Second, 3*time.Second).Should(o.BeTrue(), fmt.Sprintf("Failed to create project %s with error %v", project_ns, errCreateProj)) if errCreateProj != nil && strings.ContainsAny(errCreateProj.Error(), "the server is currently unable to handle the request") { status, _ := getResource(oc, asAdmin, withoutNamespace, "co") e2e.Logf("cluster Operators Status :: %s", status) } errDeleteProj := oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", project_ns, "--ignore-not-found").Execute() if errDeleteProj != nil { e2e.Logf("Error deleting project %s: %s", project_ns, errDeleteProj.Error()) } if errCO != nil || errCreateProj != nil || errDeleteProj != nil { return fmt.Errorf("cluster sanity check failed") } e2e.Logf("Cluster sanity check passed") return nil } func clusterSanityCheckMicroShift(oc *exutil.CLI) error { statusNode, errNode := getResource(oc, asAdmin, withoutNamespace, "node") if errNode != nil { e2e.Logf("Error fetching Node Status: %s :: %s", statusNode, errNode.Error()) if strings.ContainsAny(errNode.Error(), "Unable to connect to the server: net/http: 
TLS handshake timeout") { e2e.Failf("Cluster Not accessible, may be env issue issue or network disruption") } } project_ns := exutil.GetRandomString() errCreateNs := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", project_ns).Execute() if errCreateNs != nil { e2e.Logf("Error creating project %s: %s", project_ns, errCreateNs.Error()) } errDeleteNs := oc.WithoutNamespace().Run("delete").Args("ns", project_ns, "--ignore-not-found").Execute() if errDeleteNs != nil { e2e.Logf("Error deleting project %s: %s", project_ns, errDeleteNs.Error()) } if errCreateNs != nil || errDeleteNs != nil { return fmt.Errorf("Cluster sanity check failed") } e2e.Logf("Cluster sanity check passed") return nil } // getPendingCSRs retrieves all pending CSRs and returns a list of their names func getPendingCSRs(oc *exutil.CLI) ([]string, error) { output := getResourceToBeReady(oc, asAdmin, withoutNamespace, "csr") o.Expect(output).NotTo(o.BeEmpty()) // Convert the output to a string and split it into lines outputStr := string(output) lines := strings.Split(outputStr, "\n") var pendingCSRs []string // Filter for CSRs with status "Pending" and extract the CSR name for _, line := range lines { if strings.Contains(line, "Pending") { fields := strings.Fields(line) if len(fields) > 0 { pendingCSRs = append(pendingCSRs, fields[0]) // Append CSR name to the list } } } // If no pending CSRs were found, return an empty list and no error return pendingCSRs, nil } func getResourceWithKubeconfig(oc *exutil.CLI, newKubeconfig string, waitForError bool, getResource ...string) (string, error) { var output string var err error args := append([]string{newKubeconfig}, getResource...) 
pollErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 120*time.Second, false, func(ctx context.Context) (bool, error) { output, err = oc.AsAdmin().WithoutNamespace().WithoutKubeconf().Run("--kubeconfig").Args(args...).Output() if err != nil { if waitForError { return false, nil } return true, err } return true, nil // Success }) if pollErr != nil { if waitForError { return "", fmt.Errorf("timed out waiting for `%v` command to succeed: %w :: and error is `%v`", getResource, pollErr, err) } return "", pollErr } return output, err } func kasOperatorCheckForStep(oc *exutil.CLI, preConfigKasStatus map[string]string, step string, msg string) { var ( coName = "kube-apiserver" kubeApiserverCoStatus = map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} ) e2e.Logf("Pre-configuration with %s operator status before %s: %s", coName, msg, preConfigKasStatus) // It takes about 30 seconds for KAS rolling out from deployment to progress // wait some bit more time and double check, to ensure it is stably healthy time.Sleep(45 * time.Second) postConfigKasStatus := getCoStatus(oc, coName, kubeApiserverCoStatus) e2e.Logf("Post-configuration with %s operator status after %s %s", coName, msg, postConfigKasStatus) // Check if KAS operator status is changed after ValidatingWebhook configration creation if !reflect.DeepEqual(preConfigKasStatus, postConfigKasStatus) { if reflect.DeepEqual(preConfigKasStatus, kubeApiserverCoStatus) { // preConfigKasStatus has the same status of kubeApiserverCoStatus, means KAS operator is changed from stable to unstable e2e.Failf("Test step-%s failed: %s operator are abnormal after %s!", step, coName, msg) } } } // createSecretsWithQuotaValidation creates secrets until the quota is reached func createSecretsWithQuotaValidation(oc *exutil.CLI, namespace, clusterQuotaName string, crqLimits map[string]string, caseID string) { // Step 1: Retrieve current secret count secretCount, err := 
oc.Run("get").Args("-n", namespace, "clusterresourcequota", clusterQuotaName, "-o", `jsonpath={.status.namespaces[*].status.used.secrets}`).Output() o.Expect(err).NotTo(o.HaveOccurred()) usedCount, _ := strconv.Atoi(secretCount) limits, _ := strconv.Atoi(crqLimits["secrets"]) steps := 1 // Step 2: Create secrets and check if quota limit is reached for i := usedCount; i <= limits; i++ { secretName := fmt.Sprintf("%v-secret-%d", caseID, steps) e2e.Logf("Creating secret %s", secretName) // Attempt to create the secret output, err := oc.Run("create").Args("-n", namespace, "secret", "generic", secretName).Output() // Step 3: Expect failure when reaching the quota limit if i < limits { output1, _ := oc.Run("get").Args("-n", namespace, "secret").Output() e2e.Logf("Get total secrets created to debug :: %s", output1) o.Expect(err).NotTo(o.HaveOccurred()) // Expect success before quota is reached } else { // Expect the specific "exceeded quota" error message if err != nil && strings.Contains(output, "secrets.*forbidden: exceeded quota") { e2e.Logf("Quota limit reached, as expected.") } else { o.Expect(err).To(o.HaveOccurred()) // Fail if any other error occurs } } steps++ } } func checkDisconnect(oc *exutil.CLI) bool { workNode, err := exutil.GetFirstWorkerNode(oc) o.Expect(err).ShouldNot(o.HaveOccurred()) curlCMD := "curl -I ifconfig.me --connect-timeout 5" output, err := exutil.DebugNode(oc, workNode, "bash", "-c", curlCMD) if !strings.Contains(output, "HTTP") || err != nil { e2e.Logf("Unable to access the public Internet from the cluster.") return true } e2e.Logf("Successfully connected to the public Internet from the cluster.") return false } // Validate MicroShift Config func validateMicroshiftConfig(fqdnName string, user string, patternToMatch string) { e2e.Logf("Check manifest config") chkConfigCmd := `sudo /usr/bin/microshift show-config --mode effective 2>/dev/null` re := regexp.MustCompile(patternToMatch) mchkConfig, mchkConfigErr := runSSHCommand(fqdnName, user, 
chkConfigCmd) o.Expect(mchkConfigErr).NotTo(o.HaveOccurred()) match := re.MatchString(mchkConfig) if !match { e2e.Failf("Config not matched :: \n" + mchkConfig) } } // fetchOpenShiftAPIServerCert fetches the server's certificate and returns it as a PEM-encoded string. func fetchOpenShiftAPIServerCert(apiServerEndpoint string) ([]byte, error) { timeout := 120 * time.Second retryInterval := 20 * time.Second // Create a cancellable context for polling ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() transport := &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, }, } proxyURL := getProxyURL() transport.Proxy = http.ProxyURL(proxyURL) // Set up TLS configuration and DialContext transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { return (&net.Dialer{}).DialContext(ctx, network, addr) } client := &http.Client{ Transport: transport, } var pemCert []byte pollFunc := func(ctx context.Context) (done bool, err error) { // Attempt to send a GET request to the OpenShift API server resp, err := client.Get(apiServerEndpoint) if err != nil { e2e.Logf("Error connecting to the OpenShift API server: %v. 
Retrying...\n", err) return false, nil } defer resp.Body.Close() // Check TLS connection state tlsConnectionState := resp.TLS if tlsConnectionState == nil { return false, fmt.Errorf("No TLS connection established") } // Encode the server's certificate to PEM format cert := tlsConnectionState.PeerCertificates[0] pemCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) if pemCert == nil { return false, fmt.Errorf("Error encoding certificate to PEM") } fmt.Println("Certificate fetched successfully") return true, nil } err := wait.PollUntilContextTimeout(ctx, retryInterval, timeout, true, pollFunc) if err != nil { return nil, fmt.Errorf("failed to fetch certificate within timeout: %w", err) } return pemCert, nil } // Generate a random string with given number of digits func getRandomString(digit int) string { chars := "abcdefghijklmnopqrstuvwxyz0123456789" seed := rand.New(rand.NewSource(time.Now().UnixNano())) buffer := make([]byte, digit) for index := range buffer { buffer[index] = chars[seed.Intn(len(chars))] } return string(buffer) } func getSAToken(oc *exutil.CLI, sa, ns string) (string, error) { e2e.Logf("Getting a token assgined to specific serviceaccount from %s namespace...", ns) token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", sa, "-n", ns).Output() if err != nil { if strings.Contains(token, "unknown command") { // oc client is old version, create token is not supported e2e.Logf("oc create token is not supported by current client, use oc sa get-token instead") token, err = oc.AsAdmin().WithoutNamespace().Run("sa").Args("get-token", sa, "-n", ns).Output() } else { return "", err } } return token, err }
package apiserverauth
function
openshift/openshift-tests-private
ac1aecfc-edaf-480f-9a24-85764949a090
createAdmissionWebhookFromTemplate
['admissionWebhook']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// createAdmissionWebhookFromTemplate instantiates the admission-webhook template as a
// cluster resource, mapping every field of the receiver onto its template parameter.
func (admissionHook *admissionWebhook) createAdmissionWebhookFromTemplate(oc *exutil.CLI) {
	templateParams := []string{
		"--ignore-unknown-parameters=true",
		"-f", admissionHook.template,
		"-p",
		"NAME=" + admissionHook.name,
		"WEBHOOKNAME=" + admissionHook.webhookname,
		"SERVICENAMESPACE=" + admissionHook.servicenamespace,
		"SERVICENAME=" + admissionHook.servicename,
		"NAMESPACE=" + admissionHook.namespace,
		"APIGROUPS=" + admissionHook.apigroups,
		"APIVERSIONS=" + admissionHook.apiversions,
		"OPERATIONS=" + admissionHook.operations,
		"RESOURCES=" + admissionHook.resources,
		"KIND=" + admissionHook.kind,
		"SHORTNAME=" + admissionHook.shortname,
		"SINGULARNAME=" + admissionHook.singularname,
		"PLURALNAME=" + admissionHook.pluralname,
		"VERSION=" + admissionHook.version,
	}
	exutil.CreateClusterResourceFromTemplate(oc, templateParams...)
}
apiserverauth
function
openshift/openshift-tests-private
3ef6b976-5798-4dc3-942b-2e5d35f63dd7
createServiceFromTemplate
['service']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// createServiceFromTemplate instantiates the service template as a cluster resource,
// substituting the receiver's name, cluster IP and namespace as template parameters.
func (service *service) createServiceFromTemplate(oc *exutil.CLI) {
	templateParams := []string{
		"--ignore-unknown-parameters=true",
		"-f", service.template,
		"-p",
		"NAME=" + service.name,
		"CLUSTERIP=" + service.clusterip,
		"NAMESPACE=" + service.namespace,
	}
	exutil.CreateClusterResourceFromTemplate(oc, templateParams...)
}
apiserverauth
function
openshift/openshift-tests-private
24e19507-392b-46c9-a7da-da10044595b4
compareAPIServerWebhookConditions
['"context"', '"errors"', '"io"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"github.com/tidwall/gjson"', 'apierrors "k8s.io/apimachinery/pkg/api/errors"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func compareAPIServerWebhookConditions(oc *exutil.CLI, conditionReason interface{}, conditionStatus string, conditionTypes []string) { for _, webHookErrorConditionType := range conditionTypes { // increase wait time for prow ci failures err := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) { webhookError, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiserver/cluster", "-o", `jsonpath='{.status.conditions[?(@.type=="`+webHookErrorConditionType+`")]}'`).Output() o.Expect(err).NotTo(o.HaveOccurred()) //Inline conditional statement for evaluating 1) reason and status together,2) only status. webhookConditionStatus := gjson.Get(webhookError, `status`).String() // If webhook errors from the created flowcollectorconversionwebhook by case OCP-73539, // the webhook condition status will be "True", not the expected "False" if strings.Contains(webhookError, "flows.netobserv.io: dial tcp") { conditionStatus = "True" } isWebhookConditionMet := containsAnyWebHookReason(webhookError, conditionReason) && webhookConditionStatus == conditionStatus if isWebhookConditionMet { e2e.Logf("kube-apiserver admission webhook errors as \n %s ::: %s ::: %s ::: %s", conditionStatus, webhookError, webHookErrorConditionType, conditionReason) o.Expect(webhookError).Should(o.MatchRegexp(`"type":"%s"`, webHookErrorConditionType), "Mismatch in 'type' of admission errors reported") o.Expect(webhookError).Should(o.MatchRegexp(`"status":"%s"`, conditionStatus), "Mismatch in 'status' of admission errors reported") return true, nil } // Adding logging for more debug e2e.Logf("Retrying for expected kube-apiserver admission webhook error ::: %s ::: %s ::: %s ::: %s", conditionStatus, webhookError, webHookErrorConditionType, conditionReason) return false, nil }) if err != nil { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ValidatingWebhookConfiguration").Output() e2e.Logf("#### Debug #### 
List all ValidatingWebhookConfiguration when the case runs into failures:%s\n", output) exutil.AssertWaitPollNoErr(err, "Test Fail: Expected Kube-apiserver admissionwebhook errors not present.") } } }
apiserverauth
function
openshift/openshift-tests-private
87903867-9c68-4f0c-a924-20d2d28a2ff7
GetEncryptionPrefix
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'configv1 "github.com/openshift/api/config/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func GetEncryptionPrefix(oc *exutil.CLI, key string) (string, error) { var etcdPodName string encryptionType, err1 := oc.WithoutNamespace().Run("get").Args("apiserver/cluster", "-o=jsonpath={.spec.encryption.type}").Output() o.Expect(err1).NotTo(o.HaveOccurred()) if encryptionType != "aesabc" && encryptionType != "aesgcm" { e2e.Logf("The etcd is not encrypted on!") } err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) { podName, err := oc.WithoutNamespace().Run("get").Args("pods", "-n", "openshift-etcd", "-l=etcd", "-o=jsonpath={.items[0].metadata.name}").Output() if err != nil { e2e.Logf("Fail to get etcd pod, error: %s. Trying again", err) return false, nil } etcdPodName = podName return true, nil }) if err != nil { return "", err } var encryptionPrefix string err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) { prefix, err := oc.WithoutNamespace().Run("rsh").Args("-n", "openshift-etcd", "-c", "etcd", etcdPodName, "bash", "-c", `etcdctl get `+key+` --prefix -w fields | grep -e "Value" | grep -o k8s:enc:`+encryptionType+`:v1:[^:]*: | head -n 1`).Output() if err != nil { e2e.Logf("Fail to rsh into etcd pod, error: %s. Trying again", err) return false, nil } encryptionPrefix = prefix return true, nil }) if err != nil { return "", err } return encryptionPrefix, nil }
apiserverauth
function
openshift/openshift-tests-private
866ed514-31f1-4fe1-b42e-4694c9d17fba
GetEncryptionKeyNumber
['"regexp"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func GetEncryptionKeyNumber(oc *exutil.CLI, patten string) (int, error) { secretNames, err := oc.WithoutNamespace().Run("get").Args("secrets", "-n", "openshift-config-managed", `-o=jsonpath={.items[*].metadata.name}`, "--sort-by=metadata.creationTimestamp").Output() if err != nil { e2e.Logf("Fail to get secret, error: %s", err) return 0, nil } rePattern := regexp.MustCompile(patten) locs := rePattern.FindAllStringIndex(secretNames, -1) i, j := locs[len(locs)-1][0], locs[len(locs)-1][1] maxSecretName := secretNames[i:j] strSlice := strings.Split(maxSecretName, "-") var number int number, err = strconv.Atoi(strSlice[len(strSlice)-1]) if err != nil { e2e.Logf("Fail to get secret, error: %s", err) return 0, nil } return number, nil }
apiserverauth
function
openshift/openshift-tests-private
a25bf5a8-7b2a-4f1e-8827-37be7b782db4
WaitEncryptionKeyMigration
['"context"', '"errors"', '"io"', '"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'apierrors "k8s.io/apimachinery/pkg/api/errors"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func WaitEncryptionKeyMigration(oc *exutil.CLI, secret string) (bool, error) { var pattern string var waitTime time.Duration if strings.Contains(secret, "openshift-apiserver") { pattern = `migrated-resources: .*route.openshift.io.*routes` waitTime = 15 * time.Minute } else if strings.Contains(secret, "openshift-kube-apiserver") { pattern = `migrated-resources: .*configmaps.*secrets.*` waitTime = 30 * time.Minute // see below explanation } else { return false, errors.New("Unknown key " + secret) } rePattern := regexp.MustCompile(pattern) // In observation, the waiting time in max can take 25 mins if it is kube-apiserver, // and 12 mins if it is openshift-apiserver, so the Poll parameters are long. err := wait.PollUntilContextTimeout(context.Background(), 1*time.Minute, waitTime, false, func(cxt context.Context) (bool, error) { output, err := oc.WithoutNamespace().Run("get").Args("secrets", secret, "-n", "openshift-config-managed", "-o=yaml").Output() if err != nil { e2e.Logf("Fail to get the encryption key secret %s, error: %s. Trying again", secret, err) return false, nil } matchedStr := rePattern.FindString(output) if matchedStr == "" { e2e.Logf("Not yet see migrated-resources. Trying again") return false, nil } e2e.Logf("Saw all migrated-resources:\n%s", matchedStr) return true, nil }) if err != nil { return false, err } return true, nil }
apiserverauth
function
openshift/openshift-tests-private
cf5bd0f4-5e2d-4b88-b9d8-1d93a254b2bf
CheckIfResourceAvailable
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func CheckIfResourceAvailable(oc *exutil.CLI, resource string, resourceNames []string, namespace ...string) (string, bool) { args := append([]string{resource}, resourceNames...) if len(namespace) == 1 { args = append(args, "-n", namespace[0]) // HACK: implement no namespace input } out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output() if err == nil { for _, resourceName := range resourceNames { o.Expect(out).Should(o.ContainSubstring(resourceName)) return out, true } } else { e2e.Logf("Debug logs :: Resource '%s' not found :: %s :: %s\n", resource, out, err.Error()) return out, false } return "", true }
apiserverauth
function
openshift/openshift-tests-private
009080c4-df65-4f7f-aeaa-43b72f98d91e
waitCoBecomes
['"context"', '"reflect"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// waitCoBecomes polls the named clusteroperator every 20s for up to waitTime seconds until
// its conditions match expectedStatus (keys are condition types such as "Available", values
// are "True"/"False"). When the expectation is the fully-healthy triple
// (Available=True, Progressing=False, Degraded=False) it waits an extra 100s and re-checks
// once more, to ensure the operator is stably healthy rather than transiently so.
// Returns the poll error (nil on success); on timeout it also runs `oc get co` for debugging.
func waitCoBecomes(oc *exutil.CLI, coName string, waitTime int, expectedStatus map[string]string) error {
	errCo := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
		gottenStatus := getCoStatus(oc, coName, expectedStatus)
		eq := reflect.DeepEqual(expectedStatus, gottenStatus)
		if eq {
			// Note: this inner eq deliberately shadows the outer one; it tests whether
			// the *expectation itself* is the fully-healthy triple.
			eq := reflect.DeepEqual(expectedStatus, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"})
			if eq {
				// For True False False, we want to wait some bit more time and double check, to ensure it is stably healthy
				time.Sleep(100 * time.Second)
				gottenStatus := getCoStatus(oc, coName, expectedStatus)
				eq := reflect.DeepEqual(expectedStatus, gottenStatus)
				if eq {
					e2e.Logf("Given operator %s becomes available/non-progressing/non-degraded", coName)
					return true, nil
				}
				// Status changed during the stabilization window: keep polling.
			} else {
				// Any other expected status combination is accepted on the first match.
				e2e.Logf("Given operator %s becomes %s", coName, gottenStatus)
				return true, nil
			}
		}
		return false, nil
	})
	if errCo != nil {
		// On timeout, dump the clusteroperator table to the test log for debugging.
		err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	return errCo
}
apiserverauth
function
openshift/openshift-tests-private
b9f23852-6a0f-4a95-a27c-d7c5d6c8aaec
getCoStatus
['"fmt"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// getCoStatus returns, for each condition type appearing as a key in statusToCompare, the
// current status string ("True"/"False"/"Unknown") of that condition on the named
// clusteroperator. Only the keys of statusToCompare are used; its values are ignored.
func getCoStatus(oc *exutil.CLI, coName string, statusToCompare map[string]string) map[string]string {
	newStatusToCompare := make(map[string]string)
	for key := range statusToCompare {
		// jsonpath filter selecting the condition with the matching type; yields its status.
		args := fmt.Sprintf(`-o=jsonpath={.status.conditions[?(.type == '%s')].status}`, key)
		// Lookup errors are deliberately ignored; a failed get leaves an empty status string,
		// which will simply fail the caller's comparison.
		status, _ := getResource(oc, asAdmin, withoutNamespace, "co", coName, args)
		newStatusToCompare[key] = status
	}
	return newStatusToCompare
}
apiserverauth
function
openshift/openshift-tests-private
70d483ee-d3ba-413b-a62e-85a8f92c17d0
verifyCiphers
['"context"', '"fmt"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// verifyCiphers polls (every 5s, up to 5 min) until the TLS cipher configuration of the given
// operator matches expectedCipher. Supported operators:
//   - "openshift-authentication": ciphers read from the v4-0-config-system-cliconfig configmap
//     (the servingInfo JSON is extracted with jq on the local shell);
//   - "openshiftapiservers.operator" / "kubeapiservers.operator": ciphers read from the
//     operator's observedConfig via jsonpath.
// Any other operator string logs a message and the poll eventually times out.
// Returns the poll error (nil when the ciphers matched within the window).
func verifyCiphers(oc *exutil.CLI, expectedCipher string, operator string) error {
	return wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
		switch operator {
		case "openshift-authentication":
			e2e.Logf("Get the ciphers for openshift-authentication:")
			getadminoutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-authentication", "v4-0-config-system-cliconfig", "-o=jsonpath='{.data.v4-0-config-system-cliconfig}'").Output()
			if err == nil {
				// Use jqCMD to call jq because .servingInfo part JSON comming in string format
				jqCMD := fmt.Sprintf(`echo %s | jq -cr '.servingInfo | "\(.cipherSuites) \(.minTLSVersion)"'|tr -d '\n'`, getadminoutput)
				output, err := exec.Command("bash", "-c", jqCMD).Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				gottenCipher := string(output)
				e2e.Logf("Comparing the ciphers: %s with %s", expectedCipher, gottenCipher)
				if expectedCipher == gottenCipher {
					e2e.Logf("Ciphers are matched: %s", gottenCipher)
					return true, nil
				}
				e2e.Logf("Ciphers are not matched: %s", gottenCipher)
				return false, nil
			}
			// Read failed: retry on the next poll iteration.
			return false, nil
		case "openshiftapiservers.operator", "kubeapiservers.operator":
			e2e.Logf("Get the ciphers for %s:", operator)
			getadminoutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(operator, "cluster", "-o=jsonpath={.spec.observedConfig.servingInfo['cipherSuites', 'minTLSVersion']}").Output()
			if err == nil {
				e2e.Logf("Comparing the ciphers: %s with %s", expectedCipher, getadminoutput)
				if expectedCipher == getadminoutput {
					e2e.Logf("Ciphers are matched: %s", getadminoutput)
					return true, nil
				}
				e2e.Logf("Ciphers are not matched: %s", getadminoutput)
				return false, nil
			}
			// Read failed: retry on the next poll iteration.
			return false, nil
		default:
			// Unknown operator: keep polling until timeout (caller receives the poll error).
			e2e.Logf("Operators parameters not correct..")
		}
		return false, nil
	})
}
apiserverauth
function
openshift/openshift-tests-private
385d8398-8818-4c6a-ba6b-519c0428e26c
restoreClusterOcp41899
['"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func restoreClusterOcp41899(oc *exutil.CLI) { e2e.Logf("Checking openshift-controller-manager operator should be Available") expectedStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"} err := waitCoBecomes(oc, "openshift-controller-manager", 500, expectedStatus) exutil.AssertWaitPollNoErr(err, "openshift-controller-manager operator is not becomes available") output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", "openshift-config").Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(output, "client-ca-custom") { configmapErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "client-ca-custom", "-n", "openshift-config").Execute() o.Expect(configmapErr).NotTo(o.HaveOccurred()) e2e.Logf("Cluster configmap reset to default values") } else { e2e.Logf("Cluster configmap not changed from default values") } }
apiserverauth
function
openshift/openshift-tests-private
06cca9ed-2a09-4865-b789-bdb241ddb9ac
checkClusterLoad
['"context"', '"fmt"', '"io"', '"os/exec"', '"regexp"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// checkClusterLoad returns the average CPU and memory utilization percentages (as integers)
// across all nodes carrying the role label "node-role.kubernetes.io/<nodeType>", computed from
// `oc adm top nodes` output. The raw output is written to a temp file named by dirname.
func checkClusterLoad(oc *exutil.CLI, nodeType, dirname string) (int, int) {
	var tmpPath string
	var errAdm error
	// Metrics can be temporarily unavailable; retry `oc adm top nodes` for up to 5 minutes.
	errAdmNode := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
		tmpPath, errAdm = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "nodes", "-l", "node-role.kubernetes.io/"+nodeType, "--no-headers").OutputToFile(dirname)
		if errAdm != nil {
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(errAdmNode, fmt.Sprintf("Not able to run adm top command :: %v", errAdm))
	// Average column 3 (CPU%) over all node rows, truncated to an integer.
	cmd := fmt.Sprintf(`cat %v | grep -v 'protocol-buffers' | awk '{print $3}'|awk -F '%%' '{ sum += $1 } END { print(sum / NR) }'|cut -d "." -f1`, tmpPath)
	cpuAvg, err := exec.Command("bash", "-c", cmd).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Average column 5 (MEM%) over all node rows, truncated to an integer.
	cmd = fmt.Sprintf(`cat %v | grep -v 'protocol-buffers' | awk '{print $5}'|awk -F'%%' '{ sum += $1 } END { print(sum / NR) }'|cut -d "." -f1`, tmpPath)
	memAvg, err := exec.Command("bash", "-c", cmd).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Strip every non-word character (trailing newlines etc.) before integer conversion.
	re, _ := regexp.Compile(`[^\w]`)
	cpuAvgs := string(cpuAvg)
	memAvgs := string(memAvg)
	cpuAvgs = re.ReplaceAllString(cpuAvgs, "")
	memAvgs = re.ReplaceAllString(memAvgs, "")
	// Conversion errors are ignored; an unparsable average yields 0.
	cpuAvgVal, _ := strconv.Atoi(cpuAvgs)
	memAvgVal, _ := strconv.Atoi(memAvgs)
	return cpuAvgVal, memAvgVal
}
apiserverauth
function
openshift/openshift-tests-private
515b6a08-e6da-4bdd-96d2-dcdba15f6538
checkResources
['"fmt"', '"os/exec"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func checkResources(oc *exutil.CLI, dirname string) map[string]string { resUsedDet := make(map[string]string) resUsed := []string{"secrets", "deployments", "namespaces", "pods"} for _, key := range resUsed { tmpPath, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(key, "-A", "--no-headers").OutputToFile(dirname) o.Expect(err).NotTo(o.HaveOccurred()) cmd := fmt.Sprintf(`cat %v | wc -l | awk '{print $1}'`, tmpPath) output, err := exec.Command("bash", "-c", cmd).Output() o.Expect(err).NotTo(o.HaveOccurred()) resUsedDet[key] = string(output) } return resUsedDet }
apiserverauth
function
openshift/openshift-tests-private
40f34256-953e-4634-ac60-8e4bc4e92e36
getTestDataFilePath
['"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func getTestDataFilePath(filename string) string { // returns the file path of the testdata files with respect to apiserverauth subteam. apiDirName := "apiserverauth" apiBaseDir := "" if apiBaseDir = fixturePathCache[apiDirName]; len(apiBaseDir) == 0 { e2e.Logf("apiserver fixture dir is not initialized, start to create") apiBaseDir = exutil.FixturePath("testdata", apiDirName) fixturePathCache[apiDirName] = apiBaseDir e2e.Logf("apiserver fixture dir is initialized: %s", apiBaseDir) } else { apiBaseDir = fixturePathCache[apiDirName] e2e.Logf("apiserver fixture dir found in cache: %s", apiBaseDir) } return filepath.Join(apiBaseDir, filename) }
apiserverauth
function
openshift/openshift-tests-private
7f63287e-a5eb-4c81-9122-31f5f2b22080
checkCoStatus
['"reflect"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func checkCoStatus(oc *exutil.CLI, coName string, statusToCompare map[string]string) { // Check ,compare and assert the current cluster operator status against the expected status given. currentCoStatus := getCoStatus(oc, coName, statusToCompare) o.Expect(reflect.DeepEqual(currentCoStatus, statusToCompare)).To(o.Equal(true), "Wrong %s CO status reported, actual status : %s", coName, currentCoStatus) }
apiserverauth
function
openshift/openshift-tests-private
b68c6594-2f57-44bd-9260-aae69511822f
getNodePortRange
['"regexp"', '"strconv"']
['service']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func getNodePortRange(oc *exutil.CLI) (int, int) { // Follow the steps in https://docs.openshift.com/container-platform/4.11/networking/configuring-node-port-service-range.html output, err := oc.AsAdmin().Run("get").Args("configmaps", "-n", "openshift-kube-apiserver", "config", `-o=jsonpath="{.data['config\.yaml']}"`).Output() o.Expect(err).NotTo(o.HaveOccurred()) rgx := regexp.MustCompile(`"service-node-port-range":\["([0-9]*)-([0-9]*)"\]`) rs := rgx.FindSubmatch([]byte(output)) o.Expect(rs).To(o.HaveLen(3)) leftBound, err := strconv.Atoi(string(rs[1])) o.Expect(err).NotTo(o.HaveOccurred()) rightBound, err := strconv.Atoi(string(rs[2])) o.Expect(err).NotTo(o.HaveOccurred()) return leftBound, rightBound }
apiserverauth
function
openshift/openshift-tests-private
d02ae947-6d5f-4cd1-88e4-c347b359ed47
getRandomNum
['"math/rand"', '"time"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// getRandomNum returns a uniformly distributed random integer in the inclusive range [m, n].
// Requires m <= n (rand.Int31n panics when the span n-m+1 is not positive).
func getRandomNum(m int32, n int32) int32 {
	// Use a locally seeded generator instead of the deprecated global rand.Seed
	// (deprecated since Go 1.20); this also avoids reseeding shared global RNG
	// state on every call, which could collide across concurrent callers.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return rng.Int31n(n-m+1) + m
}
apiserverauth
function
openshift/openshift-tests-private
3fb86262-2174-449e-bcea-9a8bb06267e1
countResource
['"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func countResource(oc *exutil.CLI, resource string, namespace string) (int, error) { output, err := oc.Run("get").Args(resource, "-n", namespace, "-o", "jsonpath='{.items[*].metadata.name}'").Output() output = strings.Trim(strings.Trim(output, " "), "'") if output == "" { return 0, err } resources := strings.Split(output, " ") return len(resources), err }
apiserverauth
function
openshift/openshift-tests-private
433e5bf1-70d3-460f-b68c-8ed467772fc2
GetAlertsByName
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// GetAlertsByName returns the full alerts payload from the cluster's Prometheus monitor.
// NOTE(review): despite the name, the alertName parameter is currently unused and the
// result is NOT filtered to that alert — callers receive all alerts. Confirm whether
// filtering was intended before relying on the name.
func GetAlertsByName(oc *exutil.CLI, alertName string) (string, error) {
	mon, monErr := exutil.NewPrometheusMonitor(oc.AsAdmin())
	if monErr != nil {
		return "", monErr
	}
	allAlerts, allAlertErr := mon.GetAlerts()
	if allAlertErr != nil {
		return "", allAlertErr
	}
	return allAlerts, nil
}
apiserverauth
function
openshift/openshift-tests-private
f42187c6-8e40-40d2-b06d-af1abab38612
isSNOCluster
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func isSNOCluster(oc *exutil.CLI) bool { //Only 1 master, 1 worker node and with the same hostname. masterNodes, _ := exutil.GetClusterNodesBy(oc, "master") workerNodes, _ := exutil.GetClusterNodesBy(oc, "worker") if len(masterNodes) == 1 && len(workerNodes) == 1 && masterNodes[0] == workerNodes[0] { return true } return false }
apiserverauth
function
openshift/openshift-tests-private
1e5c30ff-c876-4b90-bc7f-1c0270210b0b
LoadCPUMemWorkload
['"errors"', '"fmt"', '"io"', '"net"', '"net/http"', '"os"', '"os/exec"', '"regexp"', '"strconv"', '"strings"', '"time"', 'apierrors "k8s.io/apimachinery/pkg/api/errors"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// LoadCPUMemWorkload puts synthetic CPU and memory load on the cluster's master nodes using
// the external `clusterbuster` tool. It sizes the load from `oc adm top node` measurements:
// it computes available CPU/memory headroom (after reserving reserveCPUP/reserveMemP percent
// and the busiest node's current usage), derives namespace/deployment/replica counts from
// that headroom and the remaining pod capacity, launches the load in the background, and
// finally greps the clusterbuster logs for panics/timeouts.
// NOTE(review): the workLoadtime parameter is not referenced in this body — confirm whether
// it was meant to drive --workloadruntime (hard-coded to 7200 below).
func LoadCPUMemWorkload(oc *exutil.CLI, workLoadtime int) {
	var (
		workerCPUtopstr string
		workerCPUtopint int
		workerMEMtopstr string
		workerMEMtopint int
		n               int // number of namespaces for the CPU load
		m               int // number of namespaces for the memory load
		r               int // replicas per deployment
		dn              int // deployments per namespace
		cpuMetric          = 800 // millicore budget assumed per CPU-load namespace
		memMetric          = 700 // MiB budget assumed per memory-load namespace
		reserveCPUP        = 50  // percent of total CPU kept free
		reserveMemP        = 50  // percent of total memory kept free
		snoPodCapacity     = 250 // kubelet default max pods per node
		reservePodCapacity = 120 // pods kept free for the system / other tests
	)
	workerCPUtopall := []int{}
	workerMEMtopall := []int{}
	randomStr := exutil.GetRandomString()
	dirname := fmt.Sprintf("/tmp/-load-cpu-mem_%s/", randomStr)
	defer os.RemoveAll(dirname)
	os.MkdirAll(dirname, 0755)

	// List the master nodes ("worker" naming is historical here) and take the first one.
	workerNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/master", "--no-headers").OutputToFile("load-cpu-mem_" + randomStr + "-log")
	o.Expect(err).NotTo(o.HaveOccurred())
	cmd := fmt.Sprintf(`cat %v |head -1 | awk '{print $1}'`, workerNode)
	cmdOut, err := exec.Command("bash", "-c", cmd).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	worker1 := strings.Replace(string(cmdOut), "\n", "", 1)
	// Check if there is an node.metrics on node
	err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodemetrics", worker1).Execute()
	var workerTop string
	if err == nil {
		workerTop, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "node", worker1, "--no-headers=true").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	// Columns of `adm top node`: $2 CPU(cores), $3 CPU%, $4 MEM(bytes), $5 MEM%.
	cpuUsageCmd := fmt.Sprintf(`echo "%v" | awk '{print $2}'`, workerTop)
	cpuUsage, err := exec.Command("bash", "-c", cpuUsageCmd).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	cpu1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cpuUsage), "")
	cpu, _ := strconv.Atoi(cpu1)
	cpuUsageCmdP := fmt.Sprintf(`echo "%v" | awk '{print $3}'`, workerTop)
	cpuUsageP, err := exec.Command("bash", "-c", cpuUsageCmdP).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	cpuP1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cpuUsageP), "")
	cpuP, _ := strconv.Atoi(cpuP1)
	// Extrapolate total CPU from used cores and used percentage.
	totalCPU := int(float64(cpu) / (float64(cpuP) / 100))
	cmd = fmt.Sprintf(`cat %v | awk '{print $1}'`, workerNode)
	workerCPU1, err := exec.Command("bash", "-c", cmd).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	workerCPU := strings.Fields(string(workerCPU1))
	workerNodeCount := len(workerCPU)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Collect each node's CPU% so we can find the busiest one.
	for i := 0; i < len(workerCPU); i++ {
		// Check if there is node.metrics on node
		err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodemetrics", workerCPU[i]).Execute()
		var workerCPUtop string
		if err == nil {
			workerCPUtop, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "node", workerCPU[i], "--no-headers=true").OutputToFile("load-cpu-mem_" + randomStr + "-log")
			o.Expect(err).NotTo(o.HaveOccurred())
		}
		workerCPUtopcmd := fmt.Sprintf(`cat %v | awk '{print $3}'`, workerCPUtop)
		workerCPUUsage, err := exec.Command("bash", "-c", workerCPUtopcmd).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		workerCPUtopstr = regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(workerCPUUsage), "")
		workerCPUtopint, _ = strconv.Atoi(workerCPUtopstr)
		workerCPUtopall = append(workerCPUtopall, workerCPUtopint)
	}
	// Reduce to the maximum CPU% across nodes (stored in element 0).
	for j := 1; j < len(workerCPU); j++ {
		if workerCPUtopall[0] < workerCPUtopall[j] {
			workerCPUtopall[0] = workerCPUtopall[j]
		}
	}
	cpuMax := workerCPUtopall[0]
	availableCPU := int(float64(totalCPU) * (100 - float64(reserveCPUP) - float64(cpuMax)) / 100)
	e2e.Logf("----> Cluster has total CPU, Reserved CPU percentage, Max CPU of node :%v,%v,%v", totalCPU, reserveCPUP, cpuMax)
	n = int(availableCPU / int(cpuMetric))
	if n <= 0 {
		e2e.Logf("No more CPU resource is available, no load will be added!")
	} else {
		// Choose replicas/deployments based on cluster size.
		if workerNodeCount == 1 {
			dn = 1
			r = 2
		} else {
			dn = 2
			if n > workerNodeCount {
				r = 3
			} else {
				r = workerNodeCount
			}
		}
		// Get the available pods of worker nodes, based on this, the upper limit for a namespace is calculated
		cmd1 := fmt.Sprintf(`oc describe node/%s | grep 'Non-terminated Pods' | grep -oP "[0-9]+"`, worker1)
		cmdOut1, err := exec.Command("bash", "-c", cmd1).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		usedPods, err := strconv.Atoi(regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cmdOut1), ""))
		o.Expect(err).NotTo(o.HaveOccurred())
		availablePods := snoPodCapacity - usedPods - reservePodCapacity
		if workerNodeCount > 1 {
			availablePods = availablePods * workerNodeCount
		}
		// Cap the namespace count so the total pod count fits the remaining capacity.
		nsMax := int(availablePods / dn / r)
		if nsMax > 0 {
			if n > nsMax {
				n = nsMax
			}
		} else {
			n = 1
			r = 1
			dn = 1
		}
		e2e.Logf("Start CPU load ...")
		cpuloadCmd := fmt.Sprintf(`clusterbuster --basename=cpuload --workload=cpusoaker --namespaces=%v --processes=1 --deployments=%v --node-selector=node-role.kubernetes.io/master --tolerate=node-role.kubernetes.io/master:Equal:NoSchedule --workloadruntime=7200 --report=none > %v &`, n, dn, dirname+"clusterbuster-cpu-log")
		e2e.Logf("%v", cpuloadCmd)
		// Fire-and-forget: Start (not Run) leaves the load running in the background.
		cmd := exec.Command("bash", "-c", cpuloadCmd)
		cmdErr := cmd.Start()
		o.Expect(cmdErr).NotTo(o.HaveOccurred())
		// Wait for 3 mins(this time is based on many tests), when the load starts, it will reach a peak within a few minutes, then falls back.
		time.Sleep(180 * time.Second)
		e2e.Logf("----> Created cpuload related pods: %v", n*r*dn)
	}

	// Same extrapolation for memory: $4 MEM(bytes), $5 MEM%.
	memUsageCmd := fmt.Sprintf(`echo "%v" | awk '{print $4}'`, workerTop)
	memUsage, err := exec.Command("bash", "-c", memUsageCmd).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	mem1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(memUsage), "")
	mem, _ := strconv.Atoi(mem1)
	memUsageCmdP := fmt.Sprintf(`echo "%v" | awk '{print $5}'`, workerTop)
	memUsageP, err := exec.Command("bash", "-c", memUsageCmdP).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	memP1 := regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(memUsageP), "")
	memP, _ := strconv.Atoi(memP1)
	totalMem := int(float64(mem) / (float64(memP) / 100))
	// Collect each node's MEM% so we can find the busiest one.
	for i := 0; i < len(workerCPU); i++ {
		// Check if there is node.metrics on node
		err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodemetrics", workerCPU[i]).Execute()
		var workerMEMtop string
		if err == nil {
			workerMEMtop, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "node", workerCPU[i], "--no-headers=true").OutputToFile("load-cpu-mem_" + randomStr + "-log")
			o.Expect(err).NotTo(o.HaveOccurred())
		}
		workerMEMtopcmd := fmt.Sprintf(`cat %v | awk '{print $5}'`, workerMEMtop)
		workerMEMUsage, err := exec.Command("bash", "-c", workerMEMtopcmd).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		workerMEMtopstr = regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(workerMEMUsage), "")
		workerMEMtopint, _ = strconv.Atoi(workerMEMtopstr)
		workerMEMtopall = append(workerMEMtopall, workerMEMtopint)
	}
	// Reduce to the maximum MEM% across nodes (stored in element 0).
	for j := 1; j < len(workerCPU); j++ {
		if workerMEMtopall[0] < workerMEMtopall[j] {
			workerMEMtopall[0] = workerMEMtopall[j]
		}
	}
	memMax := workerMEMtopall[0]
	availableMem := int(float64(totalMem) * (100 - float64(reserveMemP) - float64(memMax)) / 100)
	m = int(availableMem / int(memMetric))
	e2e.Logf("----> Cluster has total Mem, Reserved Mem percentage, Max memory of node :%v,%v,%v", totalMem, reserveMemP, memMax)
	if m <= 0 {
		e2e.Logf("No more memory resource is available, no load will be added!")
	} else {
		if workerNodeCount == 1 {
			dn = 1
			r = 2
		} else {
			r = workerNodeCount
			if m > workerNodeCount {
				dn = m
			} else {
				dn = workerNodeCount
			}
		}
		// Get the available pods of worker nodes, based on this, the upper limit for a namespace is calculated
		cmd1 := fmt.Sprintf(`oc describe node/%v | grep 'Non-terminated Pods' | grep -oP "[0-9]+"`, worker1)
		cmdOut1, err := exec.Command("bash", "-c", cmd1).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		usedPods, err := strconv.Atoi(regexp.MustCompile(`[^0-9 ]+`).ReplaceAllString(string(cmdOut1), ""))
		o.Expect(err).NotTo(o.HaveOccurred())
		availablePods := snoPodCapacity - usedPods - reservePodCapacity
		if workerNodeCount > 1 {
			availablePods = availablePods * workerNodeCount
			// Reduce the number pods in which workers create memory loads concurrently, avoid kubelet crash
			if availablePods > 200 {
				availablePods = int(availablePods / 2)
			}
		}
		nsMax := int(availablePods / dn / r)
		if nsMax > 0 {
			if m > nsMax {
				m = nsMax
			}
		} else {
			m = 1
			r = 1
			dn = 1
		}
		e2e.Logf("Start Memory load ...")
		memloadCmd := fmt.Sprintf(`clusterbuster --basename=memload --workload=memory --namespaces=%v --processes=1 --deployments=%v --node-selector=node-role.kubernetes.io/master --tolerate=node-role.kubernetes.io/master:Equal:NoSchedule --workloadruntime=7200 --report=none> %v &`, m, dn, dirname+"clusterbuster-mem-log")
		e2e.Logf("%v", memloadCmd)
		cmd := exec.Command("bash", "-c", memloadCmd)
		cmdErr := cmd.Start()
		o.Expect(cmdErr).NotTo(o.HaveOccurred())
		// Wait for 5 mins, ensure that all load pods are strated up.
		time.Sleep(300 * time.Second)
		e2e.Logf("----> Created memload related pods: %v", m*r*dn)
	}
	// If load are landed, will do some checking with logs
	if n > 0 || m > 0 {
		keywords := "body: net/http: request canceled (Client.Timeout|panic"
		bustercmd := fmt.Sprintf(`cat %v | grep -iE '%s' || true`, dirname+"clusterbuster*", keywords)
		busterLogs, err := exec.Command("bash", "-c", bustercmd).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		if len(busterLogs) > 0 {
			e2e.Logf("%s", busterLogs)
			e2e.Logf("Found some panic or timeout errors, if errors are potential bug then file a bug.")
		} else {
			e2e.Logf("No errors found in clusterbuster logs")
		}
	} else {
		e2e.Logf("No more CPU and memory resource, no any load is added.")
	}
}
apiserverauth
function
openshift/openshift-tests-private
fe50f6f6-dc2d-4009-9c56-ba04bb89e010
CopyToFile
['"io"', '"os"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func CopyToFile(fromPath string, toFilename string) string { // check if source file is regular file srcFileStat, err := os.Stat(fromPath) if err != nil { e2e.Failf("get source file %s stat failed: %v", fromPath, err) } if !srcFileStat.Mode().IsRegular() { e2e.Failf("source file %s is not a regular file", fromPath) } // open source file source, err := os.Open(fromPath) if err != nil { e2e.Failf("open source file %s failed: %v", fromPath, err) } defer source.Close() // open dest file saveTo := filepath.Join(e2e.TestContext.OutputDir, toFilename) dest, err := os.Create(saveTo) if err != nil { e2e.Failf("open destination file %s failed: %v", saveTo, err) } defer dest.Close() // copy from source to dest _, err = io.Copy(dest, source) if err != nil { e2e.Failf("copy file from %s to %s failed: %v", fromPath, saveTo, err) } return saveTo }
apiserverauth
function
openshift/openshift-tests-private
4ef73d14-661d-4fbc-a36e-49ded7c9b675
ExecCommandOnPod
['"context"', '"crypto/tls"', '"fmt"', '"os/exec"', '"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// ExecCommandOnPod runs a shell command inside the given pod, retrying every 15s for up to
// 5 minutes until non-empty output is obtained. If the exec fails with a TLS internal error
// (typically pending kubelet serving CSRs, see https://access.redhat.com/solutions/4307511),
// it approves all pending CSRs and retries. Empty output also triggers a retry, so this
// helper is only suitable for commands expected to produce output. Fails the test via
// AssertWaitPollNoErr when no successful, non-empty result is obtained in time.
func ExecCommandOnPod(oc *exutil.CLI, podname string, namespace string, command string) string {
	var podOutput string
	var execpodErr error

	errExec := wait.PollUntilContextTimeout(context.Background(), 15*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
		podOutput, execpodErr = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, podname, "--", "/bin/sh", "-c", command).Output()
		podOutput = strings.TrimSpace(podOutput)
		e2e.Logf("Attempting to execute command on pod %v. Output: %v, Error: %v", podname, podOutput, execpodErr)

		if execpodErr != nil {
			// Check for TLS internal error and handle CSR approval if detected, https://access.redhat.com/solutions/4307511
			matchTLS, _ := regexp.MatchString(`(?i)tls.*internal error`, podOutput)
			if matchTLS {
				e2e.Logf("Detected TLS error in output for pod %v: %v", podname, podOutput)

				// Attempt to approve any pending CSRs
				getCsr, getCsrErr := getPendingCSRs(oc)
				if getCsrErr != nil {
					e2e.Logf("Error retrieving pending CSRs: %v", getCsrErr)
					return false, nil
				}

				for _, csr := range getCsr {
					e2e.Logf("Approving CSR: %v", csr)
					appCsrErr := oc.WithoutNamespace().AsAdmin().Run("adm").Args("certificate", "approve", csr).Execute()
					if appCsrErr != nil {
						e2e.Logf("Error approving CSR %v: %v", csr, appCsrErr)
						return false, nil
					}
				}

				e2e.Logf("Pending CSRs approved. Retrying command on pod %v...", podname)
				return false, nil
			} else {
				e2e.Logf("Command execution error on pod %v: %v", podname, execpodErr)
				return false, nil
			}
		} else if podOutput != "" {
			// Success: exec worked and produced output.
			e2e.Logf("Successfully retrieved non-empty output from pod %v: %v", podname, podOutput)
			return true, nil
		} else {
			// Exec worked but produced nothing; treat as transient and retry.
			e2e.Logf("Received empty output from pod %v. Retrying...", podname)
			return false, nil
		}
	})

	exutil.AssertWaitPollNoErr(errExec, fmt.Sprintf("Unable to run command on pod %v :: %v :: Output: %v :: Error: %v", podname, command, podOutput, execpodErr))
	return podOutput
}
apiserverauth
function
openshift/openshift-tests-private
98489ece-462b-4c52-9304-9126d7ea880d
clusterHealthcheck
['"fmt"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func clusterHealthcheck(oc *exutil.CLI, dirname string) error { err := clusterNodesHealthcheck(oc, 600, dirname) if err != nil { return fmt.Errorf("Cluster nodes health check failed. Abnormality found in nodes.") } err = clusterOperatorHealthcheck(oc, 1500, dirname) if err != nil { return fmt.Errorf("Cluster operators health check failed. Abnormality found in cluster operators.") } err = clusterPodsHealthcheck(oc, 600, dirname) if err != nil { return fmt.Errorf("Cluster pods health check failed. Abnormality found in pods.") } return nil }
apiserverauth
function
openshift/openshift-tests-private
f761551e-88d0-40a8-a3d5-f2410f24968f
clusterOperatorHealthcheck
['"context"', '"fmt"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// clusterOperatorHealthcheck polls every 10s, for up to waitTime seconds,
// until every cluster operator is healthy. The `oc get co` table is saved to
// a file under dirname; `grep -v '.True.*False.*False'` drops rows matching
// the healthy Available=True/Progressing=False/Degraded=False pattern, so any
// surviving output means at least one abnormal operator. On timeout the
// current operator table is printed for debugging and the poll error returned.
func clusterOperatorHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
	e2e.Logf("Check the abnormal operators")
	errCo := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
		coLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "--no-headers").OutputToFile(dirname)
		if err == nil {
			// `|| true` keeps the pipeline exit code 0 when grep finds nothing.
			cmd := fmt.Sprintf(`cat %v | grep -v '.True.*False.*False' || true`, coLogFile)
			coLogs, err := exec.Command("bash", "-c", cmd).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			if len(coLogs) > 0 {
				return false, nil
			}
		} else {
			return false, nil
		}
		err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		e2e.Logf("No abnormality found in cluster operators...")
		return true, nil
	})
	if errCo != nil {
		err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	return errCo
}
apiserverauth
function
openshift/openshift-tests-private
4660802f-0e28-4250-9567-463832960882
clusterPodsHealthcheck
['"context"', '"fmt"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// clusterPodsHealthcheck polls every 5s, for up to waitTime seconds, until no
// pod in any namespace is in an abnormal state. The all-namespaces pod
// listing is saved under dirname; the grep filter removes Running/Completed
// rows, the header line ("namespace") and installer pods, so any remaining
// line is considered abnormal. On timeout the offending lines are logged and
// the poll error returned.
func clusterPodsHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
	e2e.Logf("Check the abnormal pods")
	var podLogs []byte
	errPod := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
		podLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-A").OutputToFile(dirname)
		if err == nil {
			// `|| true` keeps the pipeline exit code 0 when grep finds nothing.
			cmd := fmt.Sprintf(`cat %v | grep -ivE 'Running|Completed|namespace|installer' || true`, podLogFile)
			podLogs, err = exec.Command("bash", "-c", cmd).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			if len(podLogs) > 0 {
				return false, nil
			}
		} else {
			return false, nil
		}
		e2e.Logf("No abnormality found in pods...")
		return true, nil
	})
	if errPod != nil {
		e2e.Logf("%s", podLogs)
	}
	return errPod
}
apiserverauth
function
openshift/openshift-tests-private
ce7c0b30-ddab-4d03-ac0c-7134dece6592
clusterNodesHealthcheck
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// clusterNodesHealthcheck polls every 5s, for up to waitTime seconds, until
// no node reports NotReady or SchedulingDisabled in the `oc get node` output.
// The node table is printed on success, and again on timeout for debugging.
// NOTE(review): the dirname parameter is unused here; it exists to match the
// signature of the other healthcheck helpers.
func clusterNodesHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
	errNode := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
		if err == nil {
			if strings.Contains(output, "NotReady") || strings.Contains(output, "SchedulingDisabled") {
				return false, nil
			}
		} else {
			return false, nil
		}
		e2e.Logf("Nodes are normal...")
		err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		return true, nil
	})
	if errNode != nil {
		err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	return errNode
}
apiserverauth
function
openshift/openshift-tests-private
740cb1f6-3e1c-4f4e-a879-33a5a5c6ca80
apiserverReadinessProbe
['"crypto/tls"', '"fmt"', '"io/ioutil"', '"net/http"', '"net/url"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// apiserverReadinessProbe GETs <apiserverName>/apis with a Bearer token and
// the X-OpenShift-Internal-If-Not-Ready: reject header, polling every 1s for
// up to 5 minutes until the server answers 429 with "The apiserver hasn't
// been fully initialized yet" — i.e. until the not-ready rejection path is
// observed. TLS verification is skipped (test-only client). Returns the 429
// response body.
// NOTE(review): any response other than a 429 with that message keeps
// polling, so the call fails after 5 minutes if the rejection never appears.
func apiserverReadinessProbe(tokenValue string, apiserverName string) string {
	timeoutDuration := 3 * time.Second
	var bodyString string
	url := fmt.Sprintf(`%s/apis`, apiserverName)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		e2e.Failf("error creating request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+tokenValue)
	req.Header.Set("X-OpenShift-Internal-If-Not-Ready", "reject")
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	client := &http.Client{
		Transport: transport,
		Timeout:   timeoutDuration,
	}
	errCurl := wait.PollImmediate(1*time.Second, 300*time.Second, func() (bool, error) {
		resp, err := client.Do(req)
		if err != nil {
			e2e.Logf("Error while making curl request :: %v", err)
			return false, nil
		}
		defer resp.Body.Close()
		if resp.StatusCode == 429 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			bodyString = string(bodyBytes)
			return strings.Contains(bodyString, "The apiserver hasn't been fully initialized yet, please try again later"), nil
		}
		return false, nil
	})
	exutil.AssertWaitPollNoErr(errCurl, fmt.Sprintf("error waiting for API server readiness: %v", errCurl))
	return bodyString
}
apiserverauth
function
openshift/openshift-tests-private
d5499aa9-603c-4890-b45c-74d7ee16bd09
getServiceIP
['"context"', '"math/rand"', '"net"', '"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['service']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// getServiceIP derives a candidate service IP from clusterIP by perturbing
// its last byte, then verifies (every 2s, up to 60s) that no existing Service
// already uses it, returning the first free candidate.
// NOTE(review): rand.Intn(254-1) can return 0, in which case the IPv4
// candidate is unchanged (and the IPv6 last byte becomes 0) — confirm that
// colliding with the input IP is acceptable for callers.
// NOTE(review): the candidate is matched against the clusterIP list with a
// regexp, so '.' in the dotted address acts as a wildcard character.
func getServiceIP(oc *exutil.CLI, clusterIP string) net.IP {
	var serviceIP net.IP
	err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) {
		// To4() is non-nil only for IPv4 addresses; otherwise fall back to IPv6.
		randomServiceIP := net.ParseIP(clusterIP).To4()
		if randomServiceIP != nil {
			randomServiceIP[3] += byte(rand.Intn(254 - 1))
		} else {
			randomServiceIP = net.ParseIP(clusterIP).To16()
			randomServiceIP[len(randomServiceIP)-1] = byte(rand.Intn(254 - 1))
		}
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-A", `-o=jsonpath={.items[*].spec.clusterIP}`).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		if matched, _ := regexp.MatchString(randomServiceIP.String(), output); matched {
			e2e.Logf("IP %v has been used!", randomServiceIP)
			return false, nil
		}
		serviceIP = randomServiceIP
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "Failed to get one available service IP!")
	return serviceIP
}
apiserverauth
function
openshift/openshift-tests-private
b558d000-2459-423e-a196-8cc2e8feef4b
doAction
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func doAction(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) { if asAdmin && withoutNamespace { return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output() } if asAdmin && !withoutNamespace { return oc.AsAdmin().Run(action).Args(parameters...).Output() } if !asAdmin && withoutNamespace { return oc.WithoutNamespace().Run(action).Args(parameters...).Output() } if !asAdmin && !withoutNamespace { return oc.Run(action).Args(parameters...).Output() } return "", nil }
apiserverauth
function
openshift/openshift-tests-private
981e74ee-d638-448a-b464-76f5ecb8e5ab
getResource
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// getResource is a convenience wrapper around doAction for the "get" verb;
// it returns the raw `oc get` output and any execution error.
func getResource(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) {
	output, err := doAction(oc, "get", asAdmin, withoutNamespace, parameters...)
	return output, err
}
apiserverauth
function
openshift/openshift-tests-private
846b3c36-e965-456d-8d8e-9045fcd1ad3e
getResourceToBeReady
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// getResourceToBeReady repeatedly issues `oc get` (every 6s, up to 5 minutes)
// until the command succeeds with non-empty output, then logs and returns it.
// The test is failed via AssertWaitPollNoErr if the resource never appears.
func getResourceToBeReady(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) string {
	var result string
	var err error
	errPoll := wait.PollUntilContextTimeout(context.Background(), 6*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
		result, err = doAction(oc, "get", asAdmin, withoutNamespace, parameters...)
		if err != nil || len(result) == 0 {
			e2e.Logf("Unable to retrieve the expected resource, retrying...")
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(errPoll, fmt.Sprintf("Failed to retrieve %v", parameters))
	e2e.Logf("The resource returned:\n%v", result)
	return result
}
apiserverauth
function
openshift/openshift-tests-private
6e889f1e-66f6-4ea1-aa60-f7e4efde3dc5
getGlobalProxy
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func getGlobalProxy(oc *exutil.CLI) (string, string, string) { httpProxy, err := getResource(oc, asAdmin, withoutNamespace, "proxy", "cluster", "-o=jsonpath={.status.httpProxy}") o.Expect(err).NotTo(o.HaveOccurred()) httpsProxy, err := getResource(oc, asAdmin, withoutNamespace, "proxy", "cluster", "-o=jsonpath={.status.httpsProxy}") o.Expect(err).NotTo(o.HaveOccurred()) noProxy, err := getResource(oc, asAdmin, withoutNamespace, "proxy", "cluster", "-o=jsonpath={.status.noProxy}") o.Expect(err).NotTo(o.HaveOccurred()) return httpProxy, httpsProxy, noProxy }
apiserverauth
function
openshift/openshift-tests-private
1bd78d91-79c8-4220-91a9-f5c367b74f7b
getPodsListByLabel
['"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func getPodsListByLabel(oc *exutil.CLI, namespace string, selectorLabel string) []string { podsOp := getResourceToBeReady(oc, asAdmin, withoutNamespace, "pod", "-n", namespace, "-l", selectorLabel, "-o=jsonpath={.items[*].metadata.name}") o.Expect(podsOp).NotTo(o.BeEmpty()) return strings.Split(podsOp, " ") }
apiserverauth
function
openshift/openshift-tests-private
6514d285-1cce-4ee7-84a4-1bf1dcd22866
checkApiserversAuditPolicies
['"regexp"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// checkApiserversAuditPolicies asserts that the cluster audit profile equals
// auditPolicyName and that each of kube-apiserver, openshift-apiserver and
// openshift-oauth-apiserver references a policy.yaml audit configuration in
// its running pod. Any mismatch fails the test immediately.
func checkApiserversAuditPolicies(oc *exutil.CLI, auditPolicyName string) {
	e2e.Logf("Checking the current " + auditPolicyName + " audit policy of cluster")
	defaultProfile := getResourceToBeReady(oc, asAdmin, withoutNamespace, "apiserver/cluster", `-o=jsonpath={.spec.audit.profile}`)
	o.Expect(defaultProfile).Should(o.ContainSubstring(auditPolicyName), "current audit policy of cluster is not default :: "+defaultProfile)
	e2e.Logf("Checking the audit config file of kube-apiserver currently in use.")
	podsList := getPodsListByLabel(oc.AsAdmin(), "openshift-kube-apiserver", "app=openshift-kube-apiserver")
	// KAS mounts its audit policies as static-pod configmap files; just check
	// the policy file exists in the expected directory.
	execKasOuptut := ExecCommandOnPod(oc, podsList[0], "openshift-kube-apiserver", "ls /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-audit-policies/")
	re := regexp.MustCompile(`policy.yaml`)
	matches := re.FindAllString(execKasOuptut, -1)
	if len(matches) == 0 {
		e2e.Failf("Audit config file of kube-apiserver is wrong :: %s", execKasOuptut)
	}
	e2e.Logf("Audit config file of kube-apiserver :: %s", execKasOuptut)
	e2e.Logf("Checking the audit config file of openshif-apiserver currently in use.")
	// NOTE(review): label "app=openshift-apiserver-a" looks like a typo for
	// "app=openshift-apiserver" — confirm the selector actually matches pods.
	podsList = getPodsListByLabel(oc.AsAdmin(), "openshift-apiserver", "app=openshift-apiserver-a")
	// OAS references its audit policy from its config.yaml rather than a
	// directory listing, so grep the rendered config instead.
	execOasOuptut := ExecCommandOnPod(oc, podsList[0], "openshift-apiserver", "cat /var/run/configmaps/config/config.yaml")
	re = regexp.MustCompile(`/var/run/configmaps/audit/policy.yaml`)
	matches = re.FindAllString(execOasOuptut, -1)
	if len(matches) == 0 {
		e2e.Failf("Audit config file of openshift-apiserver is wrong :: %s", execOasOuptut)
	}
	e2e.Logf("Audit config file of openshift-apiserver :: %v", matches)
	e2e.Logf("Checking the audit config file of openshif-oauth-apiserver currently in use.")
	podsList = getPodsListByLabel(oc.AsAdmin(), "openshift-oauth-apiserver", "app=openshift-oauth-apiserver")
	execAuthOuptut := ExecCommandOnPod(oc, podsList[0], "openshift-oauth-apiserver", "ls /var/run/configmaps/audit/")
	re = regexp.MustCompile(`policy.yaml`)
	matches = re.FindAllString(execAuthOuptut, -1)
	if len(matches) == 0 {
		e2e.Failf("Audit config file of openshift-oauth-apiserver is wrong :: %s", execAuthOuptut)
	}
	e2e.Logf("Audit config file of openshift-oauth-apiserver :: %v", execAuthOuptut)
}
apiserverauth
function
openshift/openshift-tests-private
60a5eef5-919d-41df-90cc-e5ba0d4276cf
checkAuditLogs
['"fmt"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func checkAuditLogs(oc *exutil.CLI, script string, masterNode string, namespace string) (string, int) { g.By(fmt.Sprintf("Get audit log file from %s", masterNode)) masterNodeLogs, checkLogFileErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, masterNode, []string{"--quiet=true", "--to-namespace=" + namespace}, "bash", "-c", script) o.Expect(checkLogFileErr).NotTo(o.HaveOccurred()) errCount := len(strings.TrimSpace(masterNodeLogs)) return masterNodeLogs, errCount }
apiserverauth
function
openshift/openshift-tests-private
c2ac1a9b-b9cd-422b-8616-6bac2f072eb4
setAuditProfile
['"fmt"', '"strings"', '"time"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// setAuditProfile applies a JSON patch (typically to apiserver/cluster) to
// change the audit profile. If the patch output contains "patched" (i.e. it
// actually changed something), it waits for kube-apiserver to start rolling
// (Progressing=True within 100s) and finish (Available within 1500s), then
// for authentication and openshift-apiserver to settle (60s each). The raw
// patch output is returned in all cases.
func setAuditProfile(oc *exutil.CLI, patchNamespace string, patch string) string {
	expectedProgCoStatus := map[string]string{"Progressing": "True"}
	expectedCoStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
	coOps := []string{"authentication", "openshift-apiserver"}
	patchOutput, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchNamespace, "--type=json", "-p", patch).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	if strings.Contains(patchOutput, "patched") {
		e2e.Logf("Checking KAS, OAS, Auththentication operators should be in Progressing and Available after audit profile change")
		g.By("Checking kube-apiserver operator should be in Progressing in 100 seconds")
		err = waitCoBecomes(oc, "kube-apiserver", 100, expectedProgCoStatus)
		exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not start progressing in 100 seconds")
		e2e.Logf("Checking kube-apiserver operator should be Available in 1500 seconds")
		err = waitCoBecomes(oc, "kube-apiserver", 1500, expectedCoStatus)
		exutil.AssertWaitPollNoErr(err, "kube-apiserver operator is not becomes available in 1500 seconds")
		// Using 60s because KAS takes long time, when KAS finished rotation, OAS and Auth should have already finished.
		for _, ops := range coOps {
			e2e.Logf("Checking %s should be Available in 60 seconds", ops)
			err = waitCoBecomes(oc, ops, 60, expectedCoStatus)
			exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%v operator is not becomes available in 60 seconds", ops))
		}
		e2e.Logf("Post audit profile set. KAS, OAS and Auth operator are available after rollout")
		return patchOutput
	}
	return patchOutput
}
apiserverauth
function
openshift/openshift-tests-private
b3aa5e74-76fb-4fa9-a222-1372b42ccf35
getNewUser
['"fmt"', '"os"', '"os/exec"']
['User']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func getNewUser(oc *exutil.CLI, count int) ([]User, string, string) { command := "htpasswd" _, err := exec.LookPath("command") if err != nil { e2e.Failf("Command '%s' not found in PATH, exit execution!", command) } usersDirPath := "/tmp/" + exutil.GetRandomString() usersHTpassFile := usersDirPath + "/htpasswd" err = os.MkdirAll(usersDirPath, 0o755) o.Expect(err).NotTo(o.HaveOccurred()) htPassSecret, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("oauth/cluster", "-o", "jsonpath={.spec.identityProviders[0].htpasswd.fileData.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if htPassSecret == "" { htPassSecret = "htpass-secret" os.Create(usersHTpassFile) err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", "openshift-config", "secret", "generic", htPassSecret, "--from-file", "htpasswd="+usersHTpassFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("--type=json", "-p", `[{"op": "add", "path": "/spec/identityProviders", "value": [{"htpasswd": {"fileData": {"name": "htpass-secret"}}, "mappingMethod": "claim", "name": "htpasswd", "type": "HTPasswd"}]}]`, "oauth/cluster").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } else { err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("-n", "openshift-config", "secret/"+htPassSecret, "--to", usersDirPath, "--confirm").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } users := make([]User, count) for i := 0; i < count; i++ { // Generate new username and password users[i].Username = fmt.Sprintf("testuser-%v-%v", i, exutil.GetRandomString()) users[i].Password = exutil.GetRandomString() // Add new user to htpasswd file in the temp directory cmd := fmt.Sprintf("htpasswd -b %v %v %v", usersHTpassFile, users[i].Username, users[i].Password) err := exec.Command("bash", "-c", cmd).Run() o.Expect(err).NotTo(o.HaveOccurred()) } // Update htpass-secret with the modified htpasswd file err = oc.AsAdmin().WithoutNamespace().Run("set").Args("-n", 
"openshift-config", "data", "secret/"+htPassSecret, "--from-file", "htpasswd="+usersHTpassFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Checking authentication operator should be in Progressing in 180 seconds") err = waitCoBecomes(oc, "authentication", 180, map[string]string{"Progressing": "True"}) exutil.AssertWaitPollNoErr(err, "authentication operator is not start progressing in 180 seconds") e2e.Logf("Checking authentication operator should be Available in 600 seconds") err = waitCoBecomes(oc, "authentication", 600, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}) exutil.AssertWaitPollNoErr(err, "authentication operator is not becomes available in 600 seconds") return users, usersHTpassFile, htPassSecret }
apiserverauth
function
openshift/openshift-tests-private
937d353c-61d5-41ea-a784-045d6c2bad29
userCleanup
['"fmt"', '"os"', '"os/exec"']
['User']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// userCleanup removes the given test users from the htpasswd file, pushes the
// updated file back into htPassSecret in openshift-config, and waits for the
// authentication operator to roll out (Progressing within 180s, Available
// within 600s). The temporary htpasswd file is deleted on return.
// NOTE(review): the user/identity objects and the oauth identityProvider
// entry added by getNewUser are not removed here — confirm callers handle
// that separately if needed.
func userCleanup(oc *exutil.CLI, users []User, usersHTpassFile string, htPassSecret string) {
	defer os.RemoveAll(usersHTpassFile)
	for _, user := range users {
		// Delete the user entry from the htpasswd file in the temp directory
		cmd := fmt.Sprintf("htpasswd -D %v %v", usersHTpassFile, user.Username)
		err := exec.Command("bash", "-c", cmd).Run()
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	// Update htpass-secret with the modified htpasswd file
	err := oc.AsAdmin().WithoutNamespace().Run("set").Args("-n", "openshift-config", "data", "secret/"+htPassSecret, "--from-file", "htpasswd="+usersHTpassFile).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By("Checking authentication operator should be in Progressing in 180 seconds")
	err = waitCoBecomes(oc, "authentication", 180, map[string]string{"Progressing": "True"})
	exutil.AssertWaitPollNoErr(err, "authentication operator is not start progressing in 180 seconds")
	e2e.Logf("Checking authentication operator should be Available in 600 seconds")
	err = waitCoBecomes(oc, "authentication", 600, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"})
	exutil.AssertWaitPollNoErr(err, "authentication operator is not becomes available in 600 seconds")
}
apiserverauth
function
openshift/openshift-tests-private
c666bd63-ab47-4ee6-af10-7e9959bd4600
isConnectedInternet
['"regexp"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func isConnectedInternet(oc *exutil.CLI) bool { masterNode, masterErr := exutil.GetFirstMasterNode(oc) o.Expect(masterErr).NotTo(o.HaveOccurred()) cmd := `timeout 9 curl -k https://github.com/openshift/ruby-hello-world/ > /dev/null;[ $? -eq 0 ] && echo "connected"` output, _ := exutil.DebugNodeWithChroot(oc, masterNode, "bash", "-c", cmd) if matched, _ := regexp.MatchString("connected", output); !matched { // Failed to access to the internet in the cluster. return false } return true }
apiserverauth
function
openshift/openshift-tests-private
d7793afa-106b-4b10-a296-e22246c910cc
restartMicroshift
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// restartMicroshift restarts the microshift systemd unit on nodename via SSH
// (user "redhat"), retrying the restart up to three times with a 5s pause
// between attempts, then polls every 5s for up to 60s until
// `systemctl is-active microshift` reports "active". Returns an error if the
// restart never succeeds or the service never becomes active.
func restartMicroshift(nodename string) error {
	// Try restarting microshift three times
	var restartErr error
	for i := 0; i < 3; i++ {
		// Execute the command
		_, restartErr = runSSHCommand(nodename, "redhat", "sudo systemctl restart microshift")
		if restartErr != nil {
			e2e.Logf("Error restarting microshift :: %v", restartErr)
			time.Sleep(time.Second * 5) // Wait for 5 seconds before retrying
			continue
		}
		// If successful, break out of the loop
		break
	}
	if restartErr != nil {
		return fmt.Errorf("Failed to restart Microshift server: %v", restartErr)
	}
	var output string
	var err error
	pollErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
		output, err = runSSHCommand(nodename, "redhat", "sudo systemctl is-active microshift")
		if err != nil {
			return false, nil // Retry
		}
		return strings.TrimSpace(output) == "active", nil
	})
	if pollErr != nil {
		return fmt.Errorf("Failed to perform action: %v", pollErr)
	}
	e2e.Logf("Microshift restarted successfully")
	return nil
}
apiserverauth
function
openshift/openshift-tests-private
5e79cb9f-bab4-4ede-8652-4687c6bf94be
replacePatternInfile
['"io/ioutil"', '"regexp"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func replacePatternInfile(microshiftFilePathYaml string, oldPattern string, newPattern string) { content, err := ioutil.ReadFile(microshiftFilePathYaml) o.Expect(err).NotTo(o.HaveOccurred()) re := regexp.MustCompile(oldPattern) newContent := re.ReplaceAll(content, []byte(newPattern)) err = ioutil.WriteFile(microshiftFilePathYaml, newContent, 0644) o.Expect(err).NotTo(o.HaveOccurred()) }
apiserverauth
function
openshift/openshift-tests-private
0dbe0abe-af0f-4d3f-be57-d5468b23db48
getPodsList
['"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func getPodsList(oc *exutil.CLI, namespace string) []string { podsOp := getResourceToBeReady(oc, asAdmin, withoutNamespace, "pod", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}") podNames := strings.Split(strings.TrimSpace(podsOp), " ") e2e.Logf("Namespace %s pods are: %s", namespace, string(podsOp)) return podNames }
apiserverauth
function
openshift/openshift-tests-private
06652b56-71cc-4a1e-aee6-a549b6803eeb
changeMicroshiftConfig
['"fmt"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// changeMicroshiftConfig overwrites configPath on nodeName with configStr by
// streaming a heredoc through `sudo bash -c` over SSH (user "redhat"). The
// surrounding single quotes keep the heredoc body intact until it reaches the
// remote shell.
func changeMicroshiftConfig(configStr string, nodeName string, configPath string) {
	etcConfigCMD := fmt.Sprintf(`'
configfile=%v
cat > $configfile << EOF
%v
EOF'`, configPath, configStr)
	_, mchgConfigErr := runSSHCommand(nodeName, "redhat", "sudo bash -c", etcConfigCMD)
	o.Expect(mchgConfigErr).NotTo(o.HaveOccurred())
}
apiserverauth
function
openshift/openshift-tests-private
269c3b86-5af0-4e18-a7c6-a91023a8e59a
addKustomizationToMicroshift
['"fmt"', '"os/exec"', '"path/filepath"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// addKustomizationToMicroshift copies kustomization manifests to a MicroShift
// node over SSH. kustomizationFiles maps a logical name (the part before the
// first '.' becomes the destination .yaml filename) to a 4-element slice:
// [0] testdata source file, [1] destination directory on the node,
// [2] regexp pattern to replace in the source, [3] replacement text.
// Each patched file is written remotely via a `sudo bash -c` heredoc.
func addKustomizationToMicroshift(nodeName string, kustomizationFiles map[string][]string) {
	for key, file := range kustomizationFiles {
		tmpFileName := getTestDataFilePath(file[0])
		// Patch the local copy before shipping it to the node.
		replacePatternInfile(tmpFileName, file[2], file[3])
		fileOutput, err := exec.Command("bash", "-c", fmt.Sprintf(`cat %s`, tmpFileName)).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		destFile := filepath.Join(file[1], strings.Split(key, ".")[0]+".yaml")
		fileCmd := fmt.Sprintf(`'cat > %s << EOF
%s
EOF'`, destFile, string(fileOutput))
		_, mchgConfigErr := runSSHCommand(nodeName, "redhat", "sudo bash -c", fileCmd)
		o.Expect(mchgConfigErr).NotTo(o.HaveOccurred())
	}
}
apiserverauth
function
openshift/openshift-tests-private
018bb3ff-010d-49ac-abd7-65a4f778c790
verifyHypershiftCiphers
['"bufio"', '"fmt"', '"os"', '"os/exec"', '"github.com/openshift/openshift-tests-private/test/extended/util"', 'exutil "github.com/openshift/openshift-tests-private/test/extended/util"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// verifyHypershiftCiphers checks that kube-apiserver, openshift-apiserver and
// oauth-openshift in a hosted-control-plane namespace ns all advertise the
// expected "<cipherSuites> <minTLSVersion>" string, extracted from their
// configmaps with jq. Returns an error on the first component whose ciphers
// differ from expectedCipher, nil when all three match.
func verifyHypershiftCiphers(oc *exutil.CLI, expectedCipher string, ns string) error {
	var (
		cipherStr string
		randomStr = exutil.GetRandomString()
		tmpDir    = fmt.Sprintf("/tmp/-api-%s/", randomStr)
	)
	defer os.RemoveAll(tmpDir)
	os.MkdirAll(tmpDir, 0755)
	for _, item := range []string{"kube-apiserver", "openshift-apiserver", "oauth-openshift"} {
		e2e.Logf("#### Checking the ciphers of %s:", item)
		if item == "kube-apiserver" {
			// KAS stores JSON config directly in the kas-config configmap.
			out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", ns, "kas-config", `-o=jsonpath='{.data.config\.json}'`).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			// Use jq command line to extrack .servingInfo part JSON comming in string format
			jqCmd := fmt.Sprintf(`echo %s | jq -cr '.servingInfo | "\(.cipherSuites) \(.minTLSVersion)"'|tr -d '\n'`, out)
			outJQ, err := exec.Command("bash", "-c", jqCmd).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			cipherStr = string(outJQ)
		} else {
			// OAS/oauth store YAML config; convert it to JSON first, then
			// extract the servingInfo fields from a temp JSON file.
			jsonOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", ns, item, `-ojson`).OutputToFile("api-" + randomStr + "." + item)
			o.Expect(err).NotTo(o.HaveOccurred())
			jqCmd := fmt.Sprintf(`cat %v | jq -r '.data."config.yaml"'`, jsonOut)
			yamlConfig, err := exec.Command("bash", "-c", jqCmd).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			jsonConfig, errJson := util.Yaml2Json(string(yamlConfig))
			o.Expect(errJson).NotTo(o.HaveOccurred())
			jsonFile := tmpDir + item + "config.json"
			f, err := os.Create(jsonFile)
			o.Expect(err).NotTo(o.HaveOccurred())
			// NOTE(review): defer in a loop — these file handles close only
			// when the function returns, not per iteration.
			defer f.Close()
			w := bufio.NewWriter(f)
			_, err = fmt.Fprintf(w, "%s", jsonConfig)
			w.Flush()
			o.Expect(err).NotTo(o.HaveOccurred())
			jqCmd1 := fmt.Sprintf(`jq -cr '.servingInfo | "\(.cipherSuites) \(.minTLSVersion)"' %s |tr -d '\n'`, jsonFile)
			jsonOut1, err := exec.Command("bash", "-c", jqCmd1).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			cipherStr = string(jsonOut1)
		}
		e2e.Logf("#### Checking if the ciphers has been changed as the expected: %s", expectedCipher)
		if expectedCipher != cipherStr {
			e2e.Logf("#### Ciphers of %s are: %s", item, cipherStr)
			return fmt.Errorf("Ciphers not matched")
		}
		e2e.Logf("#### Ciphers are matched.")
	}
	return nil
}
apiserverauth
function
openshift/openshift-tests-private
c41007c3-ff2d-4bc1-9313-5c58a12dea0c
waitApiserverRestartOfHypershift
['"context"', '"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// waitApiserverRestartOfHypershift waits (10s interval, up to waitTime
// seconds) for pods labelled app=<appLabel> in ns to finish restarting. Pod
// listings showing 0/N readiness, Pending, Terminating or Init states count
// as "still restarting"; once clear, the listing is re-checked three more
// times at 10s spacing to avoid declaring success mid rolling-restart. Fails
// the test on timeout and returns the poll error.
func waitApiserverRestartOfHypershift(oc *exutil.CLI, appLabel string, ns string, waitTime int) error {
	re, err := regexp.Compile(`(0/[0-9]|Pending|Terminating|Init)`)
	o.Expect(err).NotTo(o.HaveOccurred())
	errKas := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
		out, _ := getResource(oc, asAdmin, withoutNamespace, "pods", "-l", "app="+appLabel, "--no-headers", "-n", ns)
		if matched := re.MatchString(out); matched {
			e2e.Logf("#### %s was restarting ...", appLabel)
			return false, nil
		}
		// Recheck status of pods and to do further confirm , avoid false restarts
		for i := 1; i <= 3; i++ {
			time.Sleep(10 * time.Second)
			out, _ = getResource(oc, asAdmin, withoutNamespace, "pods", "-l", "app="+appLabel, "--no-headers", "-n", ns)
			if matchedAgain := re.MatchString(out); matchedAgain {
				e2e.Logf("#### %s was restarting ...", appLabel)
				return false, nil
			}
		}
		e2e.Logf("#### %s have been restarted!", appLabel)
		return true, nil
	})
	exutil.AssertWaitPollNoErr(errKas, "Failed to complete the restart within the expected time, please check the cluster status!")
	return errKas
}
apiserverauth
function
openshift/openshift-tests-private
af7884ca-8fb0-4aef-8ab9-fc623bab2f4e
containsAnyWebHookReason
['"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// containsAnyWebHookReason reports whether webhookError mentions any of the
// supplied reason strings. conditionReasons may be a single string or a
// []string; any other dynamic type yields false.
func containsAnyWebHookReason(webhookError string, conditionReasons interface{}) bool {
	if single, ok := conditionReasons.(string); ok {
		return strings.Contains(webhookError, single)
	}
	if many, ok := conditionReasons.([]string); ok {
		for _, reason := range many {
			if strings.Contains(webhookError, reason) {
				return true
			}
		}
	}
	return false
}
apiserverauth
function
openshift/openshift-tests-private
32866dc9-db24-4c92-b3d9-e4b877e4d6d1
clientCurl
['"crypto/tls"', '"fmt"', '"io/ioutil"', '"net/http"', '"net/url"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// clientCurl GETs url with a Bearer token, routing through the cluster-wide
// proxy (getProxyURL) and skipping TLS verification (test-only client, 3s
// per-request timeout). It polls every 10s for up to 5 minutes until a 200
// response arrives and returns the response body; transport errors and
// non-200 responses are silently retried.
func clientCurl(tokenValue string, url string) string {
	timeoutDuration := 3 * time.Second
	var bodyString string
	proxyURL := getProxyURL()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		e2e.Failf("error creating request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+tokenValue)
	transport := &http.Transport{
		Proxy: http.ProxyURL(proxyURL),
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	client := &http.Client{
		Transport: transport,
		Timeout:   timeoutDuration,
	}
	errCurl := wait.PollImmediate(10*time.Second, 300*time.Second, func() (bool, error) {
		resp, err := client.Do(req)
		if err != nil {
			return false, nil
		}
		defer resp.Body.Close()
		if resp.StatusCode == 200 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			bodyString = string(bodyBytes)
			return true, nil
		}
		return false, nil
	})
	exutil.AssertWaitPollNoErr(errCurl, fmt.Sprintf("error waiting for curl request output: %v", errCurl))
	return bodyString
}
apiserverauth
function
openshift/openshift-tests-private
e269507a-8ed5-4e98-9937-71d3542a11e3
getApiServerFQDNandPort
['"net/url"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func getApiServerFQDNandPort(oc *exutil.CLI, hypershiftCluster bool) (string, string) { var ( apiServerURL string configErr error ) if !hypershiftCluster { apiServerURL, configErr = oc.AsAdmin().WithoutNamespace().Run("config").Args("view", "-ojsonpath={.clusters[0].cluster.server}").Output() } else { apiServerURL, configErr = oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("config").Args("view", "-ojsonpath={.clusters[0].cluster.server}").Output() } o.Expect(configErr).NotTo(o.HaveOccurred()) fqdnName, parseErr := url.Parse(apiServerURL) o.Expect(parseErr).NotTo(o.HaveOccurred()) return fqdnName.Hostname(), fqdnName.Port() }
apiserverauth
function
openshift/openshift-tests-private
73554eb7-d4df-4ba9-a0f0-e748353f88d5
isTechPreviewNoUpgrade
['"context"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// isTechPreviewNoUpgrade reports whether the cluster FeatureGate is set to
// TechPreviewNoUpgrade. A missing "cluster" FeatureGate object is treated as
// false; any other lookup error fails the test.
func isTechPreviewNoUpgrade(oc *exutil.CLI) bool {
	featureGate, err := oc.AdminConfigClient().ConfigV1().FeatureGates().Get(context.Background(), "cluster", metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return false
		}
		e2e.Failf("could not retrieve feature-gate: %v", err)
	}
	return featureGate.Spec.FeatureSet == configv1.TechPreviewNoUpgrade
}
apiserverauth
function
openshift/openshift-tests-private
7877e492-edac-4aed-88e4-c04c6620d03b
isIPv4
['"net"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// isIPv4 reports whether str is a valid IP address written in dotted
// (IPv4-style) notation.
func isIPv4(str string) bool {
	if net.ParseIP(str) == nil {
		return false
	}
	return strings.Contains(str, ".")
}
apiserverauth
function
openshift/openshift-tests-private
03ac8f3e-543d-4f4d-9e03-59c6e56c6706
isIPv6
['"net"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// isIPv6 reports whether str is a valid IP address written in colon-separated
// (IPv6-style) notation.
func isIPv6(str string) bool {
	if net.ParseIP(str) == nil {
		return false
	}
	return strings.Contains(str, ":")
}
apiserverauth
function
openshift/openshift-tests-private
9fb3c34f-16f7-4eeb-8d28-a5f53d64aec8
copyImageToInternelRegistry
['"crypto/tls"', '"os/exec"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// copyImageToInternelRegistry copies a container image from source to dest by
// exec-ing skopeo inside a helper pod in the given namespace. It creates the
// skopeo pod on first use (from skopeo-deployment.json test data) and reuses it
// on subsequent calls. Destination credentials use the "builder" service
// account token. Returns skopeo's combined output and any exec error.
// NOTE(review): "Internel" is a typo but is the established public name —
// renaming would break callers.
func copyImageToInternelRegistry(oc *exutil.CLI, namespace string, source string, dest string) (string, error) {
	var (
		podName string
		appName = "skopeo"
		err     error
	)
	// Lookup error deliberately ignored: an empty podName simply means the
	// helper pod does not exist yet and must be created below.
	podName, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", "name="+appName, "-o", `jsonpath={.items[*].metadata.name}`).Output()
	// If the skopeo pod doesn't exist, create it
	if len(podName) == 0 {
		template := getTestDataFilePath("skopeo-deployment.json")
		err = oc.Run("create").Args("-f", template, "-n", namespace).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		podName = getPodsListByLabel(oc.AsAdmin(), namespace, "name="+appName)[0]
		exutil.AssertPodToBeReady(oc, podName, namespace)
	} else {
		// Pod already exists: require its Ready condition to be True before use.
		output, err := oc.AsAdmin().Run("get").Args("pod", podName, "-n", namespace, "-o", "jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(output).Should(o.ContainSubstring("True"), appName+" pod is not ready!")
	}
	// The builder SA token authenticates the push to the internal registry.
	token, err := getSAToken(oc, "builder", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(token).NotTo(o.BeEmpty())
	// TLS verification is disabled on both ends; "dnm" is a dummy username —
	// only the token matters for registry auth.
	command := []string{podName, "-n", namespace, "--", appName, "--insecure-policy", "--src-tls-verify=false", "--dest-tls-verify=false", "copy", "--dcreds", "dnm:" + token, source, dest}
	results, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(command...).Output()
	return results, err
}
apiserverauth
function
openshift/openshift-tests-private
ad1932ad-86f7-4851-889b-702a4dc7b296
isBaselineCapsSet
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func isBaselineCapsSet(oc *exutil.CLI) bool { baselineCapabilitySet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.spec.capabilities.baselineCapabilitySet}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("baselineCapabilitySet parameters: %v\n", baselineCapabilitySet) return len(baselineCapabilitySet) != 0 }
apiserverauth
function
openshift/openshift-tests-private
379d7011-0f35-4921-947f-c606ed8f41d8
isEnabledCapability
['"strings"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func isEnabledCapability(oc *exutil.CLI, component string) bool { enabledCapabilities, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].status.capabilities.enabledCapabilities}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("Cluster enabled capability parameters: %v\n", enabledCapabilities) return strings.Contains(enabledCapabilities, component) }
apiserverauth
function
openshift/openshift-tests-private
893d1138-d178-44b9-9ce5-26a9160ffb8d
checkURLEndpointAccess
['"context"', '"fmt"', '"net/url"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
func checkURLEndpointAccess(oc *exutil.CLI, hostIP, nodePort, podName, portCommand, status string) { var url string var curlOutput string var curlErr error if isIPv6(hostIP) { url = fmt.Sprintf("[%s]:%s", hostIP, nodePort) } else { url = fmt.Sprintf("%s:%s", hostIP, nodePort) } // Construct the full command with the specified command and URL var fullCommand string if portCommand == "https" { fullCommand = fmt.Sprintf("curl -k https://%s", url) } else { fullCommand = fmt.Sprintf("curl %s", url) } e2e.Logf("Command: %v", fullCommand) e2e.Logf("Checking if the specified URL endpoint %s is accessible", url) err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 6*time.Second, false, func(cxt context.Context) (bool, error) { curlOutput, curlErr = oc.Run("exec").Args(podName, "-i", "--", "sh", "-c", fullCommand).Output() if curlErr != nil { return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Unable to access %s", url)) o.Expect(curlOutput).To(o.ContainSubstring(status)) }
apiserverauth
function
openshift/openshift-tests-private
ff82d995-f77a-4b39-9312-9685ab828593
urlHealthCheck
['"context"', '"crypto/tls"', '"crypto/x509"', '"fmt"', '"io/ioutil"', '"net/http"', '"net/url"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['CertificateDetails']
github.com/openshift/openshift-tests-private/test/extended/apiserverauth/apiserver_util.go
// urlHealthCheck performs an HTTPS GET against https://fqdnName:port/healthz,
// validating the server certificate against the CA bundle at certPath, and
// returns a CertificateDetails populated with only the fields named in
// returnValues (e.g. "Subject", "Issuer", "NotAfter", "CurlResponse").
// The request is retried every 5s for up to 30s; transport-level failures are
// retried, body-read failures abort immediately. Requests honor the proxy
// returned by getProxyURL.
func urlHealthCheck(fqdnName string, port string, certPath string, returnValues []string) (*CertificateDetails, error) {
	proxyURL := getProxyURL()
	caCert, err := ioutil.ReadFile(certPath)
	if err != nil {
		return nil, fmt.Errorf("Error reading CA certificate: %s", err)
	}
	// Create a CertPool and add the CA certificate
	caCertPool := x509.NewCertPool()
	if !caCertPool.AppendCertsFromPEM(caCert) {
		return nil, fmt.Errorf("Failed to append CA certificate")
	}
	// Create a custom transport with the CA certificate
	transport := &http.Transport{
		Proxy: http.ProxyURL(proxyURL),
		TLSClientConfig: &tls.Config{
			RootCAs: caCertPool,
		},
	}
	client := &http.Client{
		Transport: transport,
	}
	url := fmt.Sprintf("https://%s:%s/healthz", fqdnName, port)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	var certDetails *CertificateDetails
	err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
		resp, err := client.Get(url)
		if err != nil {
			// Transient network/TLS failure: log and let the poll retry.
			e2e.Logf("Error performing HTTP request: %s, retrying...\n", err)
			return false, nil
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			// Non-nil error from the closure stops the poll immediately.
			return false, fmt.Errorf("Error reading response body: %s", err)
		}
		certDetails = &CertificateDetails{}
		// Extract requested fields from the server's leaf certificate, when
		// TLS state is available on the response.
		if resp.TLS != nil && len(resp.TLS.PeerCertificates) > 0 {
			cert := resp.TLS.PeerCertificates[0]
			for _, value := range returnValues {
				switch value {
				case "CurlResponse":
					certDetails.CurlResponse = string(body)
				case "Subject":
					certDetails.Subject = cert.Subject.String()
				case "Issuer":
					certDetails.Issuer = cert.Issuer.String()
				case "NotBefore":
					certDetails.NotBefore = cert.NotBefore.Format(time.RFC3339)
				case "NotAfter":
					certDetails.NotAfter = cert.NotAfter.Format(time.RFC3339)
				case "SubjectAltName":
					certDetails.SubjectAltName = cert.DNSNames
				case "SerialNumber":
					certDetails.SerialNumber = cert.SerialNumber.String()
				}
			}
		}
		return true, nil
	})
	if err != nil {
		return nil, fmt.Errorf("Error performing HTTP request: %s", err)
	}
	return certDetails, nil
}
apiserverauth