element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
function | openshift/openshift-tests-private | bafa5e98-0b81-46e6-8b5d-e77823456b62 | State | ['"strings"'] | ['vsphereInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func (vs *vsphereInstance) State() (string, error) {
instanceState, statusErr := vs.vspObj.GetVspheresInstanceState(vs.vspClient, vs.vmRelativePath+vs.nodeName)
return strings.ToLower(instanceState), statusErr
} | disasterrecovery | |||
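Because State() lower-cases whatever the vSphere client reports, callers can compare against fixed lowercase strings. Below is a minimal sketch of how that contract might be consumed in a wait loop; the instanceStater interface and waitForState helper are illustrative assumptions, not part of the repository.

```go
package disasterrecovery

import (
	"fmt"
	"time"
)

// instanceStater captures the State() shape of vsphereInstance above.
type instanceStater interface {
	State() (string, error)
}

// waitForState polls until the instance reports the desired lowercase
// state (e.g. "poweredoff") or the timeout elapses.
func waitForState(vm instanceStater, want string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if state, err := vm.State(); err == nil && state == want {
			return nil
		}
		time.Sleep(10 * time.Second)
	}
	return fmt.Errorf("instance did not reach state %q within %v", want, timeout)
}
```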
function | openshift/openshift-tests-private | a14145d0-5996-4fd4-b455-e831f467a55a | getvSphereServerConfig | ['"fmt"', '"os"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | ['vSphereConfig'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func getvSphereServerConfig(oc *exutil.CLI, vSphereConfigFile string) (string, string, error) {
fileContent, err := os.ReadFile(vSphereConfigFile)
if err != nil {
return "", "", fmt.Errorf("error reading configuration file: %s", err)
}
// Try to parse as INI format
cfg, err := ini.Load(vSphereConfigFile)
if err == nil {
// INI parsing succeeded, extract values
serverURL := cfg.Section("Workspace").Key("server").String()
vmRelativePath := cfg.Section("Workspace").Key("folder").String()
return serverURL, vmRelativePath + "/", nil
}
// If INI parsing fails, try parsing as YAML
var yamlConfig vSphereConfig
err = yaml.Unmarshal(fileContent, &yamlConfig)
if err != nil {
return "", "", fmt.Errorf("error parsing configuration as YAML: %s", err)
}
// Extract values from the YAML structure
for _, vcenter := range yamlConfig.Vcenter {
if vcenter.Server != "" {
serverURL := vcenter.Server
var vmRelativePath string
if len(vcenter.Datacenters) > 0 {
vmRelativePath = vcenter.Datacenters[0]
}
infrastructureName := clusterinfra.GetInfrastructureName(oc)
o.Expect(infrastructureName).ShouldNot(o.BeEmpty(), "The infrastructure name should not be empty")
return serverURL, "/" + vmRelativePath + "/vm/" + infrastructureName + "/", nil
}
}
return "", "", fmt.Errorf("no valid configuration found")
} | disasterrecovery | |||
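The vSphereConfig struct consumed above is not included in this dump. Below is a plausible shape inferred from the field accesses (Vcenter, Server, Datacenters), together with a standalone version of the same INI-then-YAML fallback; gopkg.in/ini.v1 and gopkg.in/yaml.v2 are assumed to be the libraries behind the ini and yaml identifiers.

```go
package disasterrecovery

import (
	"fmt"
	"os"

	ini "gopkg.in/ini.v1"
	yaml "gopkg.in/yaml.v2"
)

// Inferred from the field accesses in getvSphereServerConfig; the real
// struct in the repo may carry more fields.
type vSphereConfig struct {
	Vcenter map[string]struct {
		Server      string   `yaml:"server"`
		Datacenters []string `yaml:"datacenters"`
	} `yaml:"vcenter"`
}

// parseServerConfig mirrors the fallback logic above: try the legacy INI
// cloud-provider format first, then the newer YAML form.
func parseServerConfig(path string) (string, string, error) {
	if cfg, iniErr := ini.Load(path); iniErr == nil {
		ws := cfg.Section("Workspace")
		// Guard on a non-empty server key, since a lenient INI parser can
		// "succeed" on YAML input.
		if server := ws.Key("server").String(); server != "" {
			return server, ws.Key("folder").String() + "/", nil
		}
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		return "", "", fmt.Errorf("reading configuration file: %w", err)
	}
	var c vSphereConfig
	if err := yaml.Unmarshal(raw, &c); err != nil {
		return "", "", fmt.Errorf("parsing configuration as YAML: %w", err)
	}
	for _, vc := range c.Vcenter {
		if vc.Server != "" {
			dc := ""
			if len(vc.Datacenters) > 0 {
				dc = vc.Datacenters[0]
			}
			return vc.Server, "/" + dc + "/vm/", nil
		}
	}
	return "", "", fmt.Errorf("no valid configuration found")
}
```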
test | openshift/openshift-tests-private | 531fc06e-f123-4b5c-8ef4-aad34bf7dc9b | dr_testing_etcd | import (
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | package disasterrecovery
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-disasterrecovery] DR_Testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("default-"+getRandomString(), exutil.KubeConfigPath())
iaasPlatform string
)
g.BeforeEach(func() {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
iaasPlatform = strings.ToLower(output)
if strings.Contains(iaasPlatform, "baremetal") || strings.Contains(iaasPlatform, "none") {
g.Skip("IAAS platform: " + iaasPlatform + " is not supported yet for DR - skipping test ...")
}
if !IsCOHealthy(oc, "etcd") {
g.Skip("PreCheck: the etcd operator is degraded, skipping the test.")
}
if !IsCOHealthy(oc, "kube-apiserver") {
g.Skip("PreCheck: the kube-apiserver operator is degraded, skipping the test.")
}
})
g.AfterEach(func() {
if !healthyCheck(oc) {
e2e.Failf("Cluster health check failed after the test.")
}
})
// author: [email protected]
g.It("Author:yinzhou-NonPreRelease-Longduration-Critical-42183-backup and restore should perform consistency checks on etcd snapshots [Disruptive]", func() {
g.By("Test for case OCP-42183 backup and restore should perform consistency checks on etcd snapshots")
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Run the backup")
masterN, etcdDb := runDRBackup(oc, masterNodeList)
defer func() {
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "rm", "-rf", "/home/core/assets/backup")
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Corrupt the etcd db file ")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "truncate", "-s", "126k", etcdDb)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Run the restore")
output, _ := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "/usr/local/bin/cluster-restore.sh", "/home/core/assets/backup")
o.Expect(output).To(o.ContainSubstring("Backup appears corrupted. Aborting!"))
})
// author: [email protected]
g.It("Author:skundu-LEVEL0-Longduration-NonPreRelease-Critical-77921-workflow of quorum restoration. [Disruptive][Slow]", func() {
var (
bastionHost = ""
userForBastion = ""
)
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix", "ibmcloud"}
platformListWithoutBastion := []string{"vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
privateKeyForBastion := os.Getenv("SSH_CLOUD_PRIV_KEY")
if privateKeyForBastion == "" {
g.Skip("Failed to get the private key, skip the cases!!")
}
withoutBastion := in(iaasPlatform, platformListWithoutBastion)
if !withoutBastion {
bastionHost = os.Getenv("QE_BASTION_PUBLIC_ADDRESS")
if bastionHost == "" {
g.Skip("Failed to get the qe bastion public ip, skip the case !!")
}
userForBastion = getUserNameAndKeyonBationByPlatform(iaasPlatform)
if userForBastion == "" {
g.Skip("Failed to get the user for bastion host, hence skipping the case!!")
}
}
g.By("make sure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeInternalIPList := getNodeInternalIPListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("bastion host is : %v", bastionHost)
e2e.Logf("platform is : %v", iaasPlatform)
e2e.Logf("user on bastion is : %v", userForBastion)
g.By("Make sure all the nodes are normal")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
checkMessage := []string{
"SchedulingDisabled",
"NotReady",
}
for _, v := range checkMessage {
if strings.Contains(out, v) {
g.Skip("The cluster nodes are abnormal, skip this case")
}
}
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Make the two non-recovery control plane nodes NOT_READY")
//if assert err the cluster will be unavailable
for i := 1; i < len(masterNodeInternalIPList); i++ {
_, err := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo /usr/local/bin/disable-etcd.sh", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
_, err1 := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo rm -rf /var/lib/etcd", privateKeyForBastion, userForBastion)
o.Expect(err1).NotTo(o.HaveOccurred())
_, err2 := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo systemctl stop kubelet.service", privateKeyForBastion, userForBastion)
o.Expect(err2).NotTo(o.HaveOccurred())
}
g.By("Run the quorum-restore script on the recovery control plane host")
msg, err := runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo -E /usr/local/bin/quorum-restore.sh", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("starting restore-etcd static pod"))
g.By("Wait for the api server to come up after restore operation.")
errW := wait.Poll(20*time.Second, 900*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(masterNodeList[0], out); matched {
e2e.Logf("Api is back online:")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errW, "the Apiserver has not come up after quorum restore operation")
g.By("Start the kubelet service on both the non-recovery control plane hosts")
for i := 1; i < len(masterNodeList); i++ {
_, _ = runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo systemctl start kubelet.service", privateKeyForBastion, userForBastion)
}
g.By("Wait for the nodes to be Ready.")
for i := 0; i < len(masterNodeList); i++ {
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", masterNodeList[i]).Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(" Ready", out); matched {
e2e.Logf("Node %s is back online:\n%s", masterNodeList[i], out)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "the kubelet start has not brought the node online and Ready")
}
defer checkOperator(oc, "etcd")
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"unsupportedConfigOverrides\": null}}")).Execute()
g.By("Turn off quorum guard to ensure revision rollouts of static pods")
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
errGrd := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"unsupportedConfigOverrides\": {\"useUnsupportedUnsafeNonHANonProductionUnstableEtcd\": true}}}")).Execute()
if errGrd != nil {
e2e.Logf("server is not ready yet, error: %s. Trying again ...", errGrd)
return false, nil
}
e2e.Logf("successfully patched.")
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "unable to patch the server to turn off the quorum guard.")
// both etcd and kube-apiserver operators start and end roll out almost simultaneously.
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
waitForOperatorRestart(oc, "etcd")
}()
waitForOperatorRestart(oc, "kube-apiserver")
wg.Wait()
})
// author: [email protected]
g.It("Author:geliu-NonPreRelease-Longduration-Critical-50205-lost master can be replaced by new one with machine config recreation in ocp 4.x [Disruptive][Slow]", func() {
g.By("Test for case lost master can be replaced by new one with machine config recreation in ocp 4.x")
g.By("Get all the master node name & count")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
g.By("make sure all the etcd pods are running")
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
g.By("Export the machine config file for 1st master node")
output, err := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
masterMachineNameList := strings.Fields(output)
machineYmlFile := ""
machineYmlFile, err = oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", masterMachineNameList[0], "-o", "yaml").OutputToFile("machine.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
newMachineConfigFile := strings.Replace(machineYmlFile, "machine.yaml", "machineUpd.yaml", -1)
defer exec.Command("bash", "-c", "rm -f "+machineYmlFile).Output()
defer exec.Command("bash", "-c", "rm -f "+newMachineConfigFile).Output()
g.By("update machineYmlFile to newMachineYmlFile:")
newMasterMachineNameSuffix := masterMachineNameList[0] + "00"
o.Expect(updateMachineYmlFile(machineYmlFile, masterMachineNameList[0], newMasterMachineNameSuffix)).To(o.BeTrue())
g.By("Create new machine")
resultFile, _ := exec.Command("bash", "-c", "cat "+newMachineConfigFile).Output()
e2e.Logf("####newMasterMachineNameSuffix is %s\n", string(resultFile))
_, err = oc.AsAdmin().Run("create").Args("-n", "openshift-machine-api", "-f", newMachineConfigFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitMachineStatusRunning(oc, newMasterMachineNameSuffix)
g.By("Delete machine of the unhealthy master node")
_, err = oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "machine", masterMachineNameList[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(getNodeListByLabel(oc, "node-role.kubernetes.io/master="))).To(o.Equal(masterNodeCount))
})
// author: [email protected]
g.It("Longduration-Author:skundu-NonPreRelease-Critical-51109-Delete an existing machine at first and then add a new one. [Disruptive]", func() {
g.By("Test for delete an existing machine at first and then add a new one")
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
var (
mMachineop = ""
machineStatusOutput = ""
)
g.By("Make sure all the etcd pods are running")
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
g.By("Get all the master node name & count")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
g.By("Get master machine name list")
output, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(errMachineConfig).NotTo(o.HaveOccurred())
masterMachineNameList := strings.Fields(output)
g.By("At first delete machine of the master node without adding new one")
errMachineDelete := oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "--wait=false", "machine", masterMachineNameList[0]).Execute()
o.Expect(errMachineDelete).NotTo(o.HaveOccurred())
g.By("Verify that the machine is getting deleted and new machine is automatically created")
waitforDesiredMachineCount(oc, masterNodeCount+1)
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
mMachineopraw, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
if errMachineConfig != nil {
e2e.Logf("Failed to get machine name: %s. Trying again", errMachineConfig)
return false, nil
}
mMachineop = mMachineopraw
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "Failed to get master machine names")
mMachineNameList := strings.Fields(mMachineop)
errSt := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
machineStatusraw, errStatus := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o", "jsonpath={.items[*].status.phase}").Output()
if errStatus != nil {
e2e.Logf("Failed to get machine status: %s. Trying again", errStatus)
return false, nil
}
machineStatusOutput = machineStatusraw
return true, nil
})
exutil.AssertWaitPollNoErr(errSt, "Failed to get master machine status")
mMachineStatus := strings.Fields(machineStatusOutput)
e2e.Logf("masterMachineStatus after deletion is %v", mMachineStatus)
o.Expect(in("Deleting", mMachineStatus)).To(o.Equal(true))
newMasterMachine := getNewMastermachine(mMachineStatus, mMachineNameList, "Provision")
g.By("Verify that the new machine is in running state.")
waitMachineStatusRunning(oc, newMasterMachine)
g.By("Verify that the old machine is deleted. The master machine count is same as initial one.")
waitforDesiredMachineCount(oc, masterNodeCount)
})
// author: [email protected]
g.It("Longduration-Author:skundu-NonPreRelease-Critical-59377-etcd-operator should not scale-down when all members are healthy. [Disruptive]", func() {
g.By("etcd-operator should not scale-down when all members are healthy")
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
var (
mMachineop = ""
machineStatusOutput = ""
)
g.By("Make sure all the etcd pods are running")
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
g.By("Get all the master node name & count")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
e2e.Logf("masterNodeCount is %v", masterNodeCount)
g.By("Get master machine name list")
output, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(errMachineConfig).NotTo(o.HaveOccurred())
masterMachineNameList := strings.Fields(output)
e2e.Logf("masterMachineNameList is %v", masterMachineNameList)
g.By("Delete the CR")
_, err := oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "controlplanemachineset.machine.openshift.io", "cluster").Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForDesiredStateOfCR(oc, "Inactive")
g.By("delete machine of the master node")
errMachineDelete := oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "--wait=false", "machine", masterMachineNameList[0]).Execute()
o.Expect(errMachineDelete).NotTo(o.HaveOccurred())
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
machineStatusOutputraw, errStatus := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o", "jsonpath={.items[*].status.phase}").Output()
if errStatus != nil {
e2e.Logf("Failed to get master machine name: %s. Trying again", errStatus)
return false, nil
}
machineStatusOutput = machineStatusOutputraw
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "Failed to get master machine names")
masterMachineStatus := strings.Fields(machineStatusOutput)
e2e.Logf("masterMachineStatus after deletion is %v", masterMachineStatus)
waitMachineDesiredStatus(oc, masterMachineNameList[0], "Deleting")
g.By("enable the control plane machineset")
errW := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
patch := `[{"op": "replace", "path": "/spec/state", "value": "Active"}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", "openshift-machine-api", "controlplanemachineset.machine.openshift.io", "cluster", "--type=json", "-p", patch).Execute()
if patchErr != nil {
e2e.Logf("unable to patch the machineset, error: %s. Trying again ...", patchErr)
return false, nil
}
e2e.Logf("successfully patched the machineset.")
return true, nil
})
exutil.AssertWaitPollNoErr(errW, "unable to enable the control plane machineset.")
waitForDesiredStateOfCR(oc, "Active")
errSt := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
machineStatusraw, errStatus := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o", "jsonpath={.items[*].status.phase}").Output()
if errStatus != nil {
e2e.Logf("Failed to get machine status: %s. Trying again", errStatus)
return false, nil
}
if match, _ := regexp.MatchString("Provision", machineStatusraw); match {
e2e.Logf("machine status Provision showed up")
machineStatusOutput = machineStatusraw
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errSt, "Failed to get master machine status")
mMachineStatus := strings.Fields(machineStatusOutput)
errWt := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
mMachineopraw, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
if errMachineConfig != nil {
e2e.Logf("Failed to get machine name: %s. Trying again", errMachineConfig)
return false, nil
}
mMachineop = mMachineopraw
return true, nil
})
exutil.AssertWaitPollNoErr(errWt, "Failed to get master machine names")
mMachineNameList := strings.Fields(mMachineop)
e2e.Logf("masterMachineStatus after enabling the CPMS is %v", mMachineStatus)
newMasterMachine := getNewMastermachine(mMachineStatus, mMachineNameList, "Provision")
g.By("Verify that the new machine is in running state.")
waitMachineStatusRunning(oc, newMasterMachine)
g.By("Verify that the old machine is deleted. The master machine count is same as initial one.")
waitforDesiredMachineCount(oc, masterNodeCount)
})
// author: [email protected]
g.It("Longduration-Author:skundu-NonPreRelease-Critical-53767-cluster-backup.sh exits with a non-zero code in case Etcd backup fails. [Disruptive]", func() {
g.By("Test for case OCP-53767 - cluster-backup.sh exits with a non-zero code in case Etcd backup fails.")
g.Skip("Skipping this test temporarily because it is redundant with OCP-42183")
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Run the backup")
masterN, etcdDb := runDRBackup(oc, strings.Fields(masterNodeList[0]))
defer func() {
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "rm", "-rf", "/home/core/assets/backup")
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Corrupt the etcd db file ")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "truncate", "-s", "126k", etcdDb)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Run the restore")
output, _ := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "/usr/local/bin/cluster-restore.sh", "/home/core/assets/backup")
o.Expect(strings.Contains(output, "Backup appears corrupted. Aborting!")).To(o.BeTrue())
o.Expect(strings.Contains(output, "non-zero exit code")).To(o.BeTrue())
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:skundu-Critical-68658-CEO prevents member deletion during revision rollout. [Disruptive]", func() {
g.By("Test for case OCP-68658 - CEO prevents member deletion during revision rollout.")
var (
mhcName = "control-plane-health-68658"
nameSpace = "openshift-machine-api"
maxUnhealthy = 1
)
g.By("1. Create MachineHealthCheck")
baseDir := exutil.FixturePath("testdata", "etcd")
mhcTemplate := filepath.Join(baseDir, "dr_mhc.yaml")
params := []string{"-f", mhcTemplate, "-p", "NAME=" + mhcName, "NAMESPACE=" + nameSpace, "MAXUNHEALTHY=" + strconv.Itoa(maxUnhealthy)}
defer oc.AsAdmin().Run("delete").Args("mhc", mhcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
g.By("2. Verify MachineHealthCheck")
mhcMaxUnhealthy, errStatus := oc.AsAdmin().Run("get").Args("-n", nameSpace, "mhc", mhcName, "-o", "jsonpath={.spec.maxUnhealthy}").Output()
o.Expect(errStatus).NotTo(o.HaveOccurred())
if mhcMaxUnhealthy != strconv.Itoa(maxUnhealthy) {
e2e.Failf("Failed to verify the newly created MHC %v", mhcName)
}
g.By("3. Get all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
g.By("4. Stop the kubelet service on one of the master nodes")
_, _ = exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "systemctl", "stop", "kubelet")
g.By("5. Ensure etcd oprator goes into degraded state and eventually recovers from it.")
waitForOperatorRestart(oc, "etcd")
waitforDesiredMachineCount(oc, masterNodeCount)
g.By("6. Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
})
// author: [email protected]
g.It("Author:skundu-Longduration-NonPreRelease-Critical-77922-workflow of point-in-time restoration. [Disruptive][Slow]", func() {
var (
bastionHost = ""
userForBastion = ""
)
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix", "ibmcloud"}
platformListWithoutBastion := []string{"vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
privateKeyForBastion := os.Getenv("SSH_CLOUD_PRIV_KEY")
if privateKeyForBastion == "" {
g.Skip("Failed to get the private key, skip the cases!!")
}
withoutBastion := in(iaasPlatform, platformListWithoutBastion)
if !withoutBastion {
bastionHost = os.Getenv("QE_BASTION_PUBLIC_ADDRESS")
if bastionHost == "" {
g.Skip("Failed to get the qe bastion public ip, skip the case !!")
}
userForBastion = getUserNameAndKeyonBationByPlatform(iaasPlatform)
if userForBastion == "" {
g.Skip("Failed to get the user for bastion host, hence skipping the case!!")
}
}
g.By("make sure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeInternalIPList := getNodeInternalIPListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("bastion host is : %v", bastionHost)
e2e.Logf("platform is : %v", iaasPlatform)
e2e.Logf("user on bastion is : %v", userForBastion)
g.By("Make sure all the nodes are normal")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
checkMessage := []string{
"SchedulingDisabled",
"NotReady",
}
for _, v := range checkMessage {
if strings.Contains(out, v) {
g.Skip("The cluster nodes are abnormal, skip this case")
}
}
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Run the backup on the recovery node.")
defer runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo rm -rf /home/core/assets/backup", privateKeyForBastion, userForBastion)
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
msg, err := runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo /usr/local/bin/cluster-backup.sh /home/core/assets/backup", privateKeyForBastion, userForBastion)
if err != nil {
e2e.Logf("backup failed with the err:%v, and try next round", err)
return false, nil
}
o.Expect(msg).To(o.ContainSubstring("snapshot db and kube resources are successfully saved"))
e2e.Logf("backup successfully saved.")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "backup failed with an error")
g.By("Disable the etcd pods on all the control plane nodes")
for i := 0; i < len(masterNodeInternalIPList); i++ {
_, err := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo -E /usr/local/bin/disable-etcd.sh", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Restore the backup on the recovery control plane host")
msg, err := runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo -E /usr/local/bin/cluster-restore.sh /home/core/assets/backup", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("starting restore-etcd static pod"))
g.By("Wait for the api server to come up after restore operation.")
errW := wait.Poll(20*time.Second, 900*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(masterNodeList[0], out); matched {
e2e.Logf("Api is back online:")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errW, "the Apiserver has not come up after point-in-time restore operation")
g.By("Wait for the nodes to be Ready.")
for i := 0; i < len(masterNodeList); i++ {
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", masterNodeList[i]).Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(" Ready", out); matched {
e2e.Logf("Node %s is back online:\n%s", masterNodeList[i], out)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "the kubelet start has not brought the node online and Ready")
}
defer checkOperator(oc, "etcd")
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"unsupportedConfigOverrides\": null}}")).Execute()
g.By("Turn off quorum guard to ensure revision rollouts of static pods")
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
errGrd := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"unsupportedConfigOverrides\": {\"useUnsupportedUnsafeNonHANonProductionUnstableEtcd\": true}}}")).Execute()
if errGrd != nil {
e2e.Logf("server is not ready yet, error: %s. Trying again ...", errGrd)
return false, nil
}
e2e.Logf("successfully patched.")
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "unable to patch the server to turn off the quorum guard.")
// both etcd and kube-apiserver operators start and end roll out almost simultaneously.
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
waitForOperatorRestart(oc, "etcd")
}()
waitForOperatorRestart(oc, "kube-apiserver")
wg.Wait()
})
})
| package disasterrecovery | ||||
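The test-case rows below repeatedly call an in helper (e.g. in(iaasPlatform, supportedList)) that is not part of this dump. Its behavior can be inferred from the call sites as plain slice membership; a hypothetical sketch:

```go
package disasterrecovery

// in reports whether val occurs in list; inferred from call sites such as
// in(iaasPlatform, supportedList) and in("Deleting", mMachineStatus).
// The repository's actual helper is not shown here and may differ.
func in(val string, list []string) bool {
	for _, item := range list {
		if item == val {
			return true
		}
	}
	return false
}
```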
test case | openshift/openshift-tests-private | c9eb7779-f125-4558-8eca-9a4457450cc8 | Author:yinzhou-NonPreRelease-Longduration-Critical-42183-backup and restore should perform consistency checks on etcd snapshots [Disruptive] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Author:yinzhou-NonPreRelease-Longduration-Critical-42183-backup and restore should perform consistency checks on etcd snapshots [Disruptive]", func() {
g.By("Test for case OCP-42183 backup and restore should perform consistency checks on etcd snapshots")
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Run the backup")
masterN, etcdDb := runDRBackup(oc, masterNodeList)
defer func() {
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "rm", "-rf", "/home/core/assets/backup")
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Corrupt the etcd db file ")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "truncate", "-s", "126k", etcdDb)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Run the restore")
output, _ := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "/usr/local/bin/cluster-restore.sh", "/home/core/assets/backup")
o.Expect(output).To(o.ContainSubstring("Backup appears corrupted. Aborting!"))
}) | ||||||
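runDRBackup is called above but not included in this dump. A rough sketch of what it might do, matching its call sites: run cluster-backup.sh on one master through a debug pod and return that node plus the snapshot db path parsed from the script output. The regexp and the snapshot file naming are assumptions.

```go
package disasterrecovery

import (
	"regexp"

	o "github.com/onsi/gomega"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// runDRBackupSketch is a hypothetical stand-in for the repo's runDRBackup.
func runDRBackupSketch(oc *exutil.CLI, masterNodes []string) (string, string) {
	node := masterNodes[0]
	out, err := exutil.DebugNodeWithOptionsAndChroot(oc, node, []string{"-q"}, "/usr/local/bin/cluster-backup.sh", "/home/core/assets/backup")
	o.Expect(err).NotTo(o.HaveOccurred())
	// cluster-backup.sh logs the saved snapshot; pull its path out of the output.
	etcdDb := regexp.MustCompile(`/home/core/assets/backup/snapshot_\S+\.db`).FindString(out)
	o.Expect(etcdDb).NotTo(o.BeEmpty(), "no snapshot path found in backup output")
	return node, etcdDb
}
```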
test case | openshift/openshift-tests-private | 49a31113-8220-4a37-924c-f1533efa4d62 | Author:skundu-LEVEL0-Longduration-NonPreRelease-Critical-77921-workflow of quorum restoration. [Disruptive][Slow] | ['"fmt"', '"os"', '"regexp"', '"sync"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Author:skundu-LEVEL0-Longduration-NonPreRelease-Critical-77921-workflow of quorum restoration. [Disruptive][Slow]", func() {
var (
bastionHost = ""
userForBastion = ""
)
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix", "ibmcloud"}
platformListWithoutBastion := []string{"vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
privateKeyForBastion := os.Getenv("SSH_CLOUD_PRIV_KEY")
if privateKeyForBastion == "" {
g.Skip("Failed to get the private key, skip the cases!!")
}
withoutBastion := in(iaasPlatform, platformListWithoutBastion)
if !withoutBastion {
bastionHost = os.Getenv("QE_BASTION_PUBLIC_ADDRESS")
if bastionHost == "" {
g.Skip("Failed to get the qe bastion public ip, skip the case !!")
}
userForBastion = getUserNameAndKeyonBationByPlatform(iaasPlatform)
if userForBastion == "" {
g.Skip("Failed to get the user for bastion host, hence skipping the case!!")
}
}
g.By("make sure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeInternalIPList := getNodeInternalIPListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("bastion host is : %v", bastionHost)
e2e.Logf("platform is : %v", iaasPlatform)
e2e.Logf("user on bastion is : %v", userForBastion)
g.By("Make sure all the nodes are normal")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
checkMessage := []string{
"SchedulingDisabled",
"NotReady",
}
for _, v := range checkMessage {
if strings.Contains(out, v) {
g.Skip("The cluster nodes are abnormal, skip this case")
}
}
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Make the two non-recovery control plane nodes NOT_READY")
//if assert err the cluster will be unavailable
for i := 1; i < len(masterNodeInternalIPList); i++ {
_, err := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo /usr/local/bin/disable-etcd.sh", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
_, err1 := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo rm -rf /var/lib/etcd", privateKeyForBastion, userForBastion)
o.Expect(err1).NotTo(o.HaveOccurred())
_, err2 := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo systemctl stop kubelet.service", privateKeyForBastion, userForBastion)
o.Expect(err2).NotTo(o.HaveOccurred())
}
g.By("Run the quorum-restore script on the recovery control plane host")
msg, err := runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo -E /usr/local/bin/quorum-restore.sh", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("starting restore-etcd static pod"))
g.By("Wait for the api server to come up after restore operation.")
errW := wait.Poll(20*time.Second, 900*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(masterNodeList[0], out); matched {
e2e.Logf("Api is back online:")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errW, "the Apiserver has not come up after quorum restore operation")
g.By("Start the kubelet service on both the non-recovery control plane hosts")
for i := 1; i < len(masterNodeList); i++ {
_, _ = runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo systemctl start kubelet.service", privateKeyForBastion, userForBastion)
}
g.By("Wait for the nodes to be Ready.")
for i := 0; i < len(masterNodeList); i++ {
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", masterNodeList[i]).Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(" Ready", out); matched {
e2e.Logf("Node %s is back online:\n%s", masterNodeList[i], out)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "the kubelet start has not brought the node online and Ready")
}
defer checkOperator(oc, "etcd")
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"unsupportedConfigOverrides\": null}}")).Execute()
g.By("Turn off quorum guard to ensure revision rollouts of static pods")
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
errGrd := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"unsupportedConfigOverrides\": {\"useUnsupportedUnsafeNonHANonProductionUnstableEtcd\": true}}}")).Execute()
if errGrd != nil {
e2e.Logf("server is not ready yet, error: %s. Trying again ...", errGrd)
return false, nil
}
e2e.Logf("successfully patched.")
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "unable to patch the server to turn off the quorum guard.")
// both etcd and kube-apiserver operators start and end roll out almost simultaneously.
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
waitForOperatorRestart(oc, "etcd")
}()
waitForOperatorRestart(oc, "kube-apiserver")
wg.Wait()
}) | |||||
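The quorum-restoration test drives every node-level step through runPSCommand(bastionHost, nodeIP, cmd, privateKey, user), which is not shown in this dump. A hypothetical shape that matches those call sites, shelling out to the ssh CLI with a ProxyJump through the bastion; it assumes the private key is a file path, and the real helper presumably also handles the vsphere/nutanix case where bastionHost is empty:

```go
package disasterrecovery

import (
	"fmt"
	"os/exec"
)

// runPSCommandSketch runs cmd on a control plane node as the core user,
// jumping through the QE bastion. Illustrative only.
func runPSCommandSketch(bastion, nodeIP, cmd, privateKey, user string) (string, error) {
	args := []string{
		"-i", privateKey,
		"-o", "StrictHostKeyChecking=no",
		"-o", fmt.Sprintf("ProxyJump=%s@%s", user, bastion),
		fmt.Sprintf("core@%s", nodeIP),
		cmd,
	}
	out, err := exec.Command("ssh", args...).CombinedOutput()
	return string(out), err
}
```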
test case | openshift/openshift-tests-private | ddeaf9d3-2cd4-45ba-b489-cd572045102f | Author:geliu-NonPreRelease-Longduration-Critical-50205-lost master can be replaced by new one with machine config recreation in ocp 4.x [Disruptive][Slow] | ['"os/exec"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Author:geliu-NonPreRelease-Longduration-Critical-50205-lost master can be replaced by new one with machine config recreation in ocp 4.x [Disruptive][Slow]", func() {
g.By("Test for case lost master can be replaced by new one with machine config recreation in ocp 4.x")
g.By("Get all the master node name & count")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
g.By("make sure all the etcd pods are running")
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
g.By("Export the machine config file for 1st master node")
output, err := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
masterMachineNameList := strings.Fields(output)
machineYmlFile := ""
machineYmlFile, err = oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", masterMachineNameList[0], "-o", "yaml").OutputToFile("machine.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
newMachineConfigFile := strings.Replace(machineYmlFile, "machine.yaml", "machineUpd.yaml", -1)
defer exec.Command("bash", "-c", "rm -f "+machineYmlFile).Output()
defer exec.Command("bash", "-c", "rm -f "+newMachineConfigFile).Output()
g.By("update machineYmlFile to newMachineYmlFile:")
newMasterMachineNameSuffix := masterMachineNameList[0] + "00"
o.Expect(updateMachineYmlFile(machineYmlFile, masterMachineNameList[0], newMasterMachineNameSuffix)).To(o.BeTrue())
g.By("Create new machine")
resultFile, _ := exec.Command("bash", "-c", "cat "+newMachineConfigFile).Output()
e2e.Logf("####newMasterMachineNameSuffix is %s\n", string(resultFile))
_, err = oc.AsAdmin().Run("create").Args("-n", "openshift-machine-api", "-f", newMachineConfigFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitMachineStatusRunning(oc, newMasterMachineNameSuffix)
g.By("Delete machine of the unhealthy master node")
_, err = oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "machine", masterMachineNameList[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(getNodeListByLabel(oc, "node-role.kubernetes.io/master="))).To(o.Equal(masterNodeCount))
}) | |||||
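updateMachineYmlFile is another repo-internal helper whose behavior can only be inferred from its call site: it rewrites the exported machine YAML under a new machine name and saves it next to the original as machineUpd.yaml, returning true on success. A rough sketch under those assumptions; the real helper likely also strips runtime-only fields (status, providerID, resourceVersion) before the new machine is created:

```go
package disasterrecovery

import (
	"os"
	"strings"
)

// updateMachineYmlFileSketch is a hypothetical stand-in for the repo's
// updateMachineYmlFile.
func updateMachineYmlFileSketch(ymlFile, oldName, newName string) bool {
	data, err := os.ReadFile(ymlFile)
	if err != nil {
		return false
	}
	// Rename every occurrence of the old machine name.
	updated := strings.ReplaceAll(string(data), oldName, newName)
	newFile := strings.Replace(ymlFile, "machine.yaml", "machineUpd.yaml", 1)
	return os.WriteFile(newFile, []byte(updated), 0644) == nil
}
```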
test case | openshift/openshift-tests-private | 161a3c45-6f97-4e4a-9abd-bb847605db83 | Longduration-Author:skundu-NonPreRelease-Critical-51109-Delete an existing machine at first and then add a new one. [Disruptive] | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Longduration-Author:skundu-NonPreRelease-Critical-51109-Delete an existing machine at first and then add a new one. [Disruptive]", func() {
g.By("Test for delete an existing machine at first and then add a new one")
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
var (
mMachineop = ""
machineStatusOutput = ""
)
g.By("Make sure all the etcd pods are running")
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
g.By("Get all the master node name & count")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
g.By("Get master machine name list")
output, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(errMachineConfig).NotTo(o.HaveOccurred())
masterMachineNameList := strings.Fields(output)
g.By("At first delete machine of the master node without adding new one")
errMachineDelete := oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "--wait=false", "machine", masterMachineNameList[0]).Execute()
o.Expect(errMachineDelete).NotTo(o.HaveOccurred())
g.By("Verify that the machine is getting deleted and new machine is automatically created")
waitforDesiredMachineCount(oc, masterNodeCount+1)
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
mMachineopraw, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
if errMachineConfig != nil {
e2e.Logf("Failed to get machine name: %s. Trying again", errMachineConfig)
return false, nil
}
mMachineop = mMachineopraw
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "Failed to get master machine names")
mMachineNameList := strings.Fields(mMachineop)
errSt := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
machineStatusraw, errStatus := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o", "jsonpath={.items[*].status.phase}").Output()
if errStatus != nil {
e2e.Logf("Failed to get machine status: %s. Trying again", errStatus)
return false, nil
}
machineStatusOutput = machineStatusraw
return true, nil
})
exutil.AssertWaitPollNoErr(errSt, "Failed to get master machine status")
mMachineStatus := strings.Fields(machineStatusOutput)
e2e.Logf("masterMachineStatus after deletion is %v", mMachineStatus)
o.Expect(in("Deleting", mMachineStatus)).To(o.Equal(true))
newMasterMachine := getNewMastermachine(mMachineStatus, mMachineNameList, "Provision")
g.By("Verify that the new machine is in running state.")
waitMachineStatusRunning(oc, newMasterMachine)
g.By("Verify that the old machine is deleted. The master machine count is same as initial one.")
waitforDesiredMachineCount(oc, masterNodeCount)
}) | |||||
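getNewMastermachine pairs the machine phase list with the machine name list (both produced by the jsonpath queries above, in the same order) and returns the machine whose phase starts with the given prefix, e.g. "Provision" matching Provisioning/Provisioned. A minimal sketch inferred from those call sites; the repo's real helper is not shown in this dump:

```go
package disasterrecovery

import "strings"

// getNewMastermachineSketch is a hypothetical stand-in for the repo's
// getNewMastermachine.
func getNewMastermachineSketch(statuses, names []string, prefix string) string {
	for i, st := range statuses {
		if i < len(names) && strings.HasPrefix(st, prefix) {
			return names[i]
		}
	}
	return ""
}
```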
test case | openshift/openshift-tests-private | 41307e40-77ca-4305-944e-30a388979e7e | Longduration-Author:skundu-NonPreRelease-Critical-59377-etcd-operator should not scale-down when all members are healthy. [Disruptive] | ['"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Longduration-Author:skundu-NonPreRelease-Critical-59377-etcd-operator should not scale-down when all members are healthy. [Disruptive]", func() {
g.By("etcd-operator should not scale-down when all members are healthy")
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
var (
mMachineop = ""
machineStatusOutput = ""
)
g.By("Make sure all the etcd pods are running")
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
g.By("Get all the master node name & count")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
e2e.Logf("masterNodeCount is %v", masterNodeCount)
g.By("Get master machine name list")
output, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(errMachineConfig).NotTo(o.HaveOccurred())
masterMachineNameList := strings.Fields(output)
e2e.Logf("masterMachineNameList is %v", masterMachineNameList)
g.By("Delete the CR")
_, err := oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "controlplanemachineset.machine.openshift.io", "cluster").Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForDesiredStateOfCR(oc, "Inactive")
g.By("delete machine of the master node")
errMachineDelete := oc.AsAdmin().Run("delete").Args("-n", "openshift-machine-api", "--wait=false", "machine", masterMachineNameList[0]).Execute()
o.Expect(errMachineDelete).NotTo(o.HaveOccurred())
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
machineStatusOutputraw, errStatus := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o", "jsonpath={.items[*].status.phase}").Output()
if errStatus != nil {
e2e.Logf("Failed to get master machine name: %s. Trying again", errStatus)
return false, nil
}
machineStatusOutput = machineStatusOutputraw
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "Failed to get master machine names")
masterMachineStatus := strings.Fields(machineStatusOutput)
e2e.Logf("masterMachineStatus after deletion is %v", masterMachineStatus)
waitMachineDesiredStatus(oc, masterMachineNameList[0], "Deleting")
g.By("enable the control plane machineset")
errW := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
patch := `[{"op": "replace", "path": "/spec/state", "value": "Active"}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", "openshift-machine-api", "controlplanemachineset.machine.openshift.io", "cluster", "--type=json", "-p", patch).Execute()
if patchErr != nil {
e2e.Logf("unable to patch the machineset, error: %s. Trying again ...", patchErr)
return false, nil
}
e2e.Logf("successfully patched the machineset.")
return true, nil
})
exutil.AssertWaitPollNoErr(errW, "unable to enable the control plane machineset.")
waitForDesiredStateOfCR(oc, "Active")
errSt := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
machineStatusraw, errStatus := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o", "jsonpath={.items[*].status.phase}").Output()
if errStatus != nil {
e2e.Logf("Failed to get machine status: %s. Trying again", errStatus)
return false, nil
}
if match, _ := regexp.MatchString("Provision", machineStatusraw); match {
e2e.Logf("machine status Provision showed up")
machineStatusOutput = machineStatusraw
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errSt, "Failed to get master machine status")
mMachineStatus := strings.Fields(machineStatusOutput)
errWt := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
mMachineopraw, errMachineConfig := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath={.items[*].metadata.name}").Output()
if errMachineConfig != nil {
e2e.Logf("Failed to get machine name: %s. Trying again", errMachineConfig)
return false, nil
}
mMachineop = mMachineopraw
return true, nil
})
exutil.AssertWaitPollNoErr(errWt, "Failed to get master machine names")
mMachineNameList := strings.Fields(mMachineop)
e2e.Logf("masterMachineStatus after enabling the CPMS is %v", mMachineStatus)
newMasterMachine := getNewMastermachine(mMachineStatus, mMachineNameList, "Provision")
g.By("Verify that the new machine is in running state.")
waitMachineStatusRunning(oc, newMasterMachine)
g.By("Verify that the old machine is deleted. The master machine count is same as initial one.")
waitforDesiredMachineCount(oc, masterNodeCount)
}) | |||||
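waitMachineStatusRunning, waitMachineDesiredStatus, waitforDesiredMachineCount, and waitForDesiredStateOfCR all follow the same poll-until-condition shape used inline above. A generic sketch with the status lookup injected as a closure (the concrete oc jsonpath queries are the ones already shown in the tests); the interval and timeout are assumptions:

```go
package disasterrecovery

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForPhase polls getPhase until it returns the wanted value or the
// timeout elapses; transient lookup errors are retried, not fatal.
func waitForPhase(getPhase func() (string, error), want string, timeout time.Duration) error {
	return wait.Poll(10*time.Second, timeout, func() (bool, error) {
		phase, err := getPhase()
		if err != nil {
			return false, nil
		}
		return phase == want, nil
	})
}
```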
test case | openshift/openshift-tests-private | 6db57364-eb56-4387-8497-1145de090e08 | Longduration-Author:skundu-NonPreRelease-Critical-53767-cluster-backup.sh exits with a non-zero code in case Etcd backup fails. [Disruptive] | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Longduration-Author:skundu-NonPreRelease-Critical-53767-cluster-backup.sh exits with a non-zero code in case Etcd backup fails. [Disruptive]", func() {
g.By("Test for case OCP-53767 - cluster-backup.sh exits with a non-zero code in case Etcd backup fails.")
g.Skip("Skipping this test temporarily because it is redundant with OCP-42183")
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Run the backup")
masterN, etcdDb := runDRBackup(oc, strings.Fields(masterNodeList[0]))
defer func() {
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "rm", "-rf", "/home/core/assets/backup")
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Corrupt the etcd db file ")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "truncate", "-s", "126k", etcdDb)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Run the restore")
output, _ := exutil.DebugNodeWithOptionsAndChroot(oc, masterN, []string{"-q"}, "/usr/local/bin/cluster-restore.sh", "/home/core/assets/backup")
o.Expect(strings.Contains(output, "Backup appears corrupted. Aborting!")).To(o.BeTrue())
o.Expect(strings.Contains(output, "non-zero exit code")).To(o.BeTrue())
}) | |||||
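checkEtcdPodStatus(oc, "openshift-etcd"), used as a precondition throughout these tests, is also repo-internal. A plausible sketch: list the etcd pod phases in the given namespace and report whether every pod is Running. The label selector and jsonpath here are assumptions:

```go
package disasterrecovery

import (
	"strings"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// checkEtcdPodStatusSketch is a hypothetical stand-in for the repo's
// checkEtcdPodStatus.
func checkEtcdPodStatusSketch(oc *exutil.CLI, ns string) bool {
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", ns, "-l", "app=etcd", "-o", "jsonpath={.items[*].status.phase}").Output()
	if err != nil {
		return false
	}
	for _, phase := range strings.Fields(out) {
		if phase != "Running" {
			return false
		}
	}
	return true
}
```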
test case | openshift/openshift-tests-private | 3bf6b226-14f8-4be4-b632-4e55f3944920 | Longduration-NonPreRelease-Author:skundu-Critical-68658-CEO prevents member deletion during revision rollout. [Disruptive] | ['"path/filepath"', '"strconv"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Longduration-NonPreRelease-Author:skundu-Critical-68658-CEO prevents member deletion during revision rollout. [Disruptive]", func() {
g.By("Test for case OCP-68658 - CEO prevents member deletion during revision rollout.")
var (
mhcName = "control-plane-health-68658"
nameSpace = "openshift-machine-api"
maxUnhealthy = 1
)
g.By("1. Create MachineHealthCheck")
baseDir := exutil.FixturePath("testdata", "etcd")
mhcTemplate := filepath.Join(baseDir, "dr_mhc.yaml")
params := []string{"-f", mhcTemplate, "-p", "NAME=" + mhcName, "NAMESPACE=" + nameSpace, "MAXUNHEALTHY=" + strconv.Itoa(maxUnhealthy)}
defer oc.AsAdmin().Run("delete").Args("mhc", mhcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
g.By("2. Verify MachineHealthCheck")
mhcMaxUnhealthy, errStatus := oc.AsAdmin().Run("get").Args("-n", nameSpace, "mhc", mhcName, "-o", "jsonpath={.spec.maxUnhealthy}").Output()
o.Expect(errStatus).NotTo(o.HaveOccurred())
if mhcMaxUnhealthy != strconv.Itoa(maxUnhealthy) {
e2e.Failf("Failed to verify the newly created MHC %v", mhcName)
}
g.By("3. Get all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeCount := len(masterNodeList)
g.By("4. Stop the kubelet service on one of the master nodes")
_, _ = exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "systemctl", "stop", "kubelet")
g.By("5. Ensure etcd oprator goes into degraded state and eventually recovers from it.")
waitForOperatorRestart(oc, "etcd")
waitforDesiredMachineCount(oc, masterNodeCount)
g.By("6. Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
}) | |||||
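waitForOperatorRestart is used above to assert that an operator rolls out (goes Progressing) and then settles again. A sketch of that two-phase wait against the clusteroperator conditions; the jsonpath and the timeouts are assumptions, not the repo's implementation:

```go
package disasterrecovery

import (
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// waitForOperatorRestartSketch is a hypothetical stand-in for the repo's
// waitForOperatorRestart.
func waitForOperatorRestartSketch(oc *exutil.CLI, name string) error {
	query := `-o=jsonpath={range .status.conditions[*]}{.type}={.status} {end}`
	// Phase 1: wait until the operator starts progressing (or degrades).
	if err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator", name, query).Output()
		if err != nil {
			return false, nil
		}
		return strings.Contains(out, "Progressing=True"), nil
	}); err != nil {
		return err
	}
	// Phase 2: wait until it is Available and no longer Progressing or Degraded.
	return wait.Poll(10*time.Second, 15*time.Minute, func() (bool, error) {
		out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator", name, query).Output()
		if err != nil {
			return false, nil
		}
		return strings.Contains(out, "Available=True") && strings.Contains(out, "Progressing=False") && strings.Contains(out, "Degraded=False"), nil
	})
}
```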
test case | openshift/openshift-tests-private | d7a28739-fae2-4df0-983d-b18d6f27d900 | Author:skundu-Longduration-NonPreRelease-Critical-77922-workflow of point-in-time restoration. [Disruptive][Slow] | ['"fmt"', '"os"', '"regexp"', '"sync"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/dr_testing_etcd.go | g.It("Author:skundu-Longduration-NonPreRelease-Critical-77922-workflow of point-in-time restoration. [Disruptive][Slow]", func() {
var (
bastionHost = ""
userForBastion = ""
)
g.By("check the platform is supported or not")
supportedList := []string{"aws", "gcp", "azure", "vsphere", "nutanix", "ibmcloud"}
platformListWithoutBastion := []string{"vsphere", "nutanix"}
support := in(iaasPlatform, supportedList)
if !support {
g.Skip("The platform is not supported now, skip the cases!!")
}
privateKeyForBastion := os.Getenv("SSH_CLOUD_PRIV_KEY")
if privateKeyForBastion == "" {
g.Skip("Failed to get the private key, skip the cases!!")
}
withoutBastion := in(iaasPlatform, platformListWithoutBastion)
if !withoutBastion {
bastionHost = os.Getenv("QE_BASTION_PUBLIC_ADDRESS")
if bastionHost == "" {
g.Skip("Failed to get the qe bastion public ip, skip the case !!")
}
userForBastion = getUserNameAndKeyonBationByPlatform(iaasPlatform)
if userForBastion == "" {
g.Skip("Failed to get the user for bastion host, hence skipping the case!!")
}
}
g.By("make sure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if !podAllRunning {
g.Skip("The etcd pods are not running")
}
defer o.Expect(checkEtcdPodStatus(oc, "openshift-etcd")).To(o.BeTrue())
g.By("select all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
masterNodeInternalIPList := getNodeInternalIPListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("bastion host is : %v", bastionHost)
e2e.Logf("platform is : %v", iaasPlatform)
e2e.Logf("user on bastion is : %v", userForBastion)
g.By("Make sure all the nodes are normal")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
checkMessage := []string{
"SchedulingDisabled",
"NotReady",
}
for _, v := range checkMessage {
if strings.Contains(out, v) {
g.Skip("The cluster nodes are abnormal, skip this case")
}
}
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
g.By("Check kube-apiserver oprator status")
checkOperator(oc, "kube-apiserver")
g.By("Run the backup on the recovery node.")
defer runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo rm -rf /home/core/assets/backup", privateKeyForBastion, userForBastion)
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
msg, err := runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo /usr/local/bin/cluster-backup.sh /home/core/assets/backup", privateKeyForBastion, userForBastion)
if err != nil {
e2e.Logf("backup failed with the err:%v, and try next round", err)
return false, nil
}
o.Expect(msg).To(o.ContainSubstring("snapshot db and kube resources are successfully saved"))
e2e.Logf("backup successfully saved.")
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("backup is failed with error"))
g.By("Disable the etcd pods on all the control plane nodes")
for i := 0; i < len(masterNodeInternalIPList); i++ {
_, err := runPSCommand(bastionHost, masterNodeInternalIPList[i], "sudo -E /usr/local/bin/disable-etcd.sh", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Restore the backup on the recovery control plane host")
msg, err := runPSCommand(bastionHost, masterNodeInternalIPList[0], "sudo -E /usr/local/bin/cluster-restore.sh /home/core/assets/backup", privateKeyForBastion, userForBastion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("starting restore-etcd static pod"))
g.By("Wait for the api server to come up after restore operation.")
errW := wait.Poll(20*time.Second, 900*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(masterNodeList[0], out); matched {
e2e.Logf("Api is back online:")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errW, "the Apiserver has not come up after point-in-time restore operation")
g.By("Wait for the nodes to be Ready.")
for i := 0; i < len(masterNodeList); i++ {
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", masterNodeList[i]).Output()
if err != nil {
e2e.Logf("Fail to get master, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(" Ready", out); matched {
e2e.Logf("Node %s is back online:\n%s", masterNodeList[i], out)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "the kubelet start has not brought the node online and Ready")
}
defer checkOperator(oc, "etcd")
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"unsupportedConfigOverrides\": null}}")).Execute()
g.By("Turn off quorum guard to ensure revision rollouts of static pods")
errWait := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
errGrd := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", "{\"spec\": {\"unsupportedConfigOverrides\": {\"useUnsupportedUnsafeNonHANonProductionUnstableEtcd\": true}}}").Execute()
if errGrd != nil {
e2e.Logf("server is not ready yet, error: %s. Trying again ...", errGrd)
return false, nil
}
e2e.Logf("successfully patched.")
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "unable to patch the server to turn off the quorum guard.")
// both etcd and kube-apiserver operators start and end roll out almost simultaneously.
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
waitForOperatorRestart(oc, "etcd")
}()
waitForOperatorRestart(oc, "kube-apiserver")
wg.Wait()
}) | |||||
test | openshift/openshift-tests-private | 459dc76c-6630-4aeb-a3d8-5865a0cacb91 | hypershift_dr | import (
"fmt"
"os"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/hypershift_dr.go | package disasterrecovery
import (
"fmt"
"os"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-disasterrecovery] DR_Testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIWithoutNamespace("default")
controlPlaneNamespace string
clusterNames string
guestClusterNamespace string
buildPruningBaseDir string
)
g.BeforeEach(func() {
output, err := oc.AsAdmin().Run("get").Args("pods", "-n", "hypershift", "-ojsonpath={.items[*].metadata.name}").Output()
if err != nil || len(output) <= 0 {
g.Skip("hypershift operator not found, skip test.")
}
output, err = oc.AsAdmin().Run("get").Args("pod", "-n", "hypershift", "-o=jsonpath={.items[0].status.phase}").Output()
if err != nil || !strings.Contains(output, "Running") {
g.Skip("hypershift pod is not in running.")
}
e2e.Logf("get first guest cluster to run test.")
guestClusterNamespace, clusterNames = getHostedClusterName(oc)
if len(guestClusterNamespace) <= 0 || len(clusterNames) <= 0 {
g.Skip("hypershift guest cluster not found, skip test.")
}
controlPlaneNamespace = guestClusterNamespace + "-" + clusterNames
buildPruningBaseDir = exutil.FixturePath("testdata", "etcd")
})
g.AfterEach(func() {
if !healthyCheck(oc) {
e2e.Failf("Cluster healthy check failed after the test.")
}
output0, err0 := oc.AsAdmin().Run("get").Args("pod", "-n", "hypershift", "-o=jsonpath={.items[0].status.phase}").Output()
if !strings.Contains(output0, "Running") || err0 != nil {
e2e.Failf("hypershift pod is not in running.")
}
})
// author: [email protected]
g.It("Author:geliu-Critical-77423-Backing up and restoring etcd on hypershift hosted cluster [Disruptive]", func() {
g.By("Pause reconciliation of the hosted cluster.")
patch := "{\"spec\": {\"pausedUntil\": \"true\"}}"
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", guestClusterNamespace, "hostedclusters/"+clusterNames, "--type=merge", "-p", patch).Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
g.By("Scale down the kube-apiserver, openshift-apiserver, openshift-oauth-apiserver.")
err := oc.AsAdmin().Run("scale").Args("-n", controlPlaneNamespace, "deployment/kube-apiserver", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("scale").Args("-n", controlPlaneNamespace, "deployment/openshift-apiserver", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("scale").Args("-n", controlPlaneNamespace, "deployment/openshift-oauth-apiserver", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Take a snapshot of Etcd.")
etcdBackupCmd := "etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=https://localhost:2379 snapshot save /var/lib/snapshot.db"
output1, err := oc.AsAdmin().Run("exec").Args("-n", controlPlaneNamespace, "etcd-0", "--", "sh", "-c", etcdBackupCmd).Output()
if !strings.Contains(output1, "Snapshot saved at /var/lib/snapshot.db") || err != nil {
e2e.Failf("Etcd backup is not succeed.")
}
g.By("Make a local copy of the snapshot.")
err = oc.AsAdmin().Run("cp").Args(controlPlaneNamespace+"/"+"etcd-0"+":"+"/var/lib/snapshot.db", "/tmp/snapshot.db", "--retries=5").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Scale down the Etcd statefulset.")
err = oc.Run("scale").Args("-n", controlPlaneNamespace, "statefulset/etcd", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete volumes for second and third members.")
err = oc.AsAdmin().Run("delete").Args("-n", controlPlaneNamespace, "pvc/data-etcd-1", "pvc/data-etcd-2").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a pod to access the first etcd member’s data.")
e2e.Logf("Get the Etcd image from statefulset/etcd.")
etcdImage, err := oc.AsAdmin().Run("get").Args("-n", controlPlaneNamespace, "statefulset/etcd", "-o=jsonpath={.spec.template.spec.containers[0].image}").Output()
if len(etcdImage) <= 0 || err != nil {
e2e.Failf("Etcd image is not extracted successfully.")
}
e2e.Logf("Prepare etcd deployment file.")
etcdDeployUptFile := "/tmp/etcd_data_deployment.yaml"
etcdDeployFile := filepath.Join(buildPruningBaseDir, "dr_etcd_image.yaml")
defer os.RemoveAll(etcdDeployUptFile)
if !fileReplaceKeyword(etcdDeployFile, etcdDeployUptFile, "ETCD_IMAGE", etcdImage) {
e2e.Failf("keyword replace in etcd deploy Yaml file Failure.")
}
defer oc.AsAdmin().Run("delete").Args("-f", etcdDeployUptFile, "-n", controlPlaneNamespace).Execute()
_, err = oc.AsAdmin().Run("create").Args("-n", controlPlaneNamespace, "-f", etcdDeployUptFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Waiting for etcd-data deployment/pod running...")
waitForPodReady(oc, controlPlaneNamespace, "app=etcd-data", 180)
g.By("Remove old data from the etcd-data pod and create new dir.")
etcdDataPod, err := oc.AsAdmin().Run("get").Args("pods", "-l", "app=etcd-data", "-n", controlPlaneNamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("exec").Args(etcdDataPod, "-n", controlPlaneNamespace, "--", "rm", "-rf", "/var/lib/data").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("exec").Args(etcdDataPod, "-n", controlPlaneNamespace, "--", "mkdir", "-p", "/var/lib/data").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Move snapshot.db from local copy to etcd-data pod.")
err = oc.AsAdmin().Run("cp").Args("/tmp/snapshot.db", controlPlaneNamespace+"/"+etcdDataPod+":"+"/var/lib/snapshot.db", "--retries=5").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Restore the Etcd snapshot.")
cmd := fmt.Sprintf("etcdutl snapshot restore /var/lib/snapshot.db --data-dir=/var/lib/data --skip-hash-check --name etcd-0 --initial-cluster-token=etcd-cluster --initial-cluster etcd-0=https://etcd-0.etcd-discovery.%s.svc:2380,etcd-1=https://etcd-1.etcd-discovery.%s.svc:2380,etcd-2=https://etcd-2.etcd-discovery.%s.svc:2380 --initial-advertise-peer-urls https://etcd-0.etcd-discovery.%s.svc:2380", controlPlaneNamespace, controlPlaneNamespace, controlPlaneNamespace, controlPlaneNamespace)
err = oc.AsAdmin().Run("exec").Args(etcdDataPod, "-n", controlPlaneNamespace, "--", "sh", "-c", cmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("scale statefulset/etcd replicas to 3.")
err = oc.Run("scale").Args("-n", controlPlaneNamespace, "statefulset/etcd", "--replicas=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for the Etcd member pods to return and report as available.")
podAllRunning := checkEtcdPodStatus(oc, controlPlaneNamespace)
if podAllRunning != true {
e2e.Failf("The ectd pods are not running")
}
g.By("Scale up all etcd-writer deployments.")
err = oc.Run("scale").Args("deployment", "-n", controlPlaneNamespace, "kube-apiserver", "openshift-apiserver", "openshift-oauth-apiserver", "--replicas=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Restore reconciliation of the hosted cluster.")
patch = "{\"spec\":{\"pausedUntil\": \"\"}}"
errPatch = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd/cluster", "--type=merge", "-p", patch).Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
})
})
| package disasterrecovery | ||||
test case | openshift/openshift-tests-private | 80087e59-e91a-4087-9ac6-6fb7107a06c8 | Author:geliu-Critical-77423-Backing up and restoring etcd on hypershift hosted cluster [Disruptive] | ['"fmt"', '"os"', '"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/hypershift_dr.go | g.It("Author:geliu-Critical-77423-Backing up and restoring etcd on hypershift hosted cluster [Disruptive]", func() {
g.By("Pause reconciliation of the hosted cluster.")
patch := "{\"spec\": {\"pausedUntil\": \"true\"}}"
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", guestClusterNamespace, "hostedclusters/"+clusterNames, "--type=merge", "-p", patch).Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
g.By("Scale down the kube-apiserver, openshift-apiserver, openshift-oauth-apiserver.")
err := oc.AsAdmin().Run("scale").Args("-n", controlPlaneNamespace, "deployment/kube-apiserver", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("scale").Args("-n", controlPlaneNamespace, "deployment/openshift-apiserver", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("scale").Args("-n", controlPlaneNamespace, "deployment/openshift-oauth-apiserver", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Take a snapshot of Etcd.")
etcdBackupCmd := "etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=https://localhost:2379 snapshot save /var/lib/snapshot.db"
output1, err := oc.AsAdmin().Run("exec").Args("-n", controlPlaneNamespace, "etcd-0", "--", "sh", "-c", etcdBackupCmd).Output()
if !strings.Contains(output1, "Snapshot saved at /var/lib/snapshot.db") || err != nil {
e2e.Failf("Etcd backup is not succeed.")
}
g.By("Make a local copy of the snapshot.")
err = oc.AsAdmin().Run("cp").Args(controlPlaneNamespace+"/"+"etcd-0"+":"+"/var/lib/snapshot.db", "/tmp/snapshot.db", "--retries=5").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Scale down the Etcd statefulset.")
err = oc.Run("scale").Args("-n", controlPlaneNamespace, "statefulset/etcd", "--replicas=0").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete volumes for second and third members.")
err = oc.AsAdmin().Run("delete").Args("-n", controlPlaneNamespace, "pvc/data-etcd-1", "pvc/data-etcd-2").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a pod to access the first etcd member’s data.")
e2e.Logf("Get the Etcd image from statefulset/etcd.")
etcdImage, err := oc.AsAdmin().Run("get").Args("-n", controlPlaneNamespace, "statefulset/etcd", "-o=jsonpath={.spec.template.spec.containers[0].image}").Output()
if len(etcdImage) <= 0 || err != nil {
e2e.Failf("Etcd image is not extracted successfully.")
}
e2e.Logf("Prepare etcd deployment file.")
etcdDeployUptFile := "/tmp/etcd_data_deployment.yaml"
etcdDeployFile := filepath.Join(buildPruningBaseDir, "dr_etcd_image.yaml")
defer os.RemoveAll(etcdDeployUptFile)
if !fileReplaceKeyword(etcdDeployFile, etcdDeployUptFile, "ETCD_IMAGE", etcdImage) {
e2e.Failf("keyword replace in etcd deploy Yaml file Failure.")
}
defer oc.AsAdmin().Run("delete").Args("-f", etcdDeployUptFile, "-n", controlPlaneNamespace).Execute()
_, err = oc.AsAdmin().Run("create").Args("-n", controlPlaneNamespace, "-f", etcdDeployUptFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Waiting for etcd-data deployment/pod running...")
waitForPodReady(oc, controlPlaneNamespace, "app=etcd-data", 180)
g.By("Remove old data from the etcd-data pod and create new dir.")
etcdDataPod, err := oc.AsAdmin().Run("get").Args("pods", "-l", "app=etcd-data", "-n", controlPlaneNamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("exec").Args(etcdDataPod, "-n", controlPlaneNamespace, "--", "rm", "-rf", "/var/lib/data").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("exec").Args(etcdDataPod, "-n", controlPlaneNamespace, "--", "mkdir", "-p", "/var/lib/data").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Move snapshot.db from local copy to etcd-data pod.")
err = oc.AsAdmin().Run("cp").Args("/tmp/snapshot.db", controlPlaneNamespace+"/"+etcdDataPod+":"+"/var/lib/snapshot.db", "--retries=5").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Restore the Etcd snapshot.")
cmd := fmt.Sprintf("etcdutl snapshot restore /var/lib/snapshot.db --data-dir=/var/lib/data --skip-hash-check --name etcd-0 --initial-cluster-token=etcd-cluster --initial-cluster etcd-0=https://etcd-0.etcd-discovery.%s.svc:2380,etcd-1=https://etcd-1.etcd-discovery.%s.svc:2380,etcd-2=https://etcd-2.etcd-discovery.%s.svc:2380 --initial-advertise-peer-urls https://etcd-0.etcd-discovery.%s.svc:2380", controlPlaneNamespace, controlPlaneNamespace, controlPlaneNamespace, controlPlaneNamespace)
err = oc.AsAdmin().Run("exec").Args(etcdDataPod, "-n", controlPlaneNamespace, "--", "sh", "-c", cmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("scale statefulset/etcd replicas to 3.")
err = oc.Run("scale").Args("-n", controlPlaneNamespace, "statefulset/etcd", "--replicas=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for the Etcd member pods to return and report as available.")
podAllRunning := checkEtcdPodStatus(oc, controlPlaneNamespace)
if podAllRunning != true {
e2e.Failf("The ectd pods are not running")
}
g.By("Scale up all etcd-writer deployments.")
err = oc.Run("scale").Args("deployment", "-n", controlPlaneNamespace, "kube-apiserver", "openshift-apiserver", "openshift-oauth-apiserver", "--replicas=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Restore reconciliation of the hosted cluster.")
patch = "{\"spec\":{\"pausedUntil\": \"\"}}"
errPatch = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd/cluster", "--type=merge", "-p", patch).Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}) | |||||
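A possible hardening of the restore flow above is to sanity-check the copied snapshot before any data is overwritten. This is a hedged sketch, not part of the original test: it assumes the etcd-data pod image ships etcdutl (whose `snapshot status` subcommand exists in etcd 3.5+) and reuses the etcdDataPod and controlPlaneNamespace variables from the test.

// Optional pre-restore check (assumed step): print the snapshot's hash, revision,
// key count and size; a failure here suggests the copy step corrupted the file.
statusOut, statusErr := oc.AsAdmin().Run("exec").Args(etcdDataPod, "-n", controlPlaneNamespace, "--", "sh", "-c", "etcdutl snapshot status /var/lib/snapshot.db -w json").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("snapshot status: %s", statusOut)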
file | openshift/openshift-tests-private | 28d54e29-245a-4e5d-bd54-0fc5a3bbda05 | util | import (
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"bufio"
"io"
"math/rand"
"os"
"os/exec"
"regexp"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | package disasterrecovery
import (
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"bufio"
"io"
"math/rand"
"os"
"os/exec"
"regexp"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
func getNodeListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
}
func getPodListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-etcd", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podNameList := strings.Fields(output)
return podNameList
}
func runDRBackup(oc *exutil.CLI, nodeNameList []string) (nodeName string, etcddb string) {
var nodeN, etcdDb string
succBackup := false
for _, node := range nodeNameList {
backupout, err := exutil.DebugNodeWithOptionsAndChroot(oc, node, []string{"-q"}, "/usr/local/bin/cluster-backup.sh", "/home/core/assets/backup")
if err != nil {
e2e.Logf("Try for next master!")
continue
}
if strings.Contains(backupout, "Snapshot saved at") && err == nil {
e2e.Logf("backup on master %v ", node)
regexp, _ := regexp.Compile("/home/core/assets/backup/snapshot.*db")
etcdDb = regexp.FindString(backupout)
nodeN = node
succBackup = true
break
}
}
if !succBackup {
e2e.Failf("Failed to run the backup!")
}
return nodeN, etcdDb
}
func getUserNameAndKeyonBationByPlatform(iaasPlatform string) string {
user := ""
switch iaasPlatform {
case "aws":
user = os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
case "gcp":
user = os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
case "azure":
user = os.Getenv("SSH_CLOUD_PRIV_AZURE_USER")
case "ibmcloud":
user = os.Getenv("SSH_CLOUD_PRIV_IBMCLOUD_USER")
}
return user
}
func getNewMastermachine(masterMachineStatus []string, masterMachineNameList []string, desiredStatus string) string {
newMasterMachine := ""
for p, v := range masterMachineStatus {
if strings.Contains(v, desiredStatus) {
newMasterMachine = masterMachineNameList[p]
break
}
}
e2e.Logf("New machine is %s", newMasterMachine)
return newMasterMachine
}
func getNodeInternalIPListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath='{.items[*].status.addresses[?(.type==\"InternalIP\")].address}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeInternalIPList := strings.Fields(strings.ReplaceAll(output, "'", ""))
return nodeInternalIPList
}
// Run the etcd restore shell script command on a master node
func runPSCommand(bastionHost string, nodeInternalIP string, command string, privateKeyForBastion string, userForBastion string) (result string, err error) {
var msg []byte
if bastionHost != "" {
msg, err = exec.Command("bash", "-c", "chmod 600 "+privateKeyForBastion+";ssh -i "+privateKeyForBastion+" -o StrictHostKeyChecking=no -o ProxyCommand=\"ssh -o IdentityFile="+privateKeyForBastion+" -o StrictHostKeyChecking=no -W %h:%p "+userForBastion+"@"+bastionHost+"\""+" core@"+nodeInternalIP+" "+command).CombinedOutput()
} else {
msg, err = exec.Command("bash", "-c", "chmod 600 "+privateKeyForBastion+";ssh -i "+privateKeyForBastion+" -o StrictHostKeyChecking=no core@"+nodeInternalIP+" "+command).CombinedOutput()
}
if err != nil {
e2e.Logf("Message from bash -> %s", string(msg))
}
return string(msg), err
}
func waitForOperatorRestart(oc *exutil.CLI, operatorName string) {
g.By("Check the operator should be in Progressing")
err := wait.Poll(20*time.Second, 600*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", operatorName).Output()
if err != nil {
e2e.Logf("clusteroperator %s has not started new progress, error: %s. Trying again", operatorName, err)
return false, nil
}
if matched, _ := regexp.MatchString("True.*True.*False", output); matched {
e2e.Logf("clusteroperator %s is Progressing:\n%s", operatorName, output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator is not Progressing")
g.By("Wait for the operator to rollout")
err = wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", operatorName).Output()
if err != nil {
e2e.Logf("Fail to get clusteroperator %s, error: %s. Trying again", operatorName, err)
return false, nil
}
if matched, _ := regexp.MatchString("True.*False.*False", output); matched {
e2e.Logf("clusteroperator %s has recovered to normal:\n%s", operatorName, output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator has not recovered to normal")
}
func waitForContainerDisappear(bastionHost string, nodeInternalIP string, command string, privateKeyForBastion string, userForBastion string) {
g.By("Wait for the container to disappear")
err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
msg, err := runPSCommand(bastionHost, nodeInternalIP, command, privateKeyForBastion, userForBastion)
if err != nil {
e2e.Logf("Fail to get container, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString("", msg); matched {
e2e.Logf("The container has disappeared")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The pod is not disappeared as expected")
}
// Check if the iaasPlatform is in the supported list
func in(target string, strArray []string) bool {
for _, element := range strArray {
if target == element {
return true
}
}
return false
}
// make sure all the etcd pods are running
func checkEtcdPodStatus(oc *exutil.CLI, ns string) bool {
err := wait.Poll(20*time.Second, 180*time.Second, func() (bool, error) {
output, errp := oc.AsAdmin().Run("get").Args("pods", "-l", "app=etcd", "-n", ns, "-o=jsonpath='{.items[*].status.phase}'").Output()
if errp != nil {
e2e.Logf("Failed to get etcd pod status, error: %s. Trying again", errp)
return false, nil
}
statusList := strings.Fields(output)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("Found an etcd pod that is not running")
return false, nil
}
}
return len(statusList) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "Sadly etcd pods are not Running.")
return err == nil
}
// make sure all the machines are running
func waitMachineStatusRunning(oc *exutil.CLI, newMasterMachineName string) {
err := wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
machineStatus, errSt := oc.AsAdmin().Run("get").Args("-n", "openshift-machine-api", exutil.MapiMachine, newMasterMachineName, "-o=jsonpath='{.status.phase}'").Output()
if errSt != nil {
e2e.Logf("Failed to get machineStatus, error: %s. Trying again", errSt)
return false, nil
}
if match, _ := regexp.MatchString("Running", machineStatus); match {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Sadly the machine is not Running.")
}
// make sure the correct number of machines are present
func waitforDesiredMachineCount(oc *exutil.CLI, machineCount int) {
err := wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
output, errGetMachine := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath='{.items[*].metadata.name}'").Output()
if errGetMachine != nil {
e2e.Logf("Failed to get machinecount, error: %s. Trying again", errGetMachine)
return false, nil
}
machineNameList := strings.Fields(output)
if len(machineNameList) == machineCount {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Sadly the machine count didn't match")
}
// update new machine file
func updateMachineYmlFile(machineYmlFile string, oldMachineName string, newMasterMachineName string) bool {
fileName := machineYmlFile
in, err := os.OpenFile(fileName, os.O_RDONLY, 0666)
if err != nil {
e2e.Logf("open machineYaml file fail:", err)
return false
}
defer in.Close()
out, err := os.OpenFile(strings.Replace(fileName, "machine.yaml", "machineUpd.yaml", -1), os.O_RDWR|os.O_CREATE, 0766)
if err != nil {
e2e.Logf("Open write file fail:", err)
return false
}
defer out.Close()
br := bufio.NewReader(in)
index := 1
matchTag := false
newLine := ""
for {
line, _, err := br.ReadLine()
if err == io.EOF {
break
}
if err != nil {
e2e.Logf("read err:", err)
return false
}
if strings.Contains(string(line), "providerID: ") {
matchTag = true
} else if strings.Contains(string(line), "status:") {
break
} else if strings.Contains(string(line), "generation: ") {
matchTag = true
} else if strings.Contains(string(line), "machine.openshift.io/instance-state: ") {
matchTag = true
} else if strings.Contains(string(line), "resourceVersion: ") {
matchTag = true
} else if strings.Contains(string(line), oldMachineName) {
newLine = strings.Replace(string(line), oldMachineName, newMasterMachineName, -1)
} else {
newLine = string(line)
}
if !matchTag {
_, err = out.WriteString(newLine + "\n")
if err != nil {
e2e.Logf("Write to file fail:", err)
return false
}
} else {
matchTag = false
}
index++
}
e2e.Logf("Update Machine FINISH!")
return true
}
// make sure the operator is available and neither progressing nor degraded
func checkOperator(oc *exutil.CLI, operatorName string) {
var output string
var err error
var split []string
if operatorName == "" {
output, err = oc.AsAdmin().Run("get").Args("clusteroperator", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
split = strings.Split(output, " ")
} else {
split = append(split, operatorName)
}
err = wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
for _, item := range split {
output, err = oc.AsAdmin().Run("get").Args("clusteroperator", item).Output()
if err != nil {
e2e.Logf("Failed to retrieve clusteroperator %s status, error: %s. Trying again", item, err)
return false, nil
}
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal, will try next time:\n", item)
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator abnormal")
}
func waitMachineDesiredStatus(oc *exutil.CLI, newMasterMachineName string, desiredState string) {
err := wait.Poll(60*time.Second, 480*time.Second, func() (bool, error) {
machineStatus, err := oc.AsAdmin().Run("get").Args("-n", "openshift-machine-api", exutil.MapiMachine, newMasterMachineName, "-o=jsonpath='{.status.phase}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if match, _ := regexp.MatchString(desiredState, machineStatus); match {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "sorry the machine is not in desired state")
}
func waitForDesiredStateOfCR(oc *exutil.CLI, desiredState string) {
err := wait.Poll(60*time.Second, 480*time.Second, func() (bool, error) {
statusOfCR, err := oc.AsAdmin().Run("get").Args("controlplanemachineset.machine.openshift.io", "cluster", "-n", "openshift-machine-api", "-o=jsonpath={.spec.state}").Output()
if err != nil {
e2e.Logf("Failed to get CR status, error: %s. Trying again", err)
return false, nil
}
e2e.Logf("statusOfCR is %v ", statusOfCR)
if statusOfCR == desiredState {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "sorry the CR is not in desired state")
}
// Checks whether cluster operator is healthy.
func IsCOHealthy(oc *exutil.CLI, operatorName string) bool {
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", operatorName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal", operatorName)
return false
}
return true
}
// Checks overall cluster health: etcd pods, all cluster operators, and node readiness
func healthyCheck(oc *exutil.CLI) bool {
e2e.Logf("make sure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if podAllRunning != true {
e2e.Logf("The ectd pods are not running")
return false
}
e2e.Logf("Check all oprators status")
checkOperator(oc, "")
e2e.Logf("Make sure all the nodes are normal")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
checkMessage := []string{
"SchedulingDisabled",
"NotReady",
}
for _, v := range checkMessage {
if strings.Contains(out, v) {
e2e.Logf("The cluster nodes is abnormal.")
return false
}
}
return true
}
// Read .../testdata/disaster_recovery/xx and replace keywords with var then write new file
func fileReplaceKeyword(oldFileName string, newFileName string, keyWords string, varValue string) bool {
if newFileName == "" || newFileName == "" || keyWords == "" {
e2e.Failf("newFileName/newFileName/keyWords is null before replace.")
return false
}
oldFileStr, err := exec.Command("bash", "-c", "cat "+oldFileName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
newFileStr := strings.Replace(string(oldFileStr), keyWords, varValue, -1)
pf, errp := os.Create(newFileName)
o.Expect(errp).NotTo(o.HaveOccurred())
defer pf.Close()
w2 := bufio.NewWriter(pf)
_, perr := w2.WriteString(newFileStr)
w2.Flush()
o.Expect(perr).NotTo(o.HaveOccurred())
return true
}
// Wait for pod(ns, label) ready
func waitForPodReady(oc *exutil.CLI, ns string, labelStr string, waitSecond int32) {
err := wait.Poll(20*time.Second, time.Duration(waitSecond)*time.Second, func() (bool, error) {
output1, err := oc.AsAdmin().Run("get").Args("pods", "-l", labelStr, "-n", ns, "-o=jsonpath='{.items[*].status.phase}'").Output()
if err != nil {
e2e.Logf("Failed to get pod status, error: %s. Trying again", err)
return false, nil
}
statusList := strings.Fields(output1)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("Found a pod that is not running")
return false, nil
}
}
return len(statusList) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "pod with label "+labelStr+"is not Running.")
}
// get hosted cluster namespace and hc name
func getHostedClusterName(oc *exutil.CLI) (string, string) {
var clusterNs string
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("project", "-o=custom-columns=NAME:.metadata.name", "--no-headers").Output()
if err != nil || len(output) <= 0 {
e2e.Failf("Fail to get project name list.")
}
projectNameList := strings.Fields(output)
for i := 0; i < len(projectNameList); i++ {
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", projectNameList[i], "hc", "-ojsonpath={.items[*].metadata.name}").Output()
if len(output) > 0 {
e2e.Logf("hypershift hosted cluster namespace is: %s, hc name is %s", projectNameList[i], output)
clusterNs = projectNameList[i]
break
}
}
return clusterNs, output
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | 04afa444-d687-400c-b52d-81a4617b1e6b | getRandomString | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | c3e8ec66-b8ce-47a0-a684-e3f0bea08da1 | getNodeListByLabel | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func getNodeListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 3519d348-53ad-4b10-9f53-7e92baaa003a | getPodListByLabel | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func getPodListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-etcd", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podNameList := strings.Fields(output)
return podNameList
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | f98f20d2-c25a-4727-817a-a45aa5b1d6bd | runDRBackup | ['"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func runDRBackup(oc *exutil.CLI, nodeNameList []string) (nodeName string, etcddb string) {
var nodeN, etcdDb string
succBackup := false
for _, node := range nodeNameList {
backupout, err := exutil.DebugNodeWithOptionsAndChroot(oc, node, []string{"-q"}, "/usr/local/bin/cluster-backup.sh", "/home/core/assets/backup")
if err != nil {
e2e.Logf("Try for next master!")
continue
}
if strings.Contains(backupout, "Snapshot saved at") && err == nil {
e2e.Logf("backup on master %v ", node)
regexp, _ := regexp.Compile("/home/core/assets/backup/snapshot.*db")
etcdDb = regexp.FindString(backupout)
nodeN = node
succBackup = true
break
}
}
if !succBackup {
e2e.Failf("Failed to run the backup!")
}
return nodeN, etcdDb
} | disasterrecovery | ||||
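A hedged usage sketch of runDRBackup, reusing the master-node label selector from the tests above; the variable names here are illustrative only:

// Try each master in turn until one completes a backup, then log the snapshot path.
backupNode, snapshotPath := runDRBackup(oc, getNodeListByLabel(oc, "node-role.kubernetes.io/master="))
e2e.Logf("etcd backup taken on node %s, snapshot at %s", backupNode, snapshotPath)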
function | openshift/openshift-tests-private | 80d5eea8-07f3-48fa-9a85-d10421d8c282 | getUserNameAndKeyonBationByPlatform | ['"os"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func getUserNameAndKeyonBationByPlatform(iaasPlatform string) string {
user := ""
switch iaasPlatform {
case "aws":
user = os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
case "gcp":
user = os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
case "azure":
user = os.Getenv("SSH_CLOUD_PRIV_AZURE_USER")
case "ibmcloud":
user = os.Getenv("SSH_CLOUD_PRIV_IBMCLOUD_USER")
}
return user
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 7482a745-9366-4715-b7fe-9e0f86981102 | getNewMastermachine | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func getNewMastermachine(masterMachineStatus []string, masterMachineNameList []string, desiredStatus string) string {
newMasterMachine := ""
for p, v := range masterMachineStatus {
if strings.Contains(v, desiredStatus) {
newMasterMachine = masterMachineNameList[p]
break
}
}
e2e.Logf("New machine is %s", newMasterMachine)
return newMasterMachine
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 78626fb9-b6f1-426f-acd8-8ee58b0a3d4a | getNodeInternalIPListByLabel | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func getNodeInternalIPListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath='{.items[*].status.addresses[?(.type==\"InternalIP\")].address}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeInternalIPList := strings.Fields(strings.ReplaceAll(output, "'", ""))
return nodeInternalIPList
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | adda438f-4f97-43da-b462-00318962f878 | runPSCommand | ['"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func runPSCommand(bastionHost string, nodeInternalIP string, command string, privateKeyForBastion string, userForBastion string) (result string, err error) {
var msg []byte
if bastionHost != "" {
msg, err = exec.Command("bash", "-c", "chmod 600 "+privateKeyForBastion+";ssh -i "+privateKeyForBastion+" -o StrictHostKeyChecking=no -o ProxyCommand=\"ssh -o IdentityFile="+privateKeyForBastion+" -o StrictHostKeyChecking=no -W %h:%p "+userForBastion+"@"+bastionHost+"\""+" core@"+nodeInternalIP+" "+command).CombinedOutput()
} else {
msg, err = exec.Command("bash", "-c", "chmod 600 "+privateKeyForBastion+";ssh -i "+privateKeyForBastion+" -o StrictHostKeyChecking=no core@"+nodeInternalIP+" "+command).CombinedOutput()
}
if err != nil {
e2e.Logf("Message from bash -> %s", string(msg))
}
return string(msg), err
} | disasterrecovery | ||||
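A hedged sketch of the two invocation modes runPSCommand supports; the hostnames, IP, key path, and user below are placeholders, not values from the source:

// Direct SSH on platforms whose node IPs are reachable (e.g. vsphere, nutanix).
out, err := runPSCommand("", "10.0.0.5", "sudo crictl ps", "/tmp/ssh-priv.key", "")
// SSH proxied through a bastion (aws/gcp/azure/ibmcloud), with the bastion user
// typically obtained from getUserNameAndKeyonBationByPlatform.
out, err = runPSCommand("bastion.example.com", "10.0.0.5", "sudo crictl ps", "/tmp/ssh-priv.key", "ec2-user")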
function | openshift/openshift-tests-private | 283ff263-22bb-4d23-a6fd-da843860c3cb | waitForOperatorRestart | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func waitForOperatorRestart(oc *exutil.CLI, operatorName string) {
g.By("Check the operator should be in Progressing")
err := wait.Poll(20*time.Second, 600*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", operatorName).Output()
if err != nil {
e2e.Logf("clusteroperator %s has not started new progress, error: %s. Trying again", operatorName, err)
return false, nil
}
if matched, _ := regexp.MatchString("True.*True.*False", output); matched {
e2e.Logf("clusteroperator %s is Progressing:\n%s", operatorName, output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator is not Progressing")
g.By("Wait for the operator to rollout")
err = wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", operatorName).Output()
if err != nil {
e2e.Logf("Fail to get clusteroperator %s, error: %s. Trying again", operatorName, err)
return false, nil
}
if matched, _ := regexp.MatchString("True.*False.*False", output); matched {
e2e.Logf("clusteroperator %s has recovered to normal:\n%s", operatorName, output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator has not recovered to normal")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | b21ab916-be0e-4293-b6a7-daea28af6151 | waitForContainerDisappear | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func waitForContainerDisappear(bastionHost string, nodeInternalIP string, command string, privateKeyForBastion string, userForBastion string) {
g.By("Wait for the container to disappear")
err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
msg, err := runPSCommand(bastionHost, nodeInternalIP, command, privateKeyForBastion, userForBastion)
if err != nil {
e2e.Logf("Fail to get container, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString("", msg); matched {
e2e.Logf("The container has disappeared")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The pod is not disappeared as expected")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | ab1414db-a512-48ed-aed6-beaf57a63d3a | in | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func in(target string, strArray []string) bool {
for _, element := range strArray {
if target == element {
return true
}
}
return false
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | c3048a0a-af01-4f97-b1d7-085553e2b50d | checkEtcdPodStatus | ['"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func checkEtcdPodStatus(oc *exutil.CLI, ns string) bool {
err := wait.Poll(20*time.Second, 180*time.Second, func() (bool, error) {
output, errp := oc.AsAdmin().Run("get").Args("pods", "-l", "app=etcd", "-n", ns, "-o=jsonpath='{.items[*].status.phase}'").Output()
if errp != nil {
e2e.Logf("Failed to get etcd pod status, error: %s. Trying again", errp)
return false, nil
}
statusList := strings.Fields(output)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("Found an etcd pod that is not running")
return false, nil
}
}
return len(statusList) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "Sadly etcd pods are not Running.")
return err == nil
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | b9dcf876-7767-4c67-8d61-76093fa85288 | waitMachineStatusRunning | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func waitMachineStatusRunning(oc *exutil.CLI, newMasterMachineName string) {
err := wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
machineStatus, errSt := oc.AsAdmin().Run("get").Args("-n", "openshift-machine-api", exutil.MapiMachine, newMasterMachineName, "-o=jsonpath='{.status.phase}'").Output()
if errSt != nil {
e2e.Logf("Failed to get machineStatus, error: %s. Trying again", errSt)
return false, nil
}
if match, _ := regexp.MatchString("Running", machineStatus); match {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Sadly the machine is not Running.")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 96406009-e264-4c8b-af49-c61de8f0bec9 | waitforDesiredMachineCount | ['"io"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func waitforDesiredMachineCount(oc *exutil.CLI, machineCount int) {
err := wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
output, errGetMachine := oc.AsAdmin().Run("get").Args(exutil.MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=master", "-o=jsonpath='{.items[*].metadata.name}'").Output()
if errGetMachine != nil {
e2e.Logf("Failed to get machinecount, error: %s. Trying again", errGetMachine)
return false, nil
}
machineNameList := strings.Fields(output)
if len(machineNameList) == machineCount {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Sadly the machine count didn't match")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | a0f87545-75e4-41bf-adf0-2ee67b947071 | updateMachineYmlFile | ['"bufio"', '"io"', '"os"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func updateMachineYmlFile(machineYmlFile string, oldMachineName string, newMasterMachineName string) bool {
fileName := machineYmlFile
in, err := os.OpenFile(fileName, os.O_RDONLY, 0666)
if err != nil {
e2e.Logf("open machineYaml file fail:", err)
return false
}
defer in.Close()
out, err := os.OpenFile(strings.Replace(fileName, "machine.yaml", "machineUpd.yaml", -1), os.O_RDWR|os.O_CREATE, 0766)
if err != nil {
e2e.Logf("Open write file fail:", err)
return false
}
defer out.Close()
br := bufio.NewReader(in)
index := 1
matchTag := false
newLine := ""
for {
line, _, err := br.ReadLine()
if err == io.EOF {
break
}
if err != nil {
e2e.Logf("read err:", err)
return false
}
if strings.Contains(string(line), "providerID: ") {
matchTag = true
} else if strings.Contains(string(line), "status:") {
break
} else if strings.Contains(string(line), "generation: ") {
matchTag = true
} else if strings.Contains(string(line), "machine.openshift.io/instance-state: ") {
matchTag = true
} else if strings.Contains(string(line), "resourceVersion: ") {
matchTag = true
} else if strings.Contains(string(line), oldMachineName) {
newLine = strings.Replace(string(line), oldMachineName, newMasterMachineName, -1)
} else {
newLine = string(line)
}
if !matchTag {
_, err = out.WriteString(newLine + "\n")
if err != nil {
e2e.Logf("Write to file fail:", err)
return false
}
} else {
matchTag = false
}
index++
}
e2e.Logf("Update Machine FINISH!")
return true
} | disasterrecovery | ||||
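A hedged usage sketch of updateMachineYmlFile; the machine names and path are hypothetical. The helper drops providerID, generation, resourceVersion, and instance-state lines, stops copying at "status:", substitutes the new machine name, and writes the result next to the input as machineUpd.yaml:

// Produce a creatable machine manifest for a replacement master (names are made up).
if !updateMachineYmlFile("/tmp/machine.yaml", "mycluster-abc12-master-0", "mycluster-abc12-master-3") {
e2e.Failf("failed to prepare the updated machine YAML")
}
// The sanitized copy now lives at /tmp/machineUpd.yaml, ready for oc create -f.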
function | openshift/openshift-tests-private | 333c0c49-131d-4dff-818e-cdf8c313b5a4 | checkOperator | ['"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func checkOperator(oc *exutil.CLI, operatorName string) {
var output string
var err error
var split []string
if operatorName == "" {
output, err = oc.AsAdmin().Run("get").Args("clusteroperator", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
split = strings.Split(output, " ")
} else {
split = append(split, operatorName)
}
err = wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
for _, item := range split {
output, err = oc.AsAdmin().Run("get").Args("clusteroperator", item).Output()
if err != nil {
e2e.Logf("Failed to retrieve clusteroperator %s status, error: %s. Trying again", item, err)
return false, nil
}
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal, will try next time:\n", item)
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator abnormal")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 0089e09e-c3af-4ee6-8012-b7b240c1e9e7 | waitMachineDesiredStatus | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func waitMachineDesiredStatus(oc *exutil.CLI, newMasterMachineName string, desiredState string) {
err := wait.Poll(60*time.Second, 480*time.Second, func() (bool, error) {
machineStatus, err := oc.AsAdmin().Run("get").Args("-n", "openshift-machine-api", exutil.MapiMachine, newMasterMachineName, "-o=jsonpath='{.status.phase}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if match, _ := regexp.MatchString(desiredState, machineStatus); match {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "sorry the machine is not in desired state")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 09da66ce-0c98-40fd-808a-c84ddd81c802 | waitForDesiredStateOfCR | ['"io"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func waitForDesiredStateOfCR(oc *exutil.CLI, desiredState string) {
err := wait.Poll(60*time.Second, 480*time.Second, func() (bool, error) {
statusOfCR, err := oc.AsAdmin().Run("get").Args("controlplanemachineset.machine.openshift.io", "cluster", "-n", "openshift-machine-api", "-o=jsonpath={.spec.state}").Output()
if err != nil {
e2e.Logf("Failed to get CR status, error: %s. Trying again", err)
return false, nil
}
e2e.Logf("statusOfCR is %v ", statusOfCR)
if statusOfCR == desiredState {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "sorry the CR is not in desired state")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 35b0889c-f293-4b94-9f9f-261a41381bdd | IsCOHealthy | ['"regexp"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func IsCOHealthy(oc *exutil.CLI, operatorName string) bool {
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", operatorName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal", operatorName)
return false
}
return true
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 9b1f065b-3f60-4fa8-8031-6d5b866c914a | healthyCheck | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func healthyCheck(oc *exutil.CLI) bool {
e2e.Logf("make sure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc, "openshift-etcd")
if podAllRunning != true {
e2e.Logf("The ectd pods are not running")
return false
}
e2e.Logf("Check all oprators status")
checkOperator(oc, "")
e2e.Logf("Make sure all the nodes are normal")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
checkMessage := []string{
"SchedulingDisabled",
"NotReady",
}
for _, v := range checkMessage {
if strings.Contains(out, v) {
e2e.Logf("The cluster nodes is abnormal.")
return false
}
}
return true
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 3007e736-7ae4-4a35-8b75-517b99882dde | fileReplaceKeyword | ['"bufio"', '"os"', '"os/exec"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func fileReplaceKeyword(oldFileName string, newFileName string, keyWords string, varValue string) bool {
if newFileName == "" || newFileName == "" || keyWords == "" {
e2e.Failf("newFileName/newFileName/keyWords is null before replace.")
return false
}
oldFileStr, err := exec.Command("bash", "-c", "cat "+oldFileName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
newFileStr := strings.Replace(string(oldFileStr), keyWords, varValue, -1)
pf, errp := os.Create(newFileName)
o.Expect(errp).NotTo(o.HaveOccurred())
defer pf.Close()
w2 := bufio.NewWriter(pf)
_, perr := w2.WriteString(newFileStr)
w2.Flush()
o.Expect(perr).NotTo(o.HaveOccurred())
return true
} | disasterrecovery | ||||
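A hedged usage sketch of fileReplaceKeyword, mirroring how the hypershift restore test renders its etcd-data deployment; the paths and variables come from that test and are illustrative here:

// Stamp the discovered etcd image into the deployment template before oc create.
if !fileReplaceKeyword(filepath.Join(buildPruningBaseDir, "dr_etcd_image.yaml"), "/tmp/etcd_data_deployment.yaml", "ETCD_IMAGE", etcdImage) {
e2e.Failf("failed to render the etcd-data deployment file")
}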
function | openshift/openshift-tests-private | d68d5208-9c29-46bc-b291-b4671f7903dd | waitForPodReady | ['"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func waitForPodReady(oc *exutil.CLI, ns string, labelStr string, waitSecond int32) {
err := wait.Poll(20*time.Second, time.Duration(waitSecond)*time.Second, func() (bool, error) {
output1, err := oc.AsAdmin().Run("get").Args("pods", "-l", labelStr, "-n", ns, "-o=jsonpath='{.items[*].status.phase}'").Output()
if err != nil {
e2e.Logf("Failed to get pod status, error: %s. Trying again", err)
return false, nil
}
statusList := strings.Fields(output1)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("Found a pod that is not running")
return false, nil
}
}
return len(statusList) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "pod with label "+labelStr+"is not Running.")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 27dfb488-a00b-4a52-aded-52af96489960 | getHostedClusterName | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/util.go | func getHostedClusterName(oc *exutil.CLI) (string, string) {
var clusterNs string
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("project", "-o=custom-columns=NAME:.metadata.name", "--no-headers").Output()
if err != nil || len(output) <= 0 {
e2e.Failf("Fail to get project name list.")
}
projectNameList := strings.Fields(output)
for i := 0; i < len(projectNameList); i++ {
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", projectNameList[i], "hc", "-ojsonpath={.items[*].metadata.name}").Output()
if len(output) > 0 {
e2e.Logf("hypershift hosted cluster namespace is: %s, hc name is %s", projectNameList[i], output)
clusterNs = projectNameList[i]
break
}
}
return clusterNs, output
} | disasterrecovery | ||||
test | openshift/openshift-tests-private | 1144719c-773e-4239-a8bb-917be72960c8 | etcd-defragment | import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd-defragment.go | package etcd
import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-etcd] ETCD", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("default-"+getRandomString(), exutil.KubeConfigPath())
// author: [email protected]
g.It("NonHyperShiftHOST-DEPRECATED-Author:mifiedle-High-43335-etcd data store will defragment and recover unused space [Slow] [Flaky]", func() {
g.By("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
g.By("Install and run etcd benchmark")
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "install", "-y", "git", "golang")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root && git clone --single-branch --branch release-3.5 https://github.com/etcd-io/etcd.git")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root/etcd/tools/benchmark && go build")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "remove", "-y", "git", "golang")
if err != nil {
e2e.Logf("Could not remove git and golang packages")
}
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "rm", "-rf", "/root/go", "/root/etcd")
if err != nil {
e2e.Logf("Could not remove test directories")
}
}()
// push the data store over 400 MB of disk size
cmd := "/root/etcd/tools/benchmark/benchmark put " +
"--cacert /etc/kubernetes/static-pod-certs/configmaps/etcd-peer-client-ca/ca-bundle.crt " +
"--cert /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*crt " +
"--key /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*key " +
"--conns=100 --clients=200 --key-size=32 --sequential-keys --rate=8000 --total=250000 " +
"--val-size=4096 --target-leader"
start := time.Now()
output, err := exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], cmd)
duration := time.Since(start)
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("Benchmark result:\n%s", output))
// Check benchmark did not take too long
expected := 120
o.Expect(duration.Seconds()).Should(o.BeNumerically("<", expected), "Failed to run benchmark in under %d seconds", expected)
// Check prometheus metrics
prometheusURL := "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query?query="
// Get the monitoring token
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "-n", "openshift-monitoring", "prometheus-k8s").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Allow etcd datastore to reach full size of ~1GB
g.By("Query etcd datastore size to ensure it grows over 1GB")
query := "avg(etcd_mvcc_db_total_size_in_use_in_bytes)<1000000000"
err = wait.Poll(60*time.Second, 120*time.Second, func() (bool, error) {
data := doPrometheusQuery(oc, token, prometheusURL, query)
if len(data.Data.Result) == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "etcd datastore did not grow over 1GB in 2 minutes")
g.By("Poll for etcd compaction every 60 seconds for up to 20 minutes")
// Check for the datastore to drop below 100 MB in size
query = "avg(etcd_mvcc_db_total_size_in_use_in_bytes)>100000000"
err = wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
data := doPrometheusQuery(oc, token, prometheusURL, query)
if len(data.Data.Result) == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Compaction did not occur within 20 minutes")
})
})
| package etcd | ||||
test case | openshift/openshift-tests-private | 59b26d4c-169d-413a-9193-a3e7140f5e19 | NonHyperShiftHOST-DEPRECATED-Author:mifiedle-High-43335-etcd data store will defragment and recover unused space [Slow] [Flaky] | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd-defragment.go | g.It("NonHyperShiftHOST-DEPRECATED-Author:mifiedle-High-43335-etcd data store will defragment and recover unused space [Slow] [Flaky]", func() {
g.By("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
g.By("Install and run etcd benchmark")
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "install", "-y", "git", "golang")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root && git clone --single-branch --branch release-3.5 https://github.com/etcd-io/etcd.git")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root/etcd/tools/benchmark && go build")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "remove", "-y", "git", "golang")
if err != nil {
e2e.Logf("Could not remove git and golang packages")
}
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "rm", "-rf", "/root/go", "/root/etcd")
if err != nil {
e2e.Logf("Could not remove test directories")
}
}()
// Push the data store over 400 MB of disk size
cmd := "/root/etcd/tools/benchmark/benchmark put " +
"--cacert /etc/kubernetes/static-pod-certs/configmaps/etcd-peer-client-ca/ca-bundle.crt " +
"--cert /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*crt " +
"--key /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*key " +
"--conns=100 --clients=200 --key-size=32 --sequential-keys --rate=8000 --total=250000 " +
"--val-size=4096 --target-leader"
start := time.Now()
output, err := exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], cmd)
duration := time.Since(start)
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("Benchmark result:\n%s", output))
// Check benchmark did not take too long
expected := 120
o.Expect(duration.Seconds()).Should(o.BeNumerically("<", expected), "Failed to run benchmark in under %d seconds", expected)
// Check prometheus metrics
prometheusURL := "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query?query="
// Get the monitoring token
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "-n", "openshift-monitoring", "prometheus-k8s").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Allow etcd datastore to reach full size of ~1GB
g.By("Query etcd datastore size to ensure it grows over 1GB")
query := "avg(etcd_mvcc_db_total_size_in_use_in_bytes)<1000000000"
err = wait.Poll(60*time.Second, 120*time.Second, func() (bool, error) {
data := doPrometheusQuery(oc, token, prometheusURL, query)
if len(data.Data.Result) == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "etcd datastore did not grow over 1GB in 2 minutes")
g.By("Poll for etcd compaction every 60 seconds for up to 20 minutes")
// Check for the datastore to drop below 100 MB in size
query = "avg(etcd_mvcc_db_total_size_in_use_in_bytes)>100000000"
err = wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
data := doPrometheusQuery(oc, token, prometheusURL, query)
if len(data.Data.Result) == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Compaction did not occur within 20 minutes")
}) | |||||
test | openshift/openshift-tests-private | 34279bef-ed41-49a9-aae6-65738201c2b2 | etcdcheckperf | import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/etcd/etcdcheckperf.go | package etcd
import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-etcd] ETCD", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLIWithoutNamespace("openshift-etcd")
// author: [email protected]
g.It("NonHyperShiftHOST-DEPRECATED-Author:jgeorge-CPaasrunOnly-High-44199-run etcd benchmark [Exclusive]", func() {
var platform = exutil.CheckPlatform(oc)
rttTh := map[string]string{
"aws": "0.03",
"gcp": "0.06",
}
walFsyncTh := map[string]string{
"aws": "0.04",
"gcp": "0.06",
}
if _, exists := rttTh[platform]; !exists {
g.Skip(fmt.Sprintf("Skip for non-supported platform: %s", platform))
}
g.By("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
g.By("Install and run etcd benchmark")
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "install", "-y", "git", "golang")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root && git clone --single-branch --branch release-3.5 https://github.com/etcd-io/etcd.git")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root/etcd/tools/benchmark && go build")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "remove", "-y", "git", "golang")
if err != nil {
e2e.Logf("Could not remove git and golang packages")
}
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "rm", "-rf", "/root/go", "/root/etcd")
if err != nil {
e2e.Logf("Could not remove test directories")
}
}()
cmd := "/root/etcd/tools/benchmark/benchmark put " +
"--cacert /etc/kubernetes/static-pod-certs/configmaps/etcd-peer-client-ca/ca-bundle.crt " +
"--cert /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*crt " +
"--key /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*key " +
"--conns=100 --clients=200 --key-size=32 --sequential-keys --rate=4000 --total=240000 " +
"--val-size=1024 --target-leader"
start := time.Now()
output, err := exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], cmd)
duration := time.Since(start)
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("Benchmark result:\n%s", output))
// Check benchmark did not take too long
expected := 90
o.Expect(duration.Seconds()).Should(o.BeNumerically("<", expected), "Failed to run benchmark in under %d seconds", expected)
// Check prometheus metrics
prometheusURL := "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query?query="
// Get the monitoring token
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "-n", "openshift-monitoring", "prometheus-k8s").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Network RTT metric
query := fmt.Sprintf("histogram_quantile(0.99,(irate(etcd_network_peer_round_trip_time_seconds_bucket[1m])))>%s", rttTh[platform])
data := doPrometheusQuery(oc, token, prometheusURL, query)
o.Expect(len(data.Data.Result)).To(o.Equal(0))
// Disk commit duration
query = "histogram_quantile(0.99, irate(etcd_disk_backend_commit_duration_seconds_bucket[1m]))>0.03"
data = doPrometheusQuery(oc, token, prometheusURL, query)
o.Expect(len(data.Data.Result)).To(o.Equal(0))
// WAL fsync duration
query = fmt.Sprintf("histogram_quantile(0.999,(irate(etcd_disk_wal_fsync_duration_seconds_bucket[1m])))>%s", wallFsyncTh[platform])
data = doPrometheusQuery(oc, token, prometheusURL, query)
o.Expect(len(data.Data.Result)).To(o.Equal(0))
})
})
| package etcd | ||||
test case | openshift/openshift-tests-private | 2613510c-9de0-482e-9766-6e1f61adda8c | NonHyperShiftHOST-DEPRECATED-Author:jgeorge-CPaasrunOnly-High-44199-run etcd benchmark [Exclusive] | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcdcheckperf.go | g.It("NonHyperShiftHOST-DEPRECATED-Author:jgeorge-CPaasrunOnly-High-44199-run etcd benchmark [Exclusive]", func() {
var platform = exutil.CheckPlatform(oc)
rttTh := map[string]string{
"aws": "0.03",
"gcp": "0.06",
}
walFsyncTh := map[string]string{
"aws": "0.04",
"gcp": "0.06",
}
if _, exists := rttTh[platform]; !exists {
g.Skip(fmt.Sprintf("Skip for non-supported platform: %s", platform))
}
g.By("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
g.By("Install and run etcd benchmark")
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "install", "-y", "git", "golang")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root && git clone --single-branch --branch release-3.5 https://github.com/etcd-io/etcd.git")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], "cd /root/etcd/tools/benchmark && go build")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "dnf", "remove", "-y", "git", "golang")
if err != nil {
e2e.Logf("Could not remove git and golang packages")
}
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "rm", "-rf", "/root/go", "/root/etcd")
if err != nil {
e2e.Logf("Could not remove test directories")
}
}()
cmd := "/root/etcd/tools/benchmark/benchmark put " +
"--cacert /etc/kubernetes/static-pod-certs/configmaps/etcd-peer-client-ca/ca-bundle.crt " +
"--cert /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*crt " +
"--key /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-$(hostname).*key " +
"--conns=100 --clients=200 --key-size=32 --sequential-keys --rate=4000 --total=240000 " +
"--val-size=1024 --target-leader"
start := time.Now()
output, err := exutil.RemoteShPodWithBash(oc, "openshift-etcd", etcdPodList[0], cmd)
duration := time.Since(start)
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("Benchmark result:\n%s", output))
// Check benchmark did not take too long
expected := 90
o.Expect(duration.Seconds()).Should(o.BeNumerically("<", expected), "Failed to run benchmark in under %d seconds", expected)
// Check prometheus metrics
prometheusURL := "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query?query="
// Get the monitoring token
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "-n", "openshift-monitoring", "prometheus-k8s").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Network RTT metric
query := fmt.Sprintf("histogram_quantile(0.99,(irate(etcd_network_peer_round_trip_time_seconds_bucket[1m])))>%s", rttTh[platform])
data := doPrometheusQuery(oc, token, prometheusURL, query)
o.Expect(len(data.Data.Result)).To(o.Equal(0))
// Disk commit duration
query = "histogram_quantile(0.99, irate(etcd_disk_backend_commit_duration_seconds_bucket[1m]))>0.03"
data = doPrometheusQuery(oc, token, prometheusURL, query)
o.Expect(len(data.Data.Result)).To(o.Equal(0))
// WAL fsync duration
query = fmt.Sprintf("histogram_quantile(0.999,(irate(etcd_disk_wal_fsync_duration_seconds_bucket[1m])))>%s", wallFsyncTh[platform])
data = doPrometheusQuery(oc, token, prometheusURL, query)
o.Expect(len(data.Data.Result)).To(o.Equal(0))
}) | |||||
file | openshift/openshift-tests-private | 7d13caa4-6d52-4dda-aebb-3ac24e872f5e | util | import (
o "github.com/onsi/gomega"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strings"
"time"
"encoding/json"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | package etcd
import (
o "github.com/onsi/gomega"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strings"
"time"
"encoding/json"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// PrometheusQueryResult models the JSON response of a Prometheus instant query.
type PrometheusQueryResult struct {
Status string `json:"status"`
Data struct {
ResultType string `json:"resultType"`
Result []struct {
Metric struct {
To string `json:"To"`
Endpoint string `json:"endpoint"`
Instance string `json:"instance"`
Job string `json:"job"`
Namespace string `json:"namespace"`
Pod string `json:"pod"`
Service string `json:"service"`
} `json:"metric"`
Value []interface{} `json:"value"`
} `json:"result"`
} `json:"data"`
}
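// A successful instant query returns JSON shaped like:
//
//	{"status":"success","data":{"resultType":"vector","result":[
//	  {"metric":{"pod":"etcd-master-0"},"value":[1700000000.0,"0.01"]}]}}
//
// so Data.Result is empty exactly when no series matched the queried threshold.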
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
func getNodeListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
}
func getPodListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-etcd", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podNameList := strings.Fields(output)
return podNameList
}
func getIpOfMasterNode(oc *exutil.CLI, labelKey string) string {
ipOfNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath={.items[0].status.addresses[0].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return ipOfNode
}
func verifyImageIDInDebugNode(oc *exutil.CLI, nodeNameList []string, imageID string, cVersion string) bool {
found := 0
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "oc", "adm", "release", "info", "--registry-config=/var/lib/kubelet/config.json", cVersion, "--image-for=etcd")
if strings.Contains(resultOutput, imageID) && err == nil {
e2e.Logf("Image %v verified on node %v", imageID, node)
found++
} else {
e2e.Logf("Image %v could not be verified on node %v", imageID, node)
}
}
return found == len(nodeNameList)
}
func verifySSLHealth(oc *exutil.CLI, ipOfNode string, node string) bool {
healthCheck := false
nodeIPAndPort := ipOfNode + ":9979"
resultOutput, _ := exutil.DebugNodeWithChroot(oc, node, "podman", "run", "--rm", "-ti", "docker.io/drwetter/testssl.sh:latest", nodeIPAndPort)
outputLines := strings.Split(resultOutput, "\n")
for _, eachLine := range outputLines {
if strings.Contains(eachLine, "SWEET32") && strings.Contains(eachLine, "not vulnerable (OK)") {
healthCheck = true
break
}
}
if healthCheck {
e2e.Logf("The SWEET32 vulnerability check passed")
} else {
e2e.Logf("SSL scan output: %v ", resultOutput)
}
return healthCheck
}
func runDRBackup(oc *exutil.CLI, nodeNameList []string) (nodeName string, etcddb string) {
var nodeN, etcdDb string
for nodeindex, node := range nodeNameList {
backupout, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", oc.Namespace(), "node/"+node, "--", "chroot", "/host", "/usr/local/bin/cluster-backup.sh", "/home/core/assets/backup").Output()
if strings.Contains(backupout, "Snapshot saved at") && err == nil {
e2e.Logf("backup on master %v ", node)
re := regexp.MustCompile("/home/core/assets/backup/snapshot.*db")
etcdDb = re.FindString(backupout)
nodeN = node
break
} else if err != nil && nodeindex < len(nodeNameList)-1 {
e2e.Logf("Try for next master!")
} else {
e2e.Failf("Failed to run the backup!")
}
}
return nodeN, etcdDb
}
func doPrometheusQuery(oc *exutil.CLI, token string, url string, query string) PrometheusQueryResult {
var data PrometheusQueryResult
msg, _, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(
"-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "-i", "--",
"curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token),
fmt.Sprintf("%s%s", url, query)).Outputs()
if err != nil {
e2e.Failf("Failed Prometheus query, error: %v", err)
}
o.Expect(msg).NotTo(o.BeEmpty())
json.Unmarshal([]byte(msg), &data)
logPrometheusResult(data)
return data
}
func logPrometheusResult(data PrometheusQueryResult) {
if len(data.Data.Result) > 0 {
e2e.Logf("Unexpected metric values.")
for i, v := range data.Data.Result {
e2e.Logf(fmt.Sprintf("index: %d value: %s", i, v.Value[1].(string)))
}
}
}
func waitForMicroshiftAfterRestart(oc *exutil.CLI, nodename string) {
exutil.DebugNodeWithOptionsAndChroot(oc, nodename, []string{"-q"}, "bash", "-c", "systemctl restart microshift")
mStatusErr := wait.Poll(6*time.Second, 300*time.Second, func() (bool, error) {
output, _ := exutil.DebugNodeWithOptionsAndChroot(oc, nodename, []string{"-q"}, "bash", "-c", "systemctl status microshift")
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift status is: %v ", output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(mStatusErr, fmt.Sprintf("Microshift failed to restart: %v", mStatusErr))
}
// make sure the PVC is Bound to the PV
func waitForPvcStatus(oc *exutil.CLI, namespace string, pvcname string) {
err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
pvStatus, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pvc", pvcname, "-o=jsonpath='{.status.phase}'").Output()
if err != nil {
return false, err
}
if match, _ := regexp.MatchString("Bound", pvStatus); match {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The PVC is not Bound as expected")
}
func waitForOneOffBackupToComplete(oc *exutil.CLI, namespace string, bkpname string) {
err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
pvStatus, err := oc.AsAdmin().Run("get").Args("-n", namespace, "etcdbackup", bkpname, "-o=jsonpath='{.status.conditions[*].reason}'").Output()
if err != nil {
return false, err
}
if match, _ := regexp.MatchString("BackupCompleted", pvStatus); match {
e2e.Logf("OneOffBkpJob status is %v", pvStatus)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The BackupJob is not Completed as expected")
}
func getOneBackupFile(oc *exutil.CLI, namespace string, bkpname string) string {
bkpfile := ""
bkpmsg, err := oc.AsAdmin().Run("get").Args("-n", namespace, "etcdbackup", bkpname, "-o=jsonpath='{.status.conditions[*].message}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bkpmsgList := strings.Fields(bkpmsg)
for _, bmsg := range bkpmsgList {
if match, _ := regexp.MatchString("backup-test", bmsg); match {
e2e.Logf("backupfile is %v", bmsg)
bkpfile = bmsg
break
}
}
return bkpfile
}
func verifyBkpFileCreationHost(oc *exutil.CLI, nodeNameList []string, bkpPath string, bkpFile string) bool {
cmd := "ls -lrt " + bkpPath
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", cmd)
if strings.Contains(resultOutput, bkpFile) && err == nil {
e2e.Logf("OneOffBackupFile %v successfully verified on node %v", bkpFile, node)
return true
}
e2e.Logf("Trying for next node since BackupFile is not found on this node %v", node)
}
return false
}
func verifyEtcdClusterMsgStatus(oc *exutil.CLI, msg string, status string) bool {
etcdStatus, errSt := oc.AsAdmin().WithoutNamespace().Run("get").Args("etcd", "cluster", "-o=jsonpath='{.status.conditions[?(@.reason==\"BootstrapAlreadyRemoved\")].status}'").Output()
o.Expect(errSt).NotTo(o.HaveOccurred())
message, errMsg := oc.AsAdmin().WithoutNamespace().Run("get").Args("etcd", "cluster", "-o=jsonpath='{.status.conditions[?(@.reason==\"BootstrapAlreadyRemoved\")].message}'").Output()
o.Expect(errMsg).NotTo(o.HaveOccurred())
found := false
if strings.Contains(message, msg) && strings.Contains(etcdStatus, status) {
e2e.Logf("message is %v and status is %v", message, etcdStatus)
found = true
}
return found
}
func getIPStackType(oc *exutil.CLI) string {
svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
stack := ""
if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 {
stack = "dualstack"
} else if strings.Count(svcNetwork, ":") >= 2 {
stack = "ipv6single"
} else if strings.Count(svcNetwork, ".") >= 2 {
stack = "ipv4single"
}
return stack
}
func checkOperator(oc *exutil.CLI, operatorName string) {
err := wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", operatorName).Output()
if err != nil {
e2e.Logf("get clusteroperator err, will try next time:\n")
return false, nil
}
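// "True.*False.*False" matches the AVAILABLE/PROGRESSING/DEGRADED columns of
// the `oc get clusteroperator` row: available, not progressing, not degraded.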
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal, will try next time:\n", operatorName)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator is abnormal")
}
func verifyRecurBkpFileCreationHost(oc *exutil.CLI, nodeNameList []string, bkpPath string, bkpFile string, count string) bool {
cmd := "ls -lrt " + bkpPath + " | grep " + bkpFile + " | wc "
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", cmd)
opWords := strings.Fields(resultOutput)
if len(opWords) > 0 && strings.Contains(opWords[0], count) && err == nil {
e2e.Logf("Recurring %v successfully verified on node %v", bkpFile, node)
return true
}
e2e.Logf("Trying for next node since expected BackUp files are not found on this node %v", node)
}
return false
}
func waitForFirstBackupjobToSchedule(oc *exutil.CLI, namespace string, bkpodname string) string {
recurPod := ""
err := wait.Poll(20*time.Second, 120*time.Second, func() (bool, error) {
podNameOp, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pods", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
podNameList := strings.Fields(podNameOp)
for _, podName := range podNameList {
if strings.Contains(podName, bkpodname) && err == nil {
e2e.Logf("First RecurringBkpPod is %v", podName)
recurPod = podName
return true, nil
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The recurring Backup job is not scheduled")
return recurPod
}
func waitForRecurBackupJobToComplete(oc *exutil.CLI, namespace string, expectedPod string, expectedState string) {
firstSchPod := waitForFirstBackupjobToSchedule(oc, namespace, expectedPod)
err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
statusOp, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pod", firstSchPod, "-o=jsonpath='{.status.phase}'").Output()
if err != nil {
return false, err
}
if strings.Contains(statusOp, expectedState) && err == nil {
e2e.Logf("firstSchPod %v is %v", firstSchPod, statusOp)
return true, nil
}
e2e.Logf("firstSchPod %v is %v, Trying again", firstSchPod, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The recurring Backup job is not completed")
}
func isCRDExisting(oc *exutil.CLI, crd string) bool {
output, err := oc.AsAdmin().Run("get").Args("CustomResourceDefinition", crd, "-o=jsonpath={.metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return output == crd
}
func createCRD(oc *exutil.CLI, filename string) {
baseDir := exutil.FixturePath("testdata", "etcd")
crdTemplate := filepath.Join(baseDir, filename)
err := oc.AsAdmin().Run("create").Args("-f", crdTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Successfully created CRD")
}
// make sure all the etcd pods are running
func checkEtcdPodStatus(oc *exutil.CLI) bool {
output, err := oc.AsAdmin().Run("get").Args("pods", "-l", "app=etcd", "-n", "openshift-etcd", "-o=jsonpath='{.items[*].status.phase}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
statusList := strings.Fields(output)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("Find etcd pod is not running")
return false
}
}
return true
}
// make sure all the etcd operator pods are running
func checkEtcdOperatorPodStatus(oc *exutil.CLI) bool {
output, err := oc.AsAdmin().Run("get").Args("pods", "-n", "openshift-etcd-operator", "-o=jsonpath='{.items[*].status.phase}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
statusList := strings.Fields(output)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("etcd operator pod is not running")
return false
}
}
return true
}
// get the proxies
func getGlobalProxy(oc *exutil.CLI) (string, string) {
httpProxy, httpErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpProxy}").Output()
o.Expect(httpErr).NotTo(o.HaveOccurred())
httpsProxy, httpsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpsProxy}").Output()
o.Expect(httpsErr).NotTo(o.HaveOccurred())
return httpProxy, httpsProxy
}
func verifyImageIDwithProxy(oc *exutil.CLI, nodeNameList []string, httpProxy string, httpsProxy string, imageID string, cVersion string) bool {
found := 0
cmd := "export http_proxy=" + httpProxy + ";export https_proxy=" + httpsProxy + ";oc adm release info --registry-config=/var/lib/kubelet/config.json " + cVersion + " --image-for=etcd"
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", cmd)
if strings.Contains(resultOutput, imageID) && err == nil {
e2e.Logf("Image %v verified on node %v", imageID, node)
found++
} else {
e2e.Logf("Image %v could not be verified on node %v", imageID, node)
}
}
return found == len(nodeNameList)
}
func waitForPodStatus(oc *exutil.CLI, podName string, nameSpace string, podStatus string) {
err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
statusOp, err := oc.AsAdmin().Run("get").Args("-n", nameSpace, "pod", podName, "-o=jsonpath='{.status.phase}'").Output()
if err != nil {
return false, err
}
if strings.Contains(statusOp, podStatus) {
e2e.Logf("pod %v is %v", podName, podStatus)
return true, nil
}
e2e.Logf("Pod %v is %v, Trying again", podName, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The test pod job is not running")
}
func verifyBkpFileCreationOnExternalVol(oc *exutil.CLI, podName string, nameSpace string, bkpPath string, bkpFile string) bool {
resultOutput, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", nameSpace, podName, "bash", "-c", `ls -lrt `+bkpPath).Output()
if strings.Contains(resultOutput, bkpFile) && err == nil {
e2e.Logf("OneOffBackupFile %v successfully verified on exterval volume", bkpFile)
return true
} else {
e2e.Logf("OneOffBackupFile %v not found on exterval volume", bkpFile)
return false
}
}
func verifyRecurringBkpFileOnExternalVol(oc *exutil.CLI, podName string, nameSpace string, bkpPath string, bkpFile string, count string) bool {
cmd := "ls -lrt " + bkpPath + " | grep " + bkpFile + " | wc "
resultOutput, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", nameSpace, podName, "bash", "-c", cmd).Output()
e2e.Logf("resultOutput is %v", resultOutput)
opWords := strings.Fields(resultOutput)
if len(opWords) > 0 && strings.Contains(opWords[0], count) && err == nil {
e2e.Logf("Recurring Backup successfully verified on external volume")
return true
}
return false
}
| package etcd | ||||
function | openshift/openshift-tests-private | 1f1f53fe-2ec3-447e-89b4-1dc5e7aa42f8 | getRandomString | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
} | etcd | ||||
function | openshift/openshift-tests-private | beb9d47c-8768-4c2f-a2a5-8c4e5169c5c2 | getNodeListByLabel | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func getNodeListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
} | etcd | ||||
function | openshift/openshift-tests-private | e37d9ce9-09cb-445e-9be9-1a216bc0619e | getPodListByLabel | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func getPodListByLabel(oc *exutil.CLI, labelKey string) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-etcd", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podNameList := strings.Fields(output)
return podNameList
} | etcd | ||||
function | openshift/openshift-tests-private | 10dff46e-34c3-46f0-9775-368e6637266e | getIpOfMasterNode | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func getIpOfMasterNode(oc *exutil.CLI, labelKey string) string {
ipOfNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath={.items[0].status.addresses[0].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return ipOfNode
} | etcd | |||||
function | openshift/openshift-tests-private | 3d267244-18e8-4de5-b87c-b56cf72bff26 | verifyImageIDInDebugNode | ['"strings"', '"encoding/json"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifyImageIDInDebugNode(oc *exutil.CLI, nodeNameList []string, imageID string, cVersion string) bool {
found := 0
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "oc", "adm", "release", "info", "--registry-config=/var/lib/kubelet/config.json", cVersion, "--image-for=etcd")
if strings.Contains(resultOutput, imageID) && err == nil {
e2e.Logf("Image %v verified on node %v", imageID, node)
found++
} else {
e2e.Logf("Image %v could not be verified on node %v", imageID, node)
}
}
return found == len(nodeNameList)
} | etcd | ||||
function | openshift/openshift-tests-private | fc3a2514-abad-41d9-8fb8-0f7b6930e450 | verifySSLHealth | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifySSLHealth(oc *exutil.CLI, ipOfNode string, node string) bool {
healthCheck := false
nodeIPAndPort := ipOfNode + ":9979"
resultOutput, _ := exutil.DebugNodeWithChroot(oc, node, "podman", "run", "--rm", "-ti", "docker.io/drwetter/testssl.sh:latest", nodeIPAndPort)
outputLines := strings.Split(resultOutput, "\n")
for _, eachLine := range outputLines {
if strings.Contains(eachLine, "SWEET32") && strings.Contains(eachLine, "not vulnerable (OK)") {
healthCheck = true
break
}
}
if healthCheck {
e2e.Logf("The SWEET32 vulnerability check passed")
} else {
e2e.Logf("SSL scan output: %v ", resultOutput)
}
return healthCheck
} | etcd | ||||
function | openshift/openshift-tests-private | d8374d23-d7c2-4ad6-a4fe-1fdd63d9e4ba | runDRBackup | ['"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func runDRBackup(oc *exutil.CLI, nodeNameList []string) (nodeName string, etcddb string) {
var nodeN, etcdDb string
for nodeindex, node := range nodeNameList {
backupout, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", oc.Namespace(), "node/"+node, "--", "chroot", "/host", "/usr/local/bin/cluster-backup.sh", "/home/core/assets/backup").Output()
if strings.Contains(backupout, "Snapshot saved at") && err == nil {
e2e.Logf("backup on master %v ", node)
re := regexp.MustCompile("/home/core/assets/backup/snapshot.*db")
etcdDb = re.FindString(backupout)
nodeN = node
break
} else if err != nil && nodeindex < len(nodeNameList)-1 {
e2e.Logf("Try for next master!")
} else {
e2e.Failf("Failed to run the backup!")
}
}
return nodeN, etcdDb
} | etcd | ||||
function | openshift/openshift-tests-private | 2deec8f8-ef32-40f1-94b4-a4062bcf9f45 | doPrometheusQuery | ['"fmt"', '"encoding/json"'] | ['PrometheusQueryResult'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func doPrometheusQuery(oc *exutil.CLI, token string, url string, query string) PrometheusQueryResult {
var data PrometheusQueryResult
msg, _, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(
"-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "-i", "--",
"curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token),
fmt.Sprintf("%s%s", url, query)).Outputs()
if err != nil {
e2e.Failf("Failed Prometheus query, error: %v", err)
}
o.Expect(msg).NotTo(o.BeEmpty())
json.Unmarshal([]byte(msg), &data)
logPrometheusResult(data)
return data
} | etcd | |||
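A typical caller pairs a threshold query with an empty-result assertion, as the benchmark tests above do (sketch; token and prometheusURL are obtained exactly as shown in those tests):
query := "histogram_quantile(0.99, irate(etcd_disk_backend_commit_duration_seconds_bucket[1m]))>0.03"
data := doPrometheusQuery(oc, token, prometheusURL, query)
// An empty result vector means no series exceeded the 30ms threshold.
o.Expect(data.Data.Result).To(o.BeEmpty())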
function | openshift/openshift-tests-private | b3d3d136-3b86-4aa2-a3df-376c39c43c3c | logPrometheusResult | ['"fmt"'] | ['PrometheusQueryResult'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func logPrometheusResult(data PrometheusQueryResult) {
if len(data.Data.Result) > 0 {
e2e.Logf("Unexpected metric values.")
for i, v := range data.Data.Result {
e2e.Logf(fmt.Sprintf("index: %d value: %s", i, v.Value[1].(string)))
}
}
} | etcd | |||
function | openshift/openshift-tests-private | 1f5bf2b1-1567-421b-b5d9-82e2a589f194 | waitForMicroshiftAfterRestart | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func waitForMicroshiftAfterRestart(oc *exutil.CLI, nodename string) {
exutil.DebugNodeWithOptionsAndChroot(oc, nodename, []string{"-q"}, "bash", "-c", "systemctl restart microshift")
mStatusErr := wait.Poll(6*time.Second, 300*time.Second, func() (bool, error) {
output, _ := exutil.DebugNodeWithOptionsAndChroot(oc, nodename, []string{"-q"}, "bash", "-c", "systemctl status microshift")
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift status is: %v ", output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(mStatusErr, fmt.Sprintf("Microshift failed to restart: %v", mStatusErr))
} | etcd | ||||
function | openshift/openshift-tests-private | c60ade62-c635-4872-aa7e-057b99242268 | waitForPvcStatus | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func waitForPvcStatus(oc *exutil.CLI, namespace string, pvcname string) {
err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
pvStatus, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pvc", pvcname, "-o=jsonpath='{.status.phase}'").Output()
if err != nil {
return false, err
}
if match, _ := regexp.MatchString("Bound", pvStatus); match {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The PVC is not Bound as expected")
} | etcd | ||||
function | openshift/openshift-tests-private | c4186e85-bc89-4cae-acac-d45523eff6b4 | waitForOneOffBackupToComplete | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func waitForOneOffBackupToComplete(oc *exutil.CLI, namespace string, bkpname string) {
err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
pvStatus, err := oc.AsAdmin().Run("get").Args("-n", namespace, "etcdbackup", bkpname, "-o=jsonpath='{.status.conditions[*].reason}'").Output()
if err != nil {
return false, err
}
if match, _ := regexp.MatchString("BackupCompleted", pvStatus); match {
e2e.Logf("OneOffBkpJob status is %v", pvStatus)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The BackupJob is not Completed as expected")
} | etcd | ||||
function | openshift/openshift-tests-private | 30d052ee-40f0-448a-91aa-bad4efd7f212 | getOneBackupFile | ['"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func getOneBackupFile(oc *exutil.CLI, namespace string, bkpname string) string {
bkpfile := ""
bkpmsg, err := oc.AsAdmin().Run("get").Args("-n", namespace, "etcdbackup", bkpname, "-o=jsonpath='{.status.conditions[*].message}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bkpmsgList := strings.Fields(bkpmsg)
for _, bmsg := range bkpmsgList {
if match, _ := regexp.MatchString("backup-test", bmsg); match {
e2e.Logf("backupfile is %v", bmsg)
bkpfile = bmsg
break
}
}
return bkpfile
} | etcd | ||||
function | openshift/openshift-tests-private | 3b794444-1ab1-4fad-9984-0ba52c4f657e | verifyBkpFileCreationHost | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifyBkpFileCreationHost(oc *exutil.CLI, nodeNameList []string, bkpPath string, bkpFile string) bool {
cmd := "ls -lrt " + bkpPath
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", cmd)
if strings.Contains(resultOutput, bkpFile) && err == nil {
e2e.Logf("OneOffBackupFile %v successfully verified on node %v", bkpFile, node)
return true
}
e2e.Logf("Trying for next node since BackupFile is not found on this node %v", node)
}
return false
} | etcd | ||||
function | openshift/openshift-tests-private | 379f7867-574e-4262-8861-c5710a00571e | verifyEtcdClusterMsgStatus | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifyEtcdClusterMsgStatus(oc *exutil.CLI, msg string, status string) bool {
etcdStatus, errSt := oc.AsAdmin().WithoutNamespace().Run("get").Args("etcd", "cluster", "-o=jsonpath='{.status.conditions[?(@.reason==\"BootstrapAlreadyRemoved\")].status}'").Output()
o.Expect(errSt).NotTo(o.HaveOccurred())
message, errMsg := oc.AsAdmin().WithoutNamespace().Run("get").Args("etcd", "cluster", "-o=jsonpath='{.status.conditions[?(@.reason==\"BootstrapAlreadyRemoved\")].message}'").Output()
o.Expect(errMsg).NotTo(o.HaveOccurred())
found := false
if strings.Contains(message, msg) && strings.Contains(etcdStatus, status) {
e2e.Logf("message is %v and status is %v", message, etcdStatus)
found = true
}
return found
} | etcd | ||||
function | openshift/openshift-tests-private | 55fc48a5-c638-47e6-9913-08e258ba5fbe | getIPStackType | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func getIPStackType(oc *exutil.CLI) string {
svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
stack := ""
if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 {
stack = "dualstack"
} else if strings.Count(svcNetwork, ":") >= 2 {
stack = "ipv6single"
} else if strings.Count(svcNetwork, ".") >= 2 {
stack = "ipv4single"
}
return stack
} | etcd | ||||
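For reference, the colon/dot counting above classifies sample service-network strings as follows (a standalone sketch of the same logic, runnable on its own):
package main

import (
	"fmt"
	"strings"
)

// stackOf mirrors getIPStackType's classification of the serviceNetwork string.
func stackOf(svcNetwork string) string {
	switch {
	case strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2:
		return "dualstack"
	case strings.Count(svcNetwork, ":") >= 2:
		return "ipv6single"
	case strings.Count(svcNetwork, ".") >= 2:
		return "ipv4single"
	}
	return ""
}

func main() {
	fmt.Println(stackOf(`["172.30.0.0/16"]`))              // ipv4single
	fmt.Println(stackOf(`["fd02::/112"]`))                 // ipv6single
	fmt.Println(stackOf(`["172.30.0.0/16","fd02::/112"]`)) // dualstack
}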
function | openshift/openshift-tests-private | 3ff2eac0-dab1-40bb-b411-fcbc97b5db00 | checkOperator | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func checkOperator(oc *exutil.CLI, operatorName string) {
err := wait.Poll(60*time.Second, 1500*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", operatorName).Output()
if err != nil {
e2e.Logf("get clusteroperator err, will try next time:\n")
return false, nil
}
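// "True.*False.*False" matches the AVAILABLE/PROGRESSING/DEGRADED columns of
// the `oc get clusteroperator` row: available, not progressing, not degraded.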
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal, will try next time:\n", operatorName)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "clusteroperator is abnormal")
} | etcd | ||||
function | openshift/openshift-tests-private | 367ae724-7e7c-4b4a-bd25-30eae3d225a6 | verifyRecurBkpFileCreationHost | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifyRecurBkpFileCreationHost(oc *exutil.CLI, nodeNameList []string, bkpPath string, bkpFile string, count string) bool {
cmd := "ls -lrt " + bkpPath + " | grep " + bkpFile + " | wc "
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", cmd)
opWords := strings.Fields(resultOutput)
if len(opWords) > 0 && strings.Contains(opWords[0], count) && err == nil {
e2e.Logf("Recurring %v successfully verified on node %v", bkpFile, node)
return true
}
e2e.Logf("Trying for next node since expected BackUp files are not found on this node %v", node)
}
return false
} | etcd | ||||
function | openshift/openshift-tests-private | b5f9a725-aa08-4e1d-b515-112b46026a06 | waitForFirstBackupjobToSchedule | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func waitForFirstBackupjobToSchedule(oc *exutil.CLI, namespace string, bkpodname string) string {
recurPod := ""
err := wait.Poll(20*time.Second, 120*time.Second, func() (bool, error) {
podNameOp, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pods", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
podNameList := strings.Fields(podNameOp)
for _, podName := range podNameList {
if strings.Contains(podName, bkpodname) && err == nil {
e2e.Logf("First RecurringBkpPod is %v", podName)
recurPod = podName
return true, nil
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The recurring Backup job is not scheduled")
return recurPod
} | etcd | ||||
function | openshift/openshift-tests-private | f79df5d1-8179-4283-8529-25033ec9b130 | waitForRecurBackupJobToComplete | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func waitForRecurBackupJobToComplete(oc *exutil.CLI, namespace string, expectedPod string, expectedState string) {
firstSchPod := waitForFirstBackupjobToSchedule(oc, namespace, expectedPod)
err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
statusOp, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pod", firstSchPod, "-o=jsonpath='{.status.phase}'").Output()
if err != nil {
return false, err
}
if strings.Contains(statusOp, expectedState) && err == nil {
e2e.Logf("firstSchPod %v is %v", firstSchPod, statusOp)
return true, nil
}
e2e.Logf("firstSchPod %v is %v, Trying again", firstSchPod, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The recurring Backup job is not completed")
} | etcd | ||||
function | openshift/openshift-tests-private | a2c0c405-0e11-46cf-a1fa-0c7899b705ec | isCRDExisting | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func isCRDExisting(oc *exutil.CLI, crd string) bool {
output, err := oc.AsAdmin().Run("get").Args("CustomResourceDefinition", crd, "-o=jsonpath={.metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return output == crd
} | etcd | ||||
function | openshift/openshift-tests-private | a70f028c-176b-4fc7-acdd-f6da602b774b | createCRD | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func createCRD(oc *exutil.CLI, filename string) {
baseDir := exutil.FixturePath("testdata", "etcd")
crdTemplate := filepath.Join(baseDir, filename)
err := oc.AsAdmin().Run("create").Args("-f", crdTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Successfully created CRD")
} | etcd | ||||
function | openshift/openshift-tests-private | bff0727e-c6d5-471a-be7e-7ee357e640ec | checkEtcdPodStatus | ['"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func checkEtcdPodStatus(oc *exutil.CLI) bool {
output, err := oc.AsAdmin().Run("get").Args("pods", "-l", "app=etcd", "-n", "openshift-etcd", "-o=jsonpath='{.items[*].status.phase}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
statusList := strings.Fields(output)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("Find etcd pod is not running")
return false
}
}
return true
} | etcd | ||||
function | openshift/openshift-tests-private | 374c1e14-ab23-4924-9e7b-1cfee5993b9c | checkEtcdOperatorPodStatus | ['"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func checkEtcdOperatorPodStatus(oc *exutil.CLI) bool {
output, err := oc.AsAdmin().Run("get").Args("pods", "-n", "openshift-etcd-operator", "-o=jsonpath='{.items[*].status.phase}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
statusList := strings.Fields(output)
for _, podStatus := range statusList {
if match, _ := regexp.MatchString("Running", podStatus); !match {
e2e.Logf("etcd operator pod is not running")
return false
}
}
return true
} | etcd | ||||
function | openshift/openshift-tests-private | 8dced91a-c3c0-4227-9aa0-68d3ea7732b7 | getGlobalProxy | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func getGlobalProxy(oc *exutil.CLI) (string, string) {
httpProxy, httpErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpProxy}").Output()
o.Expect(httpErr).NotTo(o.HaveOccurred())
httpsProxy, httpsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpsProxy}").Output()
o.Expect(httpsErr).NotTo(o.HaveOccurred())
return httpProxy, httpsProxy
} | etcd | |||||
function | openshift/openshift-tests-private | a8fcf2e8-f951-4f1a-8fe5-a556fba5bbb3 | verifyImageIDwithProxy | ['"strings"', '"encoding/json"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifyImageIDwithProxy(oc *exutil.CLI, nodeNameList []string, httpProxy string, httpsProxy string, imageID string, cVersion string) bool {
found := 0
cmd := "export http_proxy=" + httpProxy + ";export https_proxy=" + httpsProxy + ";oc adm release info --registry-config=/var/lib/kubelet/config.json " + cVersion + " --image-for=etcd"
for _, node := range nodeNameList {
resultOutput, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", cmd)
if strings.Contains(resultOutput, imageID) && err == nil {
e2e.Logf("Image %v verified on node %v", imageID, node)
found++
} else {
e2e.Logf("Image %v could not be verified on node %v", imageID, node)
}
}
return found == len(nodeNameList)
} | etcd | ||||
function | openshift/openshift-tests-private | 520abe2f-4604-43fd-9159-a84e17cbd048 | waitForPodStatus | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func waitForPodStatus(oc *exutil.CLI, podName string, nameSpace string, podStatus string) {
err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
statusOp, err := oc.AsAdmin().Run("get").Args("-n", nameSpace, "pod", podName, "-o=jsonpath='{.status.phase}'").Output()
if err != nil {
return false, err
}
if strings.Contains(statusOp, podStatus) {
e2e.Logf("pod %v is %v", podName, podStatus)
return true, nil
}
e2e.Logf("Pod %v is %v, Trying again", podName, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The test pod job is not running")
} | etcd | ||||
function | openshift/openshift-tests-private | 154ad46a-0fb0-4850-8fb3-66d8b78469f4 | verifyBkpFileCreationOnExternalVol | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifyBkpFileCreationOnExternalVol(oc *exutil.CLI, podName string, nameSpace string, bkpPath string, bkpFile string) bool {
resultOutput, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", nameSpace, podName, "bash", "-c", `ls -lrt `+bkpPath).Output()
if strings.Contains(resultOutput, bkpFile) && err == nil {
e2e.Logf("OneOffBackupFile %v successfully verified on exterval volume", bkpFile)
return true
} else {
e2e.Logf("OneOffBackupFile %v not found on exterval volume", bkpFile)
return false
}
} | etcd | ||||
function | openshift/openshift-tests-private | 721bd2de-4974-4dd2-ba4e-2142f88ed349 | verifyRecurringBkpFileOnExternalVol | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/util.go | func verifyRecurringBkpFileOnExternalVol(oc *exutil.CLI, podName string, nameSpace string, bkpPath string, bkpFile string, count string) bool {
cmd := "ls -lrt " + bkpPath + " | grep " + bkpFile + " | wc "
resultOutput, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", nameSpace, podName, "bash", "-c", cmd).Output()
e2e.Logf("resultOutput is %v", resultOutput)
opWords := strings.Fields(resultOutput)
if len(opWords) > 0 && strings.Contains(opWords[0], count) && err == nil {
e2e.Logf("Recurring Backup successfully verified on external volume")
return true
}
return false
} | etcd | ||||
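Taken together, the one-off backup helpers above compose roughly like this (sketch; the backup name and host path are illustrative assumptions, not values from a real test):
// Illustrative composition of the helpers above; bkp name and path are assumed.
ns, bkp := "openshift-etcd", "backup-test-once"
waitForOneOffBackupToComplete(oc, ns, bkp)
bkpFile := getOneBackupFile(oc, ns, bkp)
o.Expect(bkpFile).NotTo(o.BeEmpty())
masters := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
o.Expect(verifyBkpFileCreationHost(oc, masters, "/etc/kubernetes/cluster-backup", bkpFile)).To(o.BeTrue())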
test | openshift/openshift-tests-private | ed45fb28-0c82-4509-920f-d458e7dcfeb2 | etcd_tests | import (
"bufio"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | package etcd
import (
"bufio"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-etcd] ETCD", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("default-"+getRandomString(), exutil.KubeConfigPath())
// author: [email protected]
g.It("NonHyperShiftHOST-Author:skundu-Critical-43330-Ensure a safety net for the 3.4 to 3.5 etcd upgrade", func() {
var (
err error
msg string
)
g.By("Test for case OCP-43330 Ensure a safety net for the 3.4 to 3.5 etcd upgrade")
oc.SetupProject()
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
e2e.Logf("verify whether etcd version is 3.5")
output, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "etcdctl")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("3.5"))
e2e.Logf("get the Kubernetes version")
version, err := exec.Command("bash", "-c", "oc version | grep Kubernetes |awk '{print $3}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sVersion := string(version)
kubeVer := strings.Split(sVersion, "+")[0]
kubeVer = strings.TrimSpace(kubeVer)
// The kubelet version can temporarily differ from the Kubernetes version because of the RHCOS version;
// they match again once a new RHCOS version ships. For details see https://issues.redhat.com/browse/OCPBUGS-48612
pattern := regexp.MustCompile(`\S+?.\d+`)
validVer := pattern.FindAllString(kubeVer, -1)
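// e.g. FindAllString("v1.29.10", -1)[0] == "v1.29", so only the
// major.minor prefix is compared against the kubelet version.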
e2e.Logf("Version considered is %v", validVer[0])
e2e.Logf("retrieve all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("verify the kubelet version in node details")
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("node", masterNodeList[0], "-o", "custom-columns=VERSION:.status.nodeInfo.kubeletVersion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring(validVer[0]))
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:geliu-Medium-52418-Add new parameter to avoid Potential etcd inconsistent revision and data occurs", func() {
g.By("Test for case OCP-52418-Add new parameter to avoid Potential etcd inconsistent revision and data occurs")
oc.SetupProject()
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
e2e.Logf("get the expected parameter from etcd member pod")
output, err := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[*].command[*]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("experimental-initial-corrupt-check=true"))
})
// author: [email protected]
g.It("Author:skundu-NonHyperShiftHOST-WRS-LEVEL0-NonPreRelease-Longduration-Critical-52312-V-ACS.03-cluster-backup.sh script has a conflict to use /etc/kubernetes/static-pod-certs folder [Serial]", func() {
g.By("Test for case OCP-52312 cluster-backup.sh script has a conflict to use /etc/kubernetes/static-pod-certs folder.")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
defer func() {
e2e.Logf("Remove the certs directory")
_, errCert := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "rm", "-rf", "/etc/kubernetes/static-pod-certs")
o.Expect(errCert).NotTo(o.HaveOccurred())
}()
e2e.Logf("Create the certs directory")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "mkdir", "/etc/kubernetes/static-pod-certs")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
e2e.Logf("Remove the backup directory")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "rm", "-rf", "/home/core/assets/backup")
o.Expect(err).NotTo(o.HaveOccurred())
}()
firstMNode := []string{masterNodeList[0]}
e2e.Logf("Run the backup")
masterN, _ := runDRBackup(oc, firstMNode)
e2e.Logf("Etcd db successfully backed up on node %v", masterN)
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-57119-SSL/TLS: Birthday attack against 64 bit block ciphers (SWEET32) etcd metrics port 9979 [Serial]", func() {
g.By("Test for case OCP-57119 SSL/TLS: Birthday attack against 64 bit block ciphers (SWEET32) etcd metrics port 9979 .")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("get the ip of the first node")
ipOfNode := getIpOfMasterNode(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Node IP %v", ipOfNode)
e2e.Logf("Verify the SSL Health of port 9979")
res := verifySSLHealth(oc, ipOfNode, masterNodeList[0])
if res {
e2e.Logf("SSL health on port 9979 is healthy.")
} else {
e2e.Failf("SSL health on port 9979 is vulnerable")
}
})
// author: [email protected]
g.It("Author:skundu-NonHyperShiftHOST-WRS-LEVEL0-Critical-24280-V-ACS.03-Etcd basic verification", func() {
g.By("Test for case OCP-52418-Etcd basic verification")
e2e.Logf("check cluster Etcd operator status")
checkOperator(oc, "etcd")
e2e.Logf("verify cluster Etcd operator pod is Running")
podOprtAllRunning := checkEtcdOperatorPodStatus(oc)
if !podOprtAllRunning {
e2e.Failf("etcd operator pod is not in running state")
}
e2e.Logf("retrieve all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
if len(masterNodeList) != len(etcdPodList) {
e2e.Failf("mismatch in the number of etcd pods and master nodes.")
}
e2e.Logf("Ensure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc)
if !podAllRunning {
e2e.Failf("etcd pods are not in running state")
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:geliu-Critical-54129-New etcd alerts to be added to the monitoring stack in ocp 4.10.", func() {
g.By("Test for case OCP-54129-New etcd alerts to be added to the monitoring stack in ocp 4.10.")
e2e.Logf("Check new alert msg have been updated")
output, err := exec.Command("bash", "-c", "oc -n openshift-monitoring get cm prometheus-k8s-rulefiles-0 -oyaml | grep \"alert: etcd\"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("etcdHighFsyncDurations"))
o.Expect(output).To(o.ContainSubstring("etcdDatabaseQuotaLowSpace"))
o.Expect(output).To(o.ContainSubstring("etcdExcessiveDatabaseGrowth"))
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:skundu-Critical-73564-Validate cert rotation in 4.16. [Disruptive]", func() {
exutil.By("Test for case OCP-73564-Validate cert rotation in 4.16.")
e2e.Logf("Check the lifetime of newly created signer certificate in openshift-etcd namespace")
filename := "73564_out.json"
initialexpiry, _ := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
e2e.Logf("initial expiry is: %v ", initialexpiry)
e2e.Logf("Recreate the signer by deleting it")
defer func() {
checkOperator(oc, "etcd")
checkOperator(oc, "kube-apiserver")
}()
_, errdel := oc.AsAdmin().Run("delete").Args("-n", "openshift-etcd", "secret", "etcd-signer").Output()
o.Expect(errdel).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
checkOperator(oc, "kube-apiserver")
e2e.Logf("Verify the newly created expiry time is differnt from the initial one")
newexpiry, _ := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
e2e.Logf("renewed expiry is: %v ", newexpiry)
if initialexpiry == newexpiry {
e2e.Failf("The signer cert expiry did n't renew")
}
e2e.Logf("Once the revision with the updated bundle is rolled out, swap the original CA in the openshift-config namespace, with the newly rotated one in openshift-etcd")
out, _ := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-ojson").OutputToFile(filename)
jqCmd := fmt.Sprintf(`cat %s | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' > /tmp/73564.yaml`, out)
_, errex := exec.Command("bash", "-c", jqCmd).Output()
e2e.Logf("jqcmd is %v", jqCmd)
o.Expect(errex).NotTo(o.HaveOccurred())
defer os.RemoveAll("/tmp/73564.yaml")
_, errj := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-n", "openshift-config", "-f", "/tmp/73564.yaml").Output()
o.Expect(errj).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
checkOperator(oc, "kube-apiserver")
e2e.Logf("Remove old CA from the trust bundle. This will regenerate the bundle with only the signer certificates from openshift-config and openshift-etcd, effectively removing all unknown/old public keys.")
_, err := oc.AsAdmin().Run("delete").Args("-n", "openshift-etcd", "configmap", "etcd-ca-bundle").Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("NonHyperShiftHOST-PstChkUpgrade-ConnectedOnly-Author:skundu-NonPreRelease-Critical-22665-Check etcd image have been update to target release value after upgrade [Serial]", func() {
g.By("Test for case OCP-22665 Check etcd image have been update to target release value after upgrade.")
var (
errImg error
etcdImageID string
)
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
e2e.Logf("get the image id from the etcd pod")
etcdImageID, errImg = oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.status.containerStatuses[?(@.name==\"etcd\")].image}").Output()
o.Expect(errImg).NotTo(o.HaveOccurred())
e2e.Logf("etcd imagid is %v", etcdImageID)
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("get the clusterVersion")
clusterVersion, errClvr := oc.AsAdmin().Run("get").Args("clusterversions", "version", "-o=jsonpath={.status.desired.image}").Output()
o.Expect(errClvr).NotTo(o.HaveOccurred())
e2e.Logf("clusterVersion is %v", clusterVersion)
httpProxy, httpsProxy := getGlobalProxy(oc)
if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") {
e2e.Logf("It's a proxy platform.")
ret := verifyImageIDwithProxy(oc, masterNodeList, httpProxy, httpsProxy, etcdImageID, clusterVersion)
if ret {
e2e.Logf("Image version of etcd successfully updated to the target release on all the node(s) of cluster with proxy")
} else {
e2e.Failf("etcd Image update to target release on proxy cluster failed")
}
} else {
g.By("Run the command on node(s)")
res := verifyImageIDInDebugNode(oc, masterNodeList, etcdImageID, clusterVersion)
if res {
e2e.Logf("Image version of etcd successfully updated to the target release on all the node(s)")
} else {
e2e.Failf("etcd Image update to target release failed")
}
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-64148-Verify etcd-bootstrap member is removed properly [Serial]", func() {
g.By("Test for case OCP-64148 Verify etcd-bootstrap member is removed properly.")
g.By("Verifying etcd cluster message and status")
res := verifyEtcdClusterMsgStatus(oc, "etcd-bootstrap member is already removed", "True")
if res {
e2e.Logf("etcd bootstrap member successfully removed")
} else {
e2e.Failf("failed to remove the etcd bootstrap member")
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66726-Automated one-off backup for etcd using PVC on hostpath. [Disruptive]", func() {
g.By("Test for case OCP-66726 Automated one-off backup for etcd using PVC on hostpath.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvName = "etcd-backup-pv-h-66726"
pvcName = "etcd-backup-pvc-h-66726"
bkphostpath = "/etc/kubernetes/cluster-backup"
etcdBkp = "testbackup-h-66726"
nameSpace = "openshift-etcd"
pvYamlFile = tmpdir + "pv-hostpath.yaml"
pvcYamlFile = tmpdir + "pvc-hostpath.yaml"
oneOffBkphpYamlFile = tmpdir + "oneOffbkp-hostpath.yaml"
pvYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolume
metadata:
name: %s
spec:
storageClassName: manual
capacity:
storage: %s
accessModes:
- ReadWriteOnce
hostPath:
path: %s
`, pvName, "10Gi", bkphostpath)
pvcYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: %s
namespace: openshift-etcd
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: %s
volumeName: %s
`, pvcName, "10Gi", pvName)
oneOffBkphpYaml = fmt.Sprintf(`apiVersion: operator.openshift.io/v1alpha1
kind: EtcdBackup
metadata:
name: %s
namespace: openshift-etcd
spec:
pvcName: %s`, etcdBkp, pvcName)
)
g.By("2 Create a PV for hostpath")
f, err := os.Create(pvYamlFile)
o.Expect(err).NotTo(o.HaveOccurred())
defer f.Close()
w := bufio.NewWriter(f)
_, werr := w.WriteString(pvYaml)
w.Flush()
o.Expect(werr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvYamlFile).Execute()
pvErr := oc.AsAdmin().Run("create").Args("-f", pvYamlFile).Execute()
o.Expect(pvErr).NotTo(o.HaveOccurred())
g.By("3 Create a PVC for hostpath")
pf, errp := os.Create(pvcYamlFile)
o.Expect(errp).NotTo(o.HaveOccurred())
defer pf.Close()
w2 := bufio.NewWriter(pf)
_, perr := w2.WriteString(pvcYaml)
w2.Flush()
o.Expect(perr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
pvcErr := oc.AsAdmin().Run("create").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
o.Expect(pvcErr).NotTo(o.HaveOccurred())
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("4. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("5 Create a oneOffBackup for hostpath")
bkpf, bkperr := os.Create(oneOffBkphpYamlFile)
o.Expect(bkperr).NotTo(o.HaveOccurred())
defer bkpf.Close()
w3 := bufio.NewWriter(bkpf)
_, bwerr := w3.WriteString(oneOffBkphpYaml)
w3.Flush()
o.Expect(bwerr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", oneOffBkphpYamlFile).Execute()
bkpErr := oc.AsAdmin().Run("create").Args("-f", oneOffBkphpYamlFile).Execute()
o.Expect(bkpErr).NotTo(o.HaveOccurred())
waitForOneOffBackupToComplete(oc, nameSpace, etcdBkp)
backupfile := getOneBackupFile(oc, nameSpace, etcdBkp)
o.Expect(backupfile).NotTo(o.BeEmpty(), "Failed to get the Backup file")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Verify the backup creation")
verify := verifyBkpFileCreationHost(oc, masterNodeList, bkphostpath, backupfile)
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on node")
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66727-Automated recurring backup for etcd using PVC on hostpath. [Disruptive]", func() {
g.By("Test for case OCP-66727 Automated recurring backup for etcd using PVC on hostpath.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66727" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvName = "etcd-backup-pv-h-66727"
pvcName = "etcd-backup-pvc-h-66727"
bkphostpath = "/etc/kubernetes/cluster-backup"
etcdBkp = "testbackup-h-66727"
maxNoBackup = 3
nameSpace = "openshift-etcd"
pvYamlFile = tmpdir + "pv-hostpath.yaml"
pvcYamlFile = tmpdir + "pvc-hostpath.yaml"
recurringBkphpYamlFile = tmpdir + "recurringBkp-hostpath.yaml"
pvYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolume
metadata:
name: %s
spec:
storageClassName: manual
capacity:
storage: %s
accessModes:
- ReadWriteOnce
hostPath:
path: %s
`, pvName, "10Gi", bkphostpath)
pvcYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: %s
namespace: openshift-etcd
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: %s
volumeName: %s
`, pvcName, "10Gi", pvName)
recurringBkphpYaml = fmt.Sprintf(`apiVersion: config.openshift.io/v1alpha1
kind: Backup
metadata:
name: %s
spec:
etcd:
schedule: "*/1 * * * *"
timeZone: "UTC"
retentionPolicy:
retentionType: RetentionNumber
retentionNumber:
maxNumberOfBackups: %d
pvcName: %s`, etcdBkp, maxNoBackup, pvcName)
)
g.By("2 Create a PV for hostpath")
f, err := os.Create(pvYamlFile)
o.Expect(err).NotTo(o.HaveOccurred())
defer f.Close()
w := bufio.NewWriter(f)
_, werr := w.WriteString(pvYaml)
w.Flush()
o.Expect(werr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvYamlFile).Execute()
pvErr := oc.AsAdmin().Run("create").Args("-f", pvYamlFile).Execute()
o.Expect(pvErr).NotTo(o.HaveOccurred())
g.By("3 Create a PVC for hostpath")
pf, errp := os.Create(pvcYamlFile)
o.Expect(errp).NotTo(o.HaveOccurred())
defer pf.Close()
w2 := bufio.NewWriter(pf)
_, perr := w2.WriteString(pvcYaml)
w2.Flush()
o.Expect(perr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
pvcErr := oc.AsAdmin().Run("create").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
o.Expect(pvcErr).NotTo(o.HaveOccurred())
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("4. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("5 Create a recurringBackup for hostpath")
bkpf, bkperr := os.Create(recurringBkphpYamlFile)
o.Expect(bkperr).NotTo(o.HaveOccurred())
defer bkpf.Close()
w3 := bufio.NewWriter(bkpf)
_, bwerr := w3.WriteString(recurringBkphpYaml)
w3.Flush()
o.Expect(bwerr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", recurringBkphpYamlFile).Execute()
bkpErr := oc.AsAdmin().Run("create").Args("-f", recurringBkphpYamlFile).Execute()
o.Expect(bkpErr).NotTo(o.HaveOccurred())
waitForRecurBackupJobToComplete(oc, nameSpace, etcdBkp, "Succeeded")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Need to wait for 3 minutes as 3 jobs are scheduled after every 1 minute each")
time.Sleep(180 * time.Second)
e2e.Logf("Verify the backup creation")
verify := verifyRecurBkpFileCreationHost(oc, masterNodeList, bkphostpath, "backup-"+etcdBkp, "4")
o.Expect(verify).To(o.BeTrue(), "Failed to verify recurring backup files on node")
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66716-Automated one-off backup for etcd using dynamically provisioned PV externally. [Disruptive]", func() {
g.By("Test for case OCP-66716 Automated one-off backup for etcd using dynamically provisioned PV externally.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
output, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
platform := strings.ToLower(output)
storageCn := ""
if platform == "aws" {
storageCn = "gp3-csi"
} else if platform == "azure" {
storageCn = "azurefile-csi"
} else if platform == "gcp" {
storageCn = "standard-csi"
} else {
g.Skip("this platform is currently not supported, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66716" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvcName = "etcd-backup-pvc-e-66716"
podName = "test-pod-66716"
bkpPath = "/data"
etcdBkp = "testbackup-e-66716"
nameSpace = "openshift-etcd"
)
g.By("1. Create a PVC for requesting external volume")
baseDir := exutil.FixturePath("testdata", "etcd")
pvcTemplate := filepath.Join(baseDir, "pvc-ext.yaml")
params := []string{"-f", pvcTemplate, "-p", "NAME=" + pvcName, "NAMESPACE=" + nameSpace, "STORAGE=1Gi", "SCNAME=" + storageCn}
defer oc.AsAdmin().Run("delete").Args("pvc", pvcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
e2e.Logf("2. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("3. Create a oneOffBackup for external volume")
oneOffTemplate := filepath.Join(baseDir, "oneoffbackup.yaml")
paramsOneOff := []string{"-f", oneOffTemplate, "-p", "NAME=" + etcdBkp, "NAMESPACE=" + nameSpace, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("EtcdBackup", etcdBkp, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsOneOff...)
g.By("4. Wait for PVC to bind to the backup pod")
waitForPvcStatus(oc, nameSpace, pvcName)
g.By("5. Wait for backupjob to complete")
waitForOneOffBackupToComplete(oc, nameSpace, etcdBkp)
backupfile := getOneBackupFile(oc, nameSpace, etcdBkp)
o.Expect(backupfile).NotTo(o.BeEmpty(), "Failed to get the Backup file")
g.By("6. Create a test-pod to access the volume.")
testpodTemplate := filepath.Join(baseDir, "testpod.yaml")
paramsTpod := []string{"-f", testpodTemplate, "-p", "NAME=" + podName, "NAMESPACE=" + nameSpace, "PATH=" + bkpPath, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("pod", podName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsTpod...)
waitForPodStatus(oc, podName, nameSpace, "Running")
g.By("7. verify whether backup is created on external volume")
verify := verifyBkpFileCreationOnExternalVol(oc, podName, nameSpace, bkpPath, backupfile)
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on external volume")
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66717-Automated recurring backup for etcd using dynamically provisioned PV externally. [Disruptive]", func() {
g.By("Test for case OCP-66717 Automated recurring backup for etcd using dynamically provisioned PV externally.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
output, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
platform := strings.ToLower(output)
storageCn := ""
if platform == "aws" {
storageCn = "gp3-csi"
} else if platform == "azure" {
storageCn = "azurefile-csi"
} else if platform == "gcp" {
storageCn = "standard-csi"
} else {
g.Skip("this platform is currently not supported, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66717" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvcName = "etcd-backup-pvc-e-66717"
podName = "test-pod-66717"
maxNoBackup = 3
bkpPath = "/data"
etcdBkp = "testbackup-e-66717"
nameSpace = "openshift-etcd"
)
g.By("1. Create a PVC for requesting external volume")
baseDir := exutil.FixturePath("testdata", "etcd")
pvcTemplate := filepath.Join(baseDir, "pvc-ext.yaml")
params := []string{"-f", pvcTemplate, "-p", "NAME=" + pvcName, "NAMESPACE=" + nameSpace, "STORAGE=1Gi", "SCNAME=" + storageCn}
defer oc.AsAdmin().Run("delete").Args("pvc", pvcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
e2e.Logf("2. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("3. Create a recurringBackup for external volume")
recurTemplate := filepath.Join(baseDir, "recurringbackup.yaml")
paramsRecur := []string{"-f", recurTemplate, "-p", "NAME=" + etcdBkp, "MNUMBACKUP=" + strconv.Itoa(maxNoBackup), "PVCNAME=" + pvcName}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("Backup", etcdBkp).Execute()
exutil.CreateClusterResourceFromTemplate(oc, paramsRecur...)
g.By("4. Wait for PVC to bind to the backup pod")
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("Need to wait for 3 minutes as 3 jobs are scheduled after every 1 minute each")
time.Sleep(180 * time.Second)
g.By("5. Create a test-pod to access the volume.")
testpodTemplate := filepath.Join(baseDir, "testpod.yaml")
paramsTpod := []string{"-f", testpodTemplate, "-p", "NAME=" + podName, "NAMESPACE=" + nameSpace, "PATH=" + bkpPath, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("pod", podName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsTpod...)
waitForPodStatus(oc, podName, nameSpace, "Running")
e2e.Logf("6. Verify the backup creation")
verify := verifyRecurringBkpFileOnExternalVol(oc, podName, nameSpace, bkpPath, "backup-"+etcdBkp, "4")
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on external volume")
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66729-Validate default value for configurable parameters RetentionNumber for recurring backup of etcd. [Disruptive]", func() {
g.By("Test for case OCP-66729 Validate default value for configurable parameters RetentionNumber for recurring backup of etcd.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
output, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
platform := strings.ToLower(output)
storageCn := ""
if platform == "aws" {
storageCn = "gp3-csi"
} else if platform == "azure" {
storageCn = "azurefile-csi"
} else if platform == "gcp" {
storageCn = "standard-csi"
} else {
g.Skip("this platform is currently not supported, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66729" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvcName = "etcd-backup-pvc-e-66729"
podName = "test-pod-66729"
bkpPath = "/data"
etcdBkp = "testbackup-e-66729"
nameSpace = "openshift-etcd"
)
g.By("1. Create a PVC for requesting external volume")
baseDir := exutil.FixturePath("testdata", "etcd")
pvcTemplate := filepath.Join(baseDir, "pvc-ext.yaml")
params := []string{"-f", pvcTemplate, "-p", "NAME=" + pvcName, "NAMESPACE=" + nameSpace, "STORAGE=10Gi", "SCNAME=" + storageCn}
defer oc.AsAdmin().Run("delete").Args("pvc", pvcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
e2e.Logf("2. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("3. Create a recurringBackup for external volume")
recurTemplate := filepath.Join(baseDir, "recurringbkpdefault.yaml")
paramsRecur := []string{"-f", recurTemplate, "-p", "NAME=" + etcdBkp, "PVCNAME=" + pvcName}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("Backup", etcdBkp).Execute()
exutil.CreateClusterResourceFromTemplate(oc, paramsRecur...)
g.By("4. Wait for PVC to bind to the backup pod")
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("Need to wait for 15 minutes as 15 jobs are scheduled by default at an interval of 1 minute.")
time.Sleep(920 * time.Second)
g.By("5. Create a test-pod to access the volume.")
testpodTemplate := filepath.Join(baseDir, "testpod.yaml")
paramsTpod := []string{"-f", testpodTemplate, "-p", "NAME=" + podName, "NAMESPACE=" + nameSpace, "PATH=" + bkpPath, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("pod", podName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsTpod...)
waitForPodStatus(oc, podName, nameSpace, "Running")
e2e.Logf("6. Verify the backup creation")
verify := verifyRecurringBkpFileOnExternalVol(oc, podName, nameSpace, bkpPath, "backup-"+etcdBkp, "16")
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on external volume")
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-54999-Verify ETCD is not degraded in dual-stack networking cluster.[Serial]", func() {
g.By("Test for case OCP-54999 Verify ETCD is not degraded in dual-stack networking cluster.")
ipStackType := getIPStackType(oc)
g.By("Skip testing on ipv4 or ipv6 single stack cluster")
if ipStackType == "ipv4single" || ipStackType == "ipv6single" {
g.Skip("The case only can be run on dualstack cluster , skip for single stack cluster!!!")
}
g.By("Verifying etcd status on dualstack cluster")
if ipStackType == "dualstack" {
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
podAllRunning := checkEtcdPodStatus(oc)
if !podAllRunning {
e2e.Failf("etcd pods are not in running state")
}
}
})
})
var _ = g.Describe("[sig-etcd] ETCD Microshift", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLIWithoutNamespace("default")
// author: [email protected]
g.It("MicroShiftOnly-Author:geliu-Medium-62738-[ETCD] Build Microshift prototype to launch etcd as an transient systemd unit", func() {
g.By("1. Get microshift node")
masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred())
o.Expect(masterNodes).NotTo(o.BeEmpty())
masterNode := masterNodes[0]
g.By("2. Check microshift version")
output, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "microshift version")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "MicroShift Version") {
e2e.Logf("Micorshift version is %v ", output)
} else {
e2e.Failf("Test Failed to get MicroShift Version.")
}
g.By("3. Check etcd version")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "microshift-etcd version")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "MicroShift-etcd Version: 4") {
e2e.Logf("micorshift-etcd version is %v ", output)
} else {
e2e.Failf("Test Failed to get MicroShift-etcd Version.")
}
g.By("4. Check etcd run as an transient systemd unit")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", output)
} else {
e2e.Failf("Test Failed to get microshift-etcd.scope status.")
}
g.By("5. Check etcd log")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "journalctl -u microshift-etcd.scope -o cat")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Running scope as unit: microshift-etcd.scope") {
e2e.Logf("micorshift-etcd log is %v ", output)
} else {
e2e.Failf("Test Failed to get micorshift-etcd log.")
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:skundu-Medium-62547-[ETCD] verify etcd quota size is configurable. [Disruptive]", func() {
var (
e2eTestNamespace = "microshift-ocp62547"
valCfg = 180
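// memoryLimitMB is expressed in MiB; systemd reports the corresponding MemoryHigh property in bytes, hence the conversion below.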
MemoryHighValue = valCfg * 1024 * 1024
)
g.By("1. Create new namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
g.By("2. Get microshift node")
masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred())
o.Expect(masterNodes).NotTo(o.BeEmpty())
masterNode := masterNodes[0]
g.By("3. Check microshift is running actively")
output, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift status is: %v ", output)
} else {
e2e.Failf("Test Failed to get microshift status.")
}
g.By("4. Check etcd status is running and active")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", output)
} else {
e2e.Failf("Test Failed to get microshift-etcd.scope status.")
}
g.By("5. Configure the memoryLimitMB field")
configYaml := "/etc/microshift/config.yaml"
etcdConfigCMD := fmt.Sprintf(`cat > %v << EOF
etcd:
  memoryLimitMB: %v
EOF`, configYaml, valCfg)
defer waitForMicroshiftAfterRestart(oc, masterNodes[0])
defer exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "rm -f /etc/microshift/config.yaml")
_, etcdConfigcmdErr := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodes[0], []string{"-q"}, "bash", "-c", etcdConfigCMD)
o.Expect(etcdConfigcmdErr).NotTo(o.HaveOccurred())
g.By("6. Restart microshift")
waitForMicroshiftAfterRestart(oc, masterNodes[0])
g.By("7. Check etcd status is running and active, after successful restart")
opStatus, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opStatus, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", opStatus)
} else {
e2e.Failf("Test Failed to get microshift-etcd.scope status.")
}
g.By("8. Verify the value of memoryLimitMB field is corrcetly configured")
opConfig, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "/usr/bin/microshift show-config --mode effective")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opConfig, "memoryLimitMB: "+fmt.Sprint(valCfg)) {
e2e.Logf("memoryLimitMB is successfully verified")
} else {
e2e.Failf("Test Failed to set memoryLimitMB field")
}
g.By("9. Verify the value of memoryLimitMB field is corrcetly configured")
opStat, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl show microshift-etcd.scope | grep MemoryHigh")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opStat, fmt.Sprint(MemoryHighValue)) {
e2e.Logf("stat MemoryHigh is successfully verified")
} else {
e2e.Failf("Failed to verify stat MemoryHigh")
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:skundu-Medium-60945-[ETCD] etcd should start stop automatically when microshift is started or stopped. [Disruptive]", func() {
g.By("1. Get microshift node")
masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred())
o.Expect(masterNodes).NotTo(o.BeEmpty())
masterNode := masterNodes[0]
g.By("2. Check microshift is running actively")
output, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift status is: %v ", output)
} else {
e2e.Failf("Failed to get microshift status.")
}
g.By("3. Check etcd status is running and active")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", output)
} else {
e2e.Failf("Failed to get microshift-etcd.scope status.")
}
g.By("4. Restart microshift")
waitForMicroshiftAfterRestart(oc, masterNodes[0])
g.By("5. Check etcd status is running and active, after successful restart")
opStatus, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opStatus, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", opStatus)
} else {
e2e.Failf("Failed to get microshift-etcd.scope status.")
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Author:geliu-Critical-66829-Tuning etcd latency parameters etcd_heartbeat_interval and etcd_election_timeout. [Disruptive]", func() {
defer func() {
e2e.Logf("Patch etcd cluster:controlPlaneHardwareSpeed for recovery.")
patchPath1 := "{\"spec\":{\"controlPlaneHardwareSpeed\":null}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
}()
e2e.Logf("patch etcd cluster to stardard.")
patchPath1 := "{\"spec\":{\"controlPlaneHardwareSpeed\":\"Standard\"}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
e2e.Logf("Force an etcd rollout, restart all etcd pods at a time to pick up the new values")
t := time.Now()
defer func() {
e2e.Logf("Patch etcd cluster:forceRedeploymentReason for recovery.")
patchPath1 := "{\"spec\":{\"forceRedeploymentReason\":null}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
}()
err0 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"forceRedeploymentReason\": \"hardwareSpeedChange-%s\"}}", t.Format("2023-01-02 15:04:05"))).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
e2e.Logf("Check the ETCD_ELECTION_TIMEOUT and ETCD_HEARTBEAT_INTERVAL in etcd pod.")
etcdPodList := getPodListByLabel(oc, "etcd=true")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[8].value}").Output()
if output != "1000" || err != nil {
e2e.Failf("ETCD_ELECTION_TIMEOUT is not default value: 1000")
}
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[13].value}").Output()
if output != "100" || err != nil {
e2e.Failf("ETCD_HEARTBEAT_INTERVAL is not default value: 100")
}
e2e.Logf("patch etcd cluster to Slower.")
patchPath1 = "{\"spec\":{\"controlPlaneHardwareSpeed\":\"Slower\"}}"
err0 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
e2e.Logf("Force an etcd rollout, restart all etcd pods at a time to pick up the new values")
err0 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"forceRedeploymentReason\": \"hardwareSpeedChange-%s\"}}", t.Format("2023-01-02 15:05:05"))).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
e2e.Logf("Check the ETCD_ELECTION_TIMEOUT and ETCD_HEARTBEAT_INTERVAL in etcd pod.")
etcdPodList = getPodListByLabel(oc, "etcd=true")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[8].value}").Output()
if output != "2500" || err != nil {
e2e.Failf("ETCD_ELECTION_TIMEOUT is not expected value: 2500")
}
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[13].value}").Output()
if output != "500" || err != nil {
e2e.Failf("ETCD_HEARTBEAT_INTERVAL is not expected value: 500")
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Author:geliu-High-71790-Etcd db defragment manually. [Disruptive]", func() {
g.By("Find the etcd leader pods and record each db size.")
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
etcdMemDbSize := make(map[string]int)
etcdMemDbSizeLater := make(map[string]int)
etcdLeaderPod := ""
for _, etcdPod := range etcdPodList {
e2e.Logf("login etcd pod: %v to get etcd member db size.", etcdPod)
etcdCmd := "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $4}'"
output, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
etcdMemDbSize[etcdPod], _ = strconv.Atoi(output)
e2e.Logf("login etcd pod: %v to check endpoints status.", etcdPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $6}'"
output, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "true") {
etcdLeaderPod = etcdPod
} else {
e2e.Logf("login non-leader etcd pod: %v to do defrag db.", etcdPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 defrag"
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("login non-leader etcd pod: %v to record db size after defrag.", etcdPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $4}'"
output, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
etcdMemDbSizeLater[etcdPod], _ = strconv.Atoi(output)
}
}
e2e.Logf("login etcd leader pod: %v to do defrag db.", etcdLeaderPod)
etcdCmd := "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 defrag"
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("login etcd leader pod: %v to record db size after defrag.", etcdLeaderPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $4}'"
output, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
etcdMemDbSizeLater[etcdLeaderPod], _ = strconv.Atoi(output)
e2e.Logf(fmt.Sprintf("etcdleaderPod: %v", etcdLeaderPod))
g.By("Compare etcd db size before/after defrage.")
e2e.Logf("etcd db size before defrag.")
for k, v := range etcdMemDbSize {
e2e.Logf("etcd pod name: %v, db size: %d", k, v)
}
e2e.Logf("etcd db size after defrag.")
for k, v := range etcdMemDbSizeLater {
e2e.Logf("etcd pod name: %v, db size: %d", k, v)
}
for k, v := range etcdMemDbSize {
if v <= etcdMemDbSizeLater[k] {
e2e.Failf("etcd: %v db size is not reduce after defrag.", k)
}
}
g.By("Clear it if any NOSPACE alarms.")
etcdCmd = "etcdctl alarm list"
output, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
if output != "" {
etcdCmd = "etcdctl alarm disarm"
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Author:geliu-High-73511-Selectable etcd database size. [Disruptive]", func() {
g.By("check cluster has enabled TechPreviewNoUpgradec.")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
defer func() {
patchPath := "{\"spec\":{\"backendQuotaGiB\": 8}}"
output, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath).Output()
if strings.Contains(output, "etcd backendQuotaGiB may not be decreased") {
e2e.Logf("etcd backendQuotaGiB may not be decreased: %v ", output)
}
checkOperator(oc, "etcd")
}()
g.By("patch etcd cluster backendQuotaGiB to 16G.")
patchPath := "{\"spec\":{\"backendQuotaGiB\": 16}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
g.By("waiting for etcd rollout automatically, restart all etcd pods at a time to pick up the new values")
checkOperator(oc, "etcd")
g.By("verify ETCD_QUOTA_BACKEND_BYTES value in etcd pods.")
etcdPodList := getPodListByLabel(oc, "etcd=true")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[16].value}").Output()
if output != "17179869184" || err != nil {
e2e.Failf("ETCD_QUOTA_BACKEND_BYTES is not expected value: 17179869184")
}
})
// author: [email protected]
g.It("Author:geliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-75259-Auto rotation of etcd signer certs from ocp 4.17. [Disruptive]", func() {
g.By("Check the remaining lifetime of the signer certificate in openshift-etcd namespace.")
certificateNotBefore0, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io\\/certificate-not-before}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
certificateNotAfter0, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io\\/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("etcd signer certificate expired Not After: %v", certificateNotAfter0)
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before running case :: %s ", err))
}
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after running case :: %v ", err)
}
}()
g.By("update the existing signer: when notAfter or notBefore is malformed.")
err = oc.AsAdmin().Run("patch").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-p", fmt.Sprintf("{\"metadata\": {\"annotations\": {\"auth.openshift.io/certificate-not-after\": \"%s\"}}}", certificateNotBefore0), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for etcd-signer rotation and cluster health.")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=30m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after delete etcd-signer :: %v ", err)
}
g.By("2nd Check the remaining lifetime of the new signer certificate in openshift-etcd namespace")
certificateNotAfter1, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
layout := "2006-01-02T15:04:05Z"
timeStr0, perr := time.Parse(layout, certificateNotAfter0)
o.Expect(perr).NotTo(o.HaveOccurred())
timeStr1, perr := time.Parse(layout, certificateNotAfter1)
o.Expect(perr).NotTo(o.HaveOccurred())
if !timeStr1.After(timeStr0) {
e2e.Failf("etcd-signer certificate-not-after is wrong: the new value %s is not after the old value %s.", timeStr1, timeStr0)
}
})
// author: [email protected]
g.It("Author:geliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-75224-Manual rotation of etcd signer certs from ocp 4.17. [Disruptive]", func() {
g.By("Check the remaining lifetime of the signer certificate in openshift-etcd namespace.")
certificateNotAfter0, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io\\/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("etcd signer certificate expired Not After: %v", certificateNotAfter0)
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before running case :: %s ", err))
}
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after running case :: %v ", err)
}
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Delete the existing signer.")
_, err = oc.AsAdmin().Run("delete").Args("-n", "openshift-etcd", "secret", "etcd-signer").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for etcd-signer rotation and cluster health.")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=40m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after delete etcd-signer :: %v ", err)
}
g.By("Check revision again, the output means that the last revision is >= 8")
revisionValue, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "configmap", "etcd-all-bundles", "-o=jsonpath={.metadata.annotations.openshift\\.io\\/ceo-bundle-rollout-revision}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
revisionValueInt, err := strconv.Atoi(revisionValue)
o.Expect(err).NotTo(o.HaveOccurred())
if revisionValueInt <= 8 {
e2e.Failf("etcd-signer revision value is %s; expected it to be greater than 8", revisionValue)
}
g.By("2nd Check the remaining lifetime of the new signer certificate in openshift-etcd namespace")
certificateNotAfter1, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
layout := "2006-01-02T15:04:05Z"
timeStr0, perr := time.Parse(layout, certificateNotAfter0)
o.Expect(perr).NotTo(o.HaveOccurred())
timeStr1, perr := time.Parse(layout, certificateNotAfter1)
o.Expect(perr).NotTo(o.HaveOccurred())
if !timeStr1.After(timeStr0) {
e2e.Failf("etcd-signer certificate-not-after is wrong: the new value %s is not after the old value %s.", timeStr1, timeStr0)
}
})
})
| package etcd | ||||
test case | openshift/openshift-tests-private | c869d229-4f9b-4cbb-a23f-4fe4c2aa8a62 | NonHyperShiftHOST-Author:skundu-Critical-43330-Ensure a safety net for the 3.4 to 3.5 etcd upgrade | ['"os/exec"', '"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Author:skundu-Critical-43330-Ensure a safety net for the 3.4 to 3.5 etcd upgrade", func() {
var (
err error
msg string
)
g.By("Test for case OCP-43330 Ensure a safety net for the 3.4 to 3.5 etcd upgrade")
oc.SetupProject()
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
e2e.Logf("verify whether etcd version is 3.5")
output, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPodList[0], "etcdctl")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("3.5"))
e2e.Logf("get the Kubernetes version")
version, err := exec.Command("bash", "-c", "oc version | grep Kubernetes |awk '{print $3}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sVersion := string(version)
kubeVer := strings.Split(sVersion, "+")[0]
kubeVer = strings.TrimSpace(kubeVer)
// Sometimes, there will be a version difference between kubeletVersion and k8s version due to RHCOS version.
// It will be matching once there is a new RHCOS version. Detail see https://issues.redhat.com/browse/OCPBUGS-48612
pattern := regexp.MustCompile(`\S+?.\d+`)
validVer := pattern.FindAllString(kubeVer, -1)
e2e.Logf("Version considered is %v", validVer[0])
e2e.Logf("retrieve all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("verify the kubelet version in node details")
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("node", masterNodeList[0], "-o", "custom-columns=VERSION:.status.nodeInfo.kubeletVersion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring(validVer[0]))
}) | |||||
test case | openshift/openshift-tests-private | 54bade33-4600-4a3a-b8b5-c350f25c5ca1 | NonHyperShiftHOST-Author:geliu-Medium-52418-Add new parameter to avoid Potential etcd inconsistent revision and data occurs | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Author:geliu-Medium-52418-Add new parameter to avoid Potential etcd inconsistent revision and data occurs", func() {
g.By("Test for case OCP-52418-Add new parameter to avoid Potential etcd inconsistent revision and data occurs")
oc.SetupProject()
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
e2e.Logf("get the expected parameter from etcd member pod")
output, err := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[*].command[*]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("experimental-initial-corrupt-check=true"))
}) | ||||||
test case | openshift/openshift-tests-private | 4ecef929-c926-4217-8fd7-7b4da10664fb | Author:skundu-NonHyperShiftHOST-WRS-LEVEL0-NonPreRelease-Longduration-Critical-52312-V-ACS.03-cluster-backup.sh script has a conflict to use /etc/kubernetes/static-pod-certs folder [Serial] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("Author:skundu-NonHyperShiftHOST-WRS-LEVEL0-NonPreRelease-Longduration-Critical-52312-V-ACS.03-cluster-backup.sh script has a conflict to use /etc/kubernetes/static-pod-certs folder [Serial]", func() {
g.By("Test for case OCP-52312 cluster-backup.sh script has a conflict to use /etc/kubernetes/static-pod-certs folder.")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
defer func() {
e2e.Logf("Remove the certs directory")
_, errCert := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "rm", "-rf", "/etc/kubernetes/static-pod-certs")
o.Expect(errCert).NotTo(o.HaveOccurred())
}()
e2e.Logf("Create the certs directory")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "mkdir", "/etc/kubernetes/static-pod-certs")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
e2e.Logf("Remove the backup directory")
_, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodeList[0], []string{"-q"}, "rm", "-rf", "/home/core/assets/backup")
o.Expect(err).NotTo(o.HaveOccurred())
}()
firstMNode := []string{masterNodeList[0]}
e2e.Logf("Run the backup")
masterN, _ := runDRBackup(oc, firstMNode)
e2e.Logf("Etcd db successfully backed up on node %v", masterN)
}) | ||||||
test case | openshift/openshift-tests-private | cce28047-ad08-46e1-a0bf-a574f516e019 | NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-57119-SSL/TLS: Birthday attack against 64 bit block ciphers (SWEET32) etcd metrics port 9979 [Serial] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-57119-SSL/TLS: Birthday attack against 64 bit block ciphers (SWEET32) etcd metrics port 9979 [Serial]", func() {
g.By("Test for case OCP-57119 SSL/TLS: Birthday attack against 64 bit block ciphers (SWEET32) etcd metrics port 9979 .")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("get the ip of the first node")
ipOfNode := getIpOfMasterNode(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Node IP %v", ipOfNode)
e2e.Logf("Verify the SSL Health of port 9979")
res := verifySSLHealth(oc, ipOfNode, masterNodeList[0])
if res {
e2e.Logf("SSL health on port 9979 is healthy.")
} else {
e2e.Failf("SSL health on port 9979 is vulnerable")
}
}) | ||||||
test case | openshift/openshift-tests-private | 10cfab98-4e7b-4ed0-bba5-50ac49fd0f77 | Author:skundu-NonHyperShiftHOST-WRS-LEVEL0-Critical-24280-V-ACS.03-Etcd basic verification | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("Author:skundu-NonHyperShiftHOST-WRS-LEVEL0-Critical-24280-V-ACS.03-Etcd basic verification", func() {
g.By("Test for case OCP-52418-Etcd basic verification")
e2e.Logf("check cluster Etcd operator status")
checkOperator(oc, "etcd")
e2e.Logf("verify cluster Etcd operator pod is Running")
podOprtAllRunning := checkEtcdOperatorPodStatus(oc)
if !podOprtAllRunning {
e2e.Failf("etcd operator pod is not in running state")
}
e2e.Logf("retrieve all the master node")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
if len(masterNodeList) != len(etcdPodList) {
e2e.Failf("mismatch in the number of etcd pods and master nodes.")
}
e2e.Logf("Ensure all the etcd pods are running")
podAllRunning := checkEtcdPodStatus(oc)
if !podAllRunning {
e2e.Failf("etcd pods are not in running state")
}
}) | ||||||
test case | openshift/openshift-tests-private | d9cc2ae5-8710-4df3-8d95-d3644b1156e6 | NonHyperShiftHOST-Author:geliu-Critical-54129-New etcd alerts to be added to the monitoring stack in ocp 4.10. | ['"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Author:geliu-Critical-54129-New etcd alerts to be added to the monitoring stack in ocp 4.10.", func() {
g.By("Test for case OCP-54129-New etcd alerts to be added to the monitoring stack in ocp 4.10.")
e2e.Logf("Check new alert msg have been updated")
output, err := exec.Command("bash", "-c", "oc -n openshift-monitoring get cm prometheus-k8s-rulefiles-0 -oyaml | grep \"alert: etcd\"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("etcdHighFsyncDurations"))
o.Expect(output).To(o.ContainSubstring("etcdDatabaseQuotaLowSpace"))
o.Expect(output).To(o.ContainSubstring("etcdExcessiveDatabaseGrowth"))
}) | |||||
test case | openshift/openshift-tests-private | e41d135f-6aee-4de3-b68a-090d8f1281e6 | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:skundu-Critical-73564-Validate cert rotation in 4.16. [Disruptive] | ['"fmt"', '"os"', '"os/exec"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:skundu-Critical-73564-Validate cert rotation in 4.16. [Disruptive]", func() {
exutil.By("Test for case OCP-73564-Validate cert rotation in 4.16.")
e2e.Logf("Check the lifetime of newly created signer certificate in openshift-etcd namespace")
filename := "73564_out.json"
initialexpiry, _ := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
e2e.Logf("initial expiry is: %v ", initialexpiry)
e2e.Logf("Recreate the signer by deleting it")
defer func() {
checkOperator(oc, "etcd")
checkOperator(oc, "kube-apiserver")
}()
_, errdel := oc.AsAdmin().Run("delete").Args("-n", "openshift-etcd", "secret", "etcd-signer").Output()
o.Expect(errdel).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
checkOperator(oc, "kube-apiserver")
e2e.Logf("Verify the newly created expiry time is differnt from the initial one")
newexpiry, _ := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
e2e.Logf("renewed expiry is: %v ", newexpiry)
if initialexpiry == newexpiry {
e2e.Failf("The signer cert expiry did n't renew")
}
e2e.Logf("Once the revision with the updated bundle is rolled out, swap the original CA in the openshift-config namespace, with the newly rotated one in openshift-etcd")
out, _ := oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-ojson").OutputToFile(filename)
jqCmd := fmt.Sprintf(`cat %s | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' > /tmp/73564.yaml`, out)
_, errex := exec.Command("bash", "-c", jqCmd).Output()
e2e.Logf("jqcmd is %v", jqCmd)
o.Expect(errex).NotTo(o.HaveOccurred())
defer os.RemoveAll("/tmp/73564.yaml")
_, errj := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-n", "openshift-config", "-f", "/tmp/73564.yaml").Output()
o.Expect(errj).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
checkOperator(oc, "kube-apiserver")
e2e.Logf("Remove old CA from the trust bundle. This will regenerate the bundle with only the signer certificates from openshift-config and openshift-etcd, effectively removing all unknown/old public keys.")
_, err := oc.AsAdmin().Run("delete").Args("-n", "openshift-etcd", "configmap", "etcd-ca-bundle").Output()
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | 55eca084-c214-4405-8914-f8e8b5d19124 | NonHyperShiftHOST-PstChkUpgrade-ConnectedOnly-Author:skundu-NonPreRelease-Critical-22665-Check etcd image have been update to target release value after upgrade [Serial] | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-PstChkUpgrade-ConnectedOnly-Author:skundu-NonPreRelease-Critical-22665-Check etcd image have been update to target release value after upgrade [Serial]", func() {
g.By("Test for case OCP-22665 Check etcd image have been update to target release value after upgrade.")
var (
errImg error
etcdImageID string
)
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
e2e.Logf("get the image id from the etcd pod")
etcdImageID, errImg = oc.AsAdmin().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.status.containerStatuses[?(@.name==\"etcd\")].image}").Output()
o.Expect(errImg).NotTo(o.HaveOccurred())
e2e.Logf("etcd imagid is %v", etcdImageID)
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("get the clusterVersion")
clusterVersion, errClvr := oc.AsAdmin().Run("get").Args("clusterversions", "version", "-o=jsonpath={.status.desired.image}").Output()
o.Expect(errClvr).NotTo(o.HaveOccurred())
e2e.Logf("clusterVersion is %v", clusterVersion)
httpProxy, httpsProxy := getGlobalProxy(oc)
if strings.Contains(httpProxy, "http") || strings.Contains(httpsProxy, "https") {
e2e.Logf("It's a proxy platform.")
ret := verifyImageIDwithProxy(oc, masterNodeList, httpProxy, httpsProxy, etcdImageID, clusterVersion)
if ret {
e2e.Logf("Image version of etcd successfully updated to the target release on all the node(s) of cluster with proxy")
} else {
e2e.Failf("etcd Image update to target release on proxy cluster failed")
}
} else {
g.By("Run the command on node(s)")
res := verifyImageIDInDebugNode(oc, masterNodeList, etcdImageID, clusterVersion)
if res {
e2e.Logf("Image version of etcd successfully updated to the target release on all the node(s)")
} else {
e2e.Failf("etcd Image update to target release failed")
}
}
}) | |||||
test case | openshift/openshift-tests-private | 3dc88d31-d89b-46ff-b4b9-10d657b60ef9 | NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-64148-Verify etcd-bootstrap member is removed properly [Serial] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-64148-Verify etcd-bootstrap member is removed properly [Serial]", func() {
g.By("Test for case OCP-64148 Verify etcd-bootstrap member is removed properly.")
g.By("Verifying etcd cluster message and status")
res := verifyEtcdClusterMsgStatus(oc, "etcd-bootstrap member is already removed", "True")
if res {
e2e.Logf("etcd bootstrap member successfully removed")
} else {
e2e.Failf("failed to remove the etcd bootstrap member")
}
}) | ||||||
test case | openshift/openshift-tests-private | 3319e51f-2ab9-4d57-9d2d-4f191aa4abc2 | NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66726-Automated one-off backup for etcd using PVC on hostpath. [Disruptive] | ['"bufio"', '"fmt"', '"os"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66726-Automated one-off backup for etcd using PVC on hostpath. [Disruptive]", func() {
g.By("Test for case OCP-66726 Automated one-off backup for etcd using PVC on hostpath.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvName = "etcd-backup-pv-h-66726"
pvcName = "etcd-backup-pvc-h-66726"
bkphostpath = "/etc/kubernetes/cluster-backup"
etcdBkp = "testbackup-h-66726"
nameSpace = "openshift-etcd"
pvYamlFile = tmpdir + "pv-hostpath.yaml"
pvcYamlFile = tmpdir + "pvc-hostpath.yaml"
oneOffBkphpYamlFile = tmpdir + "oneOffbkp-hostpath.yaml"
pvYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolume
metadata:
name: %s
spec:
storageClassName: manual
capacity:
storage: %s
accessModes:
- ReadWriteOnce
hostPath:
path: %s
`, pvName, "10Gi", bkphostpath)
pvcYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: %s
namespace: openshift-etcd
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: %s
volumeName: %s
`, pvcName, "10Gi", pvName)
oneOffBkphpYaml = fmt.Sprintf(`apiVersion: operator.openshift.io/v1alpha1
kind: EtcdBackup
metadata:
name: %s
namespace: openshift-etcd
spec:
pvcName: %s`, etcdBkp, pvcName)
)
g.By("2 Create a PV for hostpath")
f, err := os.Create(pvYamlFile)
o.Expect(err).NotTo(o.HaveOccurred())
defer f.Close()
w := bufio.NewWriter(f)
_, werr := w.WriteString(pvYaml)
w.Flush()
o.Expect(werr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvYamlFile).Execute()
pvErr := oc.AsAdmin().Run("create").Args("-f", pvYamlFile).Execute()
o.Expect(pvErr).NotTo(o.HaveOccurred())
g.By("3 Create a PVC for hostpath")
pf, errp := os.Create(pvcYamlFile)
o.Expect(errp).NotTo(o.HaveOccurred())
defer pf.Close()
w2 := bufio.NewWriter(pf)
_, perr := w2.WriteString(pvcYaml)
w2.Flush()
o.Expect(perr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
pvcErr := oc.AsAdmin().Run("create").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
o.Expect(pvcErr).NotTo(o.HaveOccurred())
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("4. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("5 Create a oneOffBackup for hostpath")
bkpf, bkperr := os.Create(oneOffBkphpYamlFile)
o.Expect(bkperr).NotTo(o.HaveOccurred())
defer bkpf.Close()
w3 := bufio.NewWriter(bkpf)
_, bwerr := w3.WriteString(oneOffBkphpYaml)
w3.Flush()
o.Expect(bwerr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", oneOffBkphpYamlFile).Execute()
bkpErr := oc.AsAdmin().Run("create").Args("-f", oneOffBkphpYamlFile).Execute()
o.Expect(bkpErr).NotTo(o.HaveOccurred())
waitForOneOffBackupToComplete(oc, nameSpace, etcdBkp)
backupfile := getOneBackupFile(oc, nameSpace, etcdBkp)
o.Expect(backupfile).NotTo(o.BeEmpty(), "Failed to get the Backup file")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Verify the backup creation")
verify := verifyBkpFileCreationHost(oc, masterNodeList, bkphostpath, backupfile)
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on node")
}) | |||||
test case | openshift/openshift-tests-private | d0ed02b7-7301-45a9-8060-f0bfda7bf7ba | NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66727-Automated recurring backup for etcd using PVC on hostpath. [Disruptive] | ['"bufio"', '"fmt"', '"os"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66727-Automated recurring backup for etcd using PVC on hostpath. [Disruptive]", func() {
g.By("Test for case OCP-66727 Automated recurring backup for etcd using PVC on hostpath.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66727" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvName = "etcd-backup-pv-h-66727"
pvcName = "etcd-backup-pvc-h-66727"
bkphostpath = "/etc/kubernetes/cluster-backup"
etcdBkp = "testbackup-h-66727"
maxNoBackup = 3
nameSpace = "openshift-etcd"
pvYamlFile = tmpdir + "pv-hostpath.yaml"
pvcYamlFile = tmpdir + "pvc-hostpath.yaml"
recurringBkphpYamlFile = tmpdir + "recurringBkp-hostpath.yaml"
pvYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolume
metadata:
name: %s
spec:
storageClassName: manual
capacity:
storage: %s
accessModes:
- ReadWriteOnce
hostPath:
path: %s
`, pvName, "10Gi", bkphostpath)
pvcYaml = fmt.Sprintf(`apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: %s
namespace: openshift-etcd
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: %s
volumeName: %s
`, pvcName, "10Gi", pvName)
recurringBkphpYaml = fmt.Sprintf(`apiVersion: config.openshift.io/v1alpha1
kind: Backup
metadata:
name: %s
spec:
etcd:
schedule: "*/1 * * * *"
timeZone: "UTC"
retentionPolicy:
retentionType: RetentionNumber
retentionNumber:
maxNumberOfBackups: %d
pvcName: %s`, etcdBkp, maxNoBackup, pvcName)
)
g.By("2 Create a PV for hostpath")
f, err := os.Create(pvYamlFile)
o.Expect(err).NotTo(o.HaveOccurred())
defer f.Close()
w := bufio.NewWriter(f)
_, werr := w.WriteString(pvYaml)
w.Flush()
o.Expect(werr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvYamlFile).Execute()
pvErr := oc.AsAdmin().Run("create").Args("-f", pvYamlFile).Execute()
o.Expect(pvErr).NotTo(o.HaveOccurred())
g.By("3 Create a PVC for hostpath")
pf, errp := os.Create(pvcYamlFile)
o.Expect(errp).NotTo(o.HaveOccurred())
defer pf.Close()
w2 := bufio.NewWriter(pf)
_, perr := w2.WriteString(pvcYaml)
w2.Flush()
o.Expect(perr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
pvcErr := oc.AsAdmin().Run("create").Args("-f", pvcYamlFile, "-n", nameSpace).Execute()
o.Expect(pvcErr).NotTo(o.HaveOccurred())
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("4. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("5 Create a recurringBackup for hostpath")
bkpf, bkperr := os.Create(recurringBkphpYamlFile)
o.Expect(bkperr).NotTo(o.HaveOccurred())
defer bkpf.Close()
w3 := bufio.NewWriter(bkpf)
_, bwerr := w3.WriteString(recurringBkphpYaml)
w3.Flush()
o.Expect(bwerr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-f", recurringBkphpYamlFile).Execute()
bkpErr := oc.AsAdmin().Run("create").Args("-f", recurringBkphpYamlFile).Execute()
o.Expect(bkpErr).NotTo(o.HaveOccurred())
waitForRecurBackupJobToComplete(oc, nameSpace, etcdBkp, "Succeeded")
e2e.Logf("select all the master nodes")
masterNodeList := getNodeListByLabel(oc, "node-role.kubernetes.io/master=")
e2e.Logf("Need to wait for 3 minutes as 3 jobs are scheduled after every 1 minute each")
time.Sleep(180 * time.Second)
e2e.Logf("Verify the backup creation")
verify := verifyRecurBkpFileCreationHost(oc, masterNodeList, bkphostpath, "backup-"+etcdBkp, "4")
o.Expect(verify).To(o.BeTrue(), "Failed to verify recurring backup files on node")
}) | |||||
test case | openshift/openshift-tests-private | 6cfe6394-32fd-4e8d-b1c5-ebb40aed1f91 | NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66716-Automated one-off backup for etcd using dynamically provisioned PV externally. [Disruptive] | ['"os"', '"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66716-Automated one-off backup for etcd using dynamically provisioned PV externally. [Disruptive]", func() {
g.By("Test for case OCP-66716 Automated one-off backup for etcd using dynamically provisioned PV externally.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
output, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
platform := strings.ToLower(output)
storageCn := ""
if platform == "aws" {
storageCn = "gp3-csi"
} else if platform == "azure" {
storageCn = "azurefile-csi"
} else if platform == "gcp" {
storageCn = "standard-csi"
} else {
g.Skip("this platform is currently not supported, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66716" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvcName = "etcd-backup-pvc-e-66716"
podName = "test-pod-66716"
bkpPath = "/data"
etcdBkp = "testbackup-e-66716"
nameSpace = "openshift-etcd"
)
g.By("1. Create a PVC for requesting external volume")
baseDir := exutil.FixturePath("testdata", "etcd")
pvcTemplate := filepath.Join(baseDir, "pvc-ext.yaml")
params := []string{"-f", pvcTemplate, "-p", "NAME=" + pvcName, "NAMESPACE=" + nameSpace, "STORAGE=1Gi", "SCNAME=" + storageCn}
defer oc.AsAdmin().Run("delete").Args("pvc", pvcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
e2e.Logf("2. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("3. Create a oneOffBackup for external volume")
oneOffTemplate := filepath.Join(baseDir, "oneoffbackup.yaml")
paramsOneOff := []string{"-f", oneOffTemplate, "-p", "NAME=" + etcdBkp, "NAMESPACE=" + nameSpace, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("EtcdBackup", etcdBkp, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsOneOff...)
g.By("4. Wait for PVC to bind to the backup pod")
waitForPvcStatus(oc, nameSpace, pvcName)
g.By("5. Wait for backupjob to complete")
waitForOneOffBackupToComplete(oc, nameSpace, etcdBkp)
backupfile := getOneBackupFile(oc, nameSpace, etcdBkp)
o.Expect(backupfile).NotTo(o.BeEmpty(), "Failed to get the Backup file")
g.By("6. Create a test-pod to access the volume.")
testpodTemplate := filepath.Join(baseDir, "testpod.yaml")
paramsTpod := []string{"-f", testpodTemplate, "-p", "NAME=" + podName, "NAMESPACE=" + nameSpace, "PATH=" + bkpPath, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("pod", podName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsTpod...)
waitForPodStatus(oc, podName, nameSpace, "Running")
g.By("7. verify whether backup is created on external volume")
verify := verifyBkpFileCreationOnExternalVol(oc, podName, nameSpace, bkpPath, backupfile)
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on external volume")
}) | |||||
test case | openshift/openshift-tests-private | d61d7b5f-ec65-48d0-b847-8820f185c79e | NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66717-Automated recurring backup for etcd using dynamically provisioned PV externally. [Disruptive] | ['"os"', '"path/filepath"', '"strconv"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66717-Automated recurring backup for etcd using dynamically provisioned PV externally. [Disruptive]", func() {
g.By("Test for case OCP-66717 Automated recurring backup for etcd using dynamically provisioned PV externally.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
output, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
platform := strings.ToLower(output)
storageCn := ""
if platform == "aws" {
storageCn = "gp3-csi"
} else if platform == "azure" {
storageCn = "azurefile-csi"
} else if platform == "gcp" {
storageCn = "standard-csi"
} else {
g.Skip("this platform is currently not supported, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66717" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvcName = "etcd-backup-pvc-e-66717"
podName = "test-pod-66717"
maxNoBackup = 3
bkpPath = "/data"
etcdBkp = "testbackup-e-66717"
nameSpace = "openshift-etcd"
)
g.By("1. Create a PVC for requesting external volume")
baseDir := exutil.FixturePath("testdata", "etcd")
pvcTemplate := filepath.Join(baseDir, "pvc-ext.yaml")
params := []string{"-f", pvcTemplate, "-p", "NAME=" + pvcName, "NAMESPACE=" + nameSpace, "STORAGE=1Gi", "SCNAME=" + storageCn}
defer oc.AsAdmin().Run("delete").Args("pvc", pvcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
e2e.Logf("2. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("3. Create a recurringBackup for external volume")
recurTemplate := filepath.Join(baseDir, "recurringbackup.yaml")
paramsRecur := []string{"-f", recurTemplate, "-p", "NAME=" + etcdBkp, "MNUMBACKUP=" + strconv.Itoa(maxNoBackup), "PVCNAME=" + pvcName}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("Backup", etcdBkp).Execute()
exutil.CreateClusterResourceFromTemplate(oc, paramsRecur...)
g.By("4. Wait for PVC to bind to the backup pod")
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("Need to wait for 3 minutes as 3 jobs are scheduled after every 1 minute each")
time.Sleep(180 * time.Second)
g.By("5. Create a test-pod to access the volume.")
testpodTemplate := filepath.Join(baseDir, "testpod.yaml")
paramsTpod := []string{"-f", testpodTemplate, "-p", "NAME=" + podName, "NAMESPACE=" + nameSpace, "PATH=" + bkpPath, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("pod", podName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsTpod...)
waitForPodStatus(oc, podName, nameSpace, "Running")
e2e.Logf("6. Verify the backup creation")
verify := verifyRecurringBkpFileOnExternalVol(oc, podName, nameSpace, bkpPath, "backup-"+etcdBkp, "4")
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on external volume")
}) | |||||
test case | openshift/openshift-tests-private | f8dcfce9-c2fb-49c4-8980-b0c370f1b033 | NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66729-Validate default value for configurable parameters RetentionNumber for recurring backup of etcd. [Disruptive] | ['"os"', '"path/filepath"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:skundu-Critical-66729-Validate default value for configurable parameters RetentionNumber for recurring backup of etcd. [Disruptive]", func() {
g.By("Test for case OCP-66729 Validate default value for configurable parameters RetentionNumber for recurring backup of etcd.")
featureSet, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
output, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
platform := strings.ToLower(output)
storageCn := ""
if platform == "aws" {
storageCn = "gp3-csi"
} else if platform == "azure" {
storageCn = "azurefile-csi"
} else if platform == "gcp" {
storageCn = "standard-csi"
} else {
g.Skip("this platform is currently not supported, skip it!")
}
tmpdir := "/tmp/OCP-etcd-cases-66729" + exutil.GetRandomString() + "/"
defer os.RemoveAll(tmpdir)
err := os.MkdirAll(tmpdir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
var (
pvcName = "etcd-backup-pvc-e-66729"
podName = "test-pod-66729"
bkpPath = "/data"
etcdBkp = "testbackup-e-66729"
nameSpace = "openshift-etcd"
)
g.By("1. Create a PVC for requesting external volume")
baseDir := exutil.FixturePath("testdata", "etcd")
pvcTemplate := filepath.Join(baseDir, "pvc-ext.yaml")
params := []string{"-f", pvcTemplate, "-p", "NAME=" + pvcName, "NAMESPACE=" + nameSpace, "STORAGE=10Gi", "SCNAME=" + storageCn}
defer oc.AsAdmin().Run("delete").Args("pvc", pvcName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, params...)
e2e.Logf("2. check and enable the CRDs")
etcdbkpOpCRDExisting := isCRDExisting(oc, "etcdbackups.operator.openshift.io")
if !etcdbkpOpCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "etcdbackups.operator.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeCrd.yaml")
}
etcdBkpConCRDExisting := isCRDExisting(oc, "backups.config.openshift.io")
if !etcdBkpConCRDExisting {
defer oc.AsAdmin().Run("delete").Args("CustomResourceDefinition", "backups.config.openshift.io").Execute()
createCRD(oc, "etcdbackupTechPreviewNoUpgradeConfigCrd.yaml")
}
g.By("3. Create a recurringBackup for external volume")
recurTemplate := filepath.Join(baseDir, "recurringbkpdefault.yaml")
paramsRecur := []string{"-f", recurTemplate, "-p", "NAME=" + etcdBkp, "PVCNAME=" + pvcName}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("Backup", etcdBkp).Execute()
exutil.CreateClusterResourceFromTemplate(oc, paramsRecur...)
g.By("4. Wait for PVC to bind to the backup pod")
waitForPvcStatus(oc, nameSpace, pvcName)
e2e.Logf("Need to wait for 15 minutes as 15 jobs are scheduled by default at an interval of 1 minute.")
time.Sleep(920 * time.Second)
g.By("5. Create a test-pod to access the volume.")
testpodTemplate := filepath.Join(baseDir, "testpod.yaml")
paramsTpod := []string{"-f", testpodTemplate, "-p", "NAME=" + podName, "NAMESPACE=" + nameSpace, "PATH=" + bkpPath, "PVCNAME=" + pvcName}
defer oc.AsAdmin().Run("delete").Args("pod", podName, "-n", nameSpace).Execute()
exutil.CreateNsResourceFromTemplate(oc, nameSpace, paramsTpod...)
waitForPodStatus(oc, podName, nameSpace, "Running")
e2e.Logf("6. Verify the backup creation")
verify := verifyRecurringBkpFileOnExternalVol(oc, podName, nameSpace, bkpPath, "backup-"+etcdBkp, "16")
o.Expect(verify).To(o.BeTrue(), "Failed to verify backup creation on external volume")
}) | |||||
test case | openshift/openshift-tests-private | 04797295-9eb9-4d1d-b4c3-1d5af0545e0b | NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-54999-Verify ETCD is not degraded in dual-stack networking cluster.[Serial] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-Author:skundu-NonPreRelease-Longduration-Critical-54999-Verify ETCD is not degraded in dual-stack networking cluster.[Serial]", func() {
g.By("Test for case OCP-54999 Verify ETCD is not degraded in dual-stack networking cluster.")
ipStackType := getIPStackType(oc)
g.By("Skip testing on ipv4 or ipv6 single stack cluster")
if ipStackType == "ipv4single" || ipStackType == "ipv6single" {
g.Skip("The case only can be run on dualstack cluster , skip for single stack cluster!!!")
}
g.By("Verifying etcd status on dualstack cluster")
if ipStackType == "dualstack" {
g.By("Check etcd oprator status")
checkOperator(oc, "etcd")
podAllRunning := checkEtcdPodStatus(oc)
if !podAllRunning {
e2e.Failf("etcd pods are not in running state")
}
}
}) | ||||||
test case | openshift/openshift-tests-private | 843766bd-477d-4d64-b383-54ff44b6cca1 | MicroShiftOnly-Author:geliu-Medium-62738-[ETCD] Build Microshift prototype to launch etcd as an transient systemd unit | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("MicroShiftOnly-Author:geliu-Medium-62738-[ETCD] Build Microshift prototype to launch etcd as an transient systemd unit", func() {
g.By("1. Get microshift node")
masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred())
o.Expect(masterNodes).NotTo(o.BeEmpty())
masterNode := masterNodes[0]
g.By("2. Check microshift version")
output, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "microshift version")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "MicroShift Version") {
e2e.Logf("Micorshift version is %v ", output)
} else {
e2e.Failf("Test Failed to get MicroShift Version.")
}
g.By("3. Check etcd version")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "microshift-etcd version")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "MicroShift-etcd Version: 4") {
e2e.Logf("micorshift-etcd version is %v ", output)
} else {
e2e.Failf("Test Failed to get MicroShift-etcd Version.")
}
g.By("4. Check etcd run as an transient systemd unit")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", output)
} else {
e2e.Failf("Test Failed to get microshift-etcd.scope status.")
}
g.By("5. Check etcd log")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "journalctl -u microshift-etcd.scope -o cat")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Running scope as unit: microshift-etcd.scope") {
e2e.Logf("micorshift-etcd log is %v ", output)
} else {
e2e.Failf("Test Failed to get micorshift-etcd log.")
}
}) | |||||
test case | openshift/openshift-tests-private | 1f14d9bc-fddf-4498-bfb0-ec20abfe56ec | MicroShiftOnly-Author:skundu-Medium-62547-[ETCD] verify etcd quota size is configurable. [Disruptive] | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("MicroShiftOnly-Author:skundu-Medium-62547-[ETCD] verify etcd quota size is configurable. [Disruptive]", func() {
var (
e2eTestNamespace = "microshift-ocp62547"
valCfg = 180
MemoryHighValue = valCfg * 1024 * 1024
)
g.By("1. Create new namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
g.By("2. Get microshift node")
masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred())
o.Expect(masterNodes).NotTo(o.BeEmpty())
masterNode := masterNodes[0]
g.By("3. Check microshift is running actively")
output, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift status is: %v ", output)
} else {
e2e.Failf("Test Failed to get microshift status.")
}
g.By("4. Check etcd status is running and active")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", output)
} else {
e2e.Failf("Test Failed to get microshift-etcd.scope status.")
}
g.By("5. Configure the memoryLimitMB field")
configYaml := "/etc/microshift/config.yaml"
etcdConfigCMD := fmt.Sprintf(`cat > %v << EOF
etcd:
  memoryLimitMB: %v
EOF`, configYaml, valCfg)
defer waitForMicroshiftAfterRestart(oc, masterNodes[0])
defer exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "rm -f /etc/microshift/config.yaml")
_, etcdConfigcmdErr := exutil.DebugNodeWithOptionsAndChroot(oc, masterNodes[0], []string{"-q"}, "bash", "-c", etcdConfigCMD)
o.Expect(etcdConfigcmdErr).NotTo(o.HaveOccurred())
g.By("6. Restart microshift")
waitForMicroshiftAfterRestart(oc, masterNodes[0])
g.By("7. Check etcd status is running and active, after successful restart")
opStatus, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opStatus, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", opStatus)
} else {
e2e.Failf("Test Failed to get microshift-etcd.scope status.")
}
g.By("8. Verify the value of memoryLimitMB field is corrcetly configured")
opConfig, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "/usr/bin/microshift show-config --mode effective")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opConfig, "memoryLimitMB: "+fmt.Sprint(valCfg)) {
e2e.Logf("memoryLimitMB is successfully verified")
} else {
e2e.Failf("Test Failed to set memoryLimitMB field")
}
g.By("9. Verify the value of memoryLimitMB field is corrcetly configured")
opStat, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl show microshift-etcd.scope | grep MemoryHigh")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opStat, fmt.Sprint(MemoryHighValue)) {
e2e.Logf("stat MemoryHigh is successfully verified")
} else {
e2e.Failf("Failed to verify stat MemoryHigh")
}
}) | |||||
test case | openshift/openshift-tests-private | 9a19e876-4a76-430e-a566-8827914a6e75 | MicroShiftOnly-Author:skundu-Medium-60945-[ETCD] etcd should start stop automatically when microshift is started or stopped. [Disruptive] | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("MicroShiftOnly-Author:skundu-Medium-60945-[ETCD] etcd should start stop automatically when microshift is started or stopped. [Disruptive]", func() {
g.By("1. Get microshift node")
masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred())
o.Expect(masterNodes).NotTo(o.BeEmpty())
masterNode := masterNodes[0]
g.By("2. Check microshift is running actively")
output, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift status is: %v ", output)
} else {
e2e.Failf("Failed to get microshift status.")
}
g.By("3. Check etcd status is running and active")
output, err = exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", output)
} else {
e2e.Failf("Failed to get microshift-etcd.scope status.")
}
g.By("4. Restart microshift")
waitForMicroshiftAfterRestart(oc, masterNodes[0])
g.By("5. Check etcd status is running and active, after successful restart")
opStatus, err := exutil.DebugNodeWithOptionsAndChroot(oc, masterNode, []string{"-q"}, "bash", "-c", "systemctl status microshift-etcd.scope")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(opStatus, "Active: active (running)") {
e2e.Logf("microshift-etcd.scope status is: %v ", opStatus)
} else {
e2e.Failf("Failed to get microshift-etcd.scope status.")
}
}) | |||||
test case | openshift/openshift-tests-private | 1fe3a0bf-c744-43d7-be70-7927ff5d4147 | NonHyperShiftHOST-NonPreRelease-Author:geliu-Critical-66829-Tuning etcd latency parameters etcd_heartbeat_interval and etcd_election_timeout. [Disruptive] | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-NonPreRelease-Author:geliu-Critical-66829-Tuning etcd latency parameters etcd_heartbeat_interval and etcd_election_timeout. [Disruptive]", func() {
defer func() {
e2e.Logf("Patch etcd cluster:controlPlaneHardwareSpeed for recovery.")
patchPath1 := "{\"spec\":{\"controlPlaneHardwareSpeed\":null}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
}()
e2e.Logf("patch etcd cluster to stardard.")
patchPath1 := "{\"spec\":{\"controlPlaneHardwareSpeed\":\"Standard\"}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
e2e.Logf("Force an etcd rollout, restart all etcd pods at a time to pick up the new values")
t := time.Now()
defer func() {
e2e.Logf("Patch etcd cluster:forceRedeploymentReason for recovery.")
patchPath1 := "{\"spec\":{\"forceRedeploymentReason\":null}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
}()
err0 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"forceRedeploymentReason\": \"hardwareSpeedChange-%s\"}}", t.Format("2023-01-02 15:04:05"))).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
e2e.Logf("Check the ETCD_ELECTION_TIMEOUT and ETCD_HEARTBEAT_INTERVAL in etcd pod.")
etcdPodList := getPodListByLabel(oc, "etcd=true")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[8].value}").Output()
if output != "1000" || err != nil {
e2e.Failf("ETCD_ELECTION_TIMEOUT is not default value: 1000")
}
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[13].value}").Output()
if output != "100" || err != nil {
e2e.Failf("ETCD_HEARTBEAT_INTERVAL is not default value: 100")
}
e2e.Logf("patch etcd cluster to Slower.")
patchPath1 = "{\"spec\":{\"controlPlaneHardwareSpeed\":\"Slower\"}}"
err0 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath1).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
e2e.Logf("Force an etcd rollout, restart all etcd pods at a time to pick up the new values")
err0 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", fmt.Sprintf("{\"spec\": {\"forceRedeploymentReason\": \"hardwareSpeedChange-%s\"}}", t.Format("2023-01-02 15:05:05"))).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
checkOperator(oc, "etcd")
e2e.Logf("Check the ETCD_ELECTION_TIMEOUT and ETCD_HEARTBEAT_INTERVAL in etcd pod.")
etcdPodList = getPodListByLabel(oc, "etcd=true")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[8].value}").Output()
if output != "2500" || err != nil {
e2e.Failf("ETCD_ELECTION_TIMEOUT is not expected value: 2500")
}
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[13].value}").Output()
if output != "500" || err != nil {
e2e.Failf("ETCD_HEARTBEAT_INTERVAL is not expected value: 500")
}
}) | |||||
test case | openshift/openshift-tests-private | a958407c-ace9-49e4-8842-51161f3f146a | NonHyperShiftHOST-NonPreRelease-Author:geliu-High-71790-Etcd db defragment manually. [Disruptive] | ['"fmt"', '"strconv"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-NonPreRelease-Author:geliu-High-71790-Etcd db defragment manually. [Disruptive]", func() {
g.By("Find the etcd leader pods and record each db size.")
e2e.Logf("Discover all the etcd pods")
etcdPodList := getPodListByLabel(oc, "etcd=true")
etcdMemDbSize := make(map[string]int)
etcdMemDbSizeLater := make(map[string]int)
etcdLeaderPod := ""
for _, etcdPod := range etcdPodList {
e2e.Logf("login etcd pod: %v to get etcd member db size.", etcdPod)
etcdCmd := "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $4}'"
output, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
etcdMemDbSize[etcdPod], _ = strconv.Atoi(output)
e2e.Logf("login etcd pod: %v to check endpoints status.", etcdPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $6}'"
output, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "true") {
etcdLeaderPod = etcdPod
} else {
e2e.Logf("login non-leader etcd pod: %v to do defrag db.", etcdPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 defrag"
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("login non-leader etcd pod: %v to record db size after defrag.", etcdPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $4}'"
output, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
etcdMemDbSizeLater[etcdPod], _ = strconv.Atoi(output)
}
}
e2e.Logf("login etcd leader pod: %v to do defrag db.", etcdLeaderPod)
etcdCmd := "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 defrag"
_, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("login etcd leader pod: %v to record db size after defrag.", etcdLeaderPod)
etcdCmd = "unset ETCDCTL_ENDPOINTS;etcdctl --command-timeout=30s --endpoints=https://localhost:2379 endpoint status |awk '{print $4}'"
output, err := exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
etcdMemDbSizeLater[etcdLeaderPod], _ = strconv.Atoi(output)
e2e.Logf(fmt.Sprintf("etcdleaderPod: %v", etcdLeaderPod))
g.By("Compare etcd db size before/after defrage.")
e2e.Logf("etcd db size before defrag.")
for k, v := range etcdMemDbSize {
e2e.Logf("etcd pod name: %v, db size: %d", k, v)
}
e2e.Logf("etcd db size after defrag.")
for k, v := range etcdMemDbSizeLater {
e2e.Logf("etcd pod name: %v, db size: %d", k, v)
}
for k, v := range etcdMemDbSize {
if v <= etcdMemDbSizeLater[k] {
e2e.Failf("etcd: %v db size is not reduce after defrag.", k)
}
}
g.By("Clear it if any NOSPACE alarms.")
etcdCmd = "etcdctl alarm list"
output, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
if output != "" {
etcdCmd = "etcdctl alarm disarm"
_, err = exutil.RemoteShPod(oc, "openshift-etcd", etcdLeaderPod, "sh", "-c", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}
}) | |||||
test case | openshift/openshift-tests-private | 08e7140a-d764-4711-9496-482612b37b5e | NonHyperShiftHOST-NonPreRelease-Author:geliu-High-73511-Selectable etcd database size. [Disruptive] | ['"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("NonHyperShiftHOST-NonPreRelease-Author:geliu-High-73511-Selectable etcd database size. [Disruptive]", func() {
g.By("check cluster has enabled TechPreviewNoUpgradec.")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if featureSet != "TechPreviewNoUpgrade" {
g.Skip("featureSet is not TechPreviewNoUpgradec, skip it!")
}
defer func() {
patchPath := "{\"spec\":{\"backendQuotaGiB\": 8}}"
output, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath).Output()
if strings.Contains(output, "etcd backendQuotaGiB may not be decreased") {
e2e.Logf("etcd backendQuotaGiB may not be decreased: %v ", output)
}
checkOperator(oc, "etcd")
}()
g.By("patch etcd cluster backendQuotaGiB to 16G.")
patchPath := "{\"spec\":{\"backendQuotaGiB\": 16}}"
err0 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd", "cluster", "--type=merge", "-p", patchPath).Execute()
o.Expect(err0).NotTo(o.HaveOccurred())
g.By("waiting for etcd rollout automatically, restart all etcd pods at a time to pick up the new values")
checkOperator(oc, "etcd")
g.By("verify ETCD_QUOTA_BACKEND_BYTES value in etcd pods.")
etcdPodList := getPodListByLabel(oc, "etcd=true")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "pod", etcdPodList[0], "-o=jsonpath={.spec.containers[0].env[16].value}").Output()
if output != "17179869184" || err != nil {
e2e.Failf("ETCD_QUOTA_BACKEND_BYTES is not expected value: 17179869184")
}
}) | |||||
test case | openshift/openshift-tests-private | e4048d94-1180-4341-8256-9cbd1c693e10 | Author:geliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-75259-Auto rotation of etcd signer certs from ocp 4.17. [Disruptive] | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("Author:geliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-75259-Auto rotation of etcd signer certs from ocp 4.17. [Disruptive]", func() {
g.By("Check the remaining lifetime of the signer certificate in openshift-etcd namespace.")
certificateNotBefore0, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io\\/certificate-not-before}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
certificateNotAfter0, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io\\/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("etcd signer certificate expired Not After: %v", certificateNotAfter0)
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before running case :: %s ", err))
}
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after running case :: %v ", err)
}
}()
g.By("update the existing signer: when notAfter or notBefore is malformed.")
err = oc.AsAdmin().Run("patch").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-p", fmt.Sprintf("{\"metadata\": {\"annotations\": {\"auth.openshift.io/certificate-not-after\": \"%s\"}}}", certificateNotBefore0), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for etcd-signer rotation and cluster health.")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=30m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after delete etcd-signer :: %v ", err)
}
g.By("2nd Check the remaining lifetime of the new signer certificate in openshift-etcd namespace")
certificateNotAfter1, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
layout := "2006-01-02T15:04:05Z"
timeStr0, perr := time.Parse(layout, certificateNotAfter0)
o.Expect(perr).NotTo(o.HaveOccurred())
timeStr1, perr := time.Parse(layout, certificateNotAfter1)
o.Expect(perr).NotTo(o.HaveOccurred())
if timeStr1.Before(timeStr0) || timeStr1.Equal(timeStr0) {
e2e.Failf(fmt.Sprintf("etcd-signer certificate-not-after time value is wrong for new one %s is not after old one %s.", timeStr1, timeStr0))
}
}) | |||||
test case | openshift/openshift-tests-private | 69b5ed40-515d-4d9a-8f2c-9cd3e2195c7e | Author:geliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-75224-Manual rotation of etcd signer certs from ocp 4.17. [Disruptive] | ['"fmt"', '"strconv"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/etcd/etcd_tests.go | g.It("Author:geliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-75224-Manual rotation of etcd signer certs from ocp 4.17. [Disruptive]", func() {
g.By("Check the remaining lifetime of the signer certificate in openshift-etcd namespace.")
certificateNotAfter0, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io\\/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("etcd signer certificate expired Not After: %v", certificateNotAfter0)
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before running case :: %s ", err))
}
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after running case :: %v ", err)
}
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Delete the existing signer.")
_, err = oc.AsAdmin().Run("delete").Args("-n", "openshift-etcd", "secret", "etcd-signer").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for etcd-signer rotation and cluster health.")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=40m").Execute()
if err != nil {
e2e.Failf("Cluster health check failed after delete etcd-signer :: %v ", err)
}
g.By("Check revision again, the output means that the last revision is >= 8")
revisionValue, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "configmap", "etcd-all-bundles", "-o=jsonpath={.metadata.annotations.openshift\\.io\\/ceo-bundle-rollout-revision}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
revisionValueInt, err := strconv.Atoi(revisionValue)
o.Expect(err).NotTo(o.HaveOccurred())
if revisionValueInt <= 8 {
e2e.Failf(fmt.Sprintf("etcd-signer revision value is %s, but not >=8", revisionValue))
}
g.By("2nd Check the remaining lifetime of the new signer certificate in openshift-etcd namespace")
certificateNotAfter1, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-etcd", "secret", "etcd-signer", "-o=jsonpath={.metadata.annotations.auth\\.openshift\\.io/certificate-not-after}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
layout := "2006-01-02T15:04:05Z"
timeStr0, perr := time.Parse(layout, certificateNotAfter0)
o.Expect(perr).NotTo(o.HaveOccurred())
timeStr1, perr := time.Parse(layout, certificateNotAfter1)
o.Expect(perr).NotTo(o.HaveOccurred())
if timeStr1.Before(timeStr0) || timeStr1.Equal(timeStr0) {
e2e.Failf(fmt.Sprintf("etcd-signer certificate-not-after time value is wrong for new one %s is not after old one %s.", timeStr1, timeStr0))
}
}) | |||||
file | openshift/openshift-tests-private | 922db2d7-029c-4808-9cb8-698ad436e716 | azure | import (
"errors"
"fmt"
"net/url"
"strings"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/azure.go | package hypershift
import (
"errors"
"fmt"
"net/url"
"strings"
)
type azureKMSKey struct {
keyName string
keyVaultName string
keyVersion string
}
// The vault key URI is expected to be in the format:
// https://<KEYVAULT_NAME>.vault.azure.net/keys/<KEYVAULT_KEY_NAME>/<KEYVAULT_KEY_VERSION>
func parseAzureVaultKeyURI(vaultKeyURI string) (azureKMSKey, error) {
parsedURL, err := url.Parse(vaultKeyURI)
if err != nil {
return azureKMSKey{}, err
}
hostParts := strings.Split(parsedURL.Host, ".")
if len(hostParts) != 4 {
return azureKMSKey{}, errors.New("invalid host format")
}
keyVaultName := hostParts[0]
pathParts := strings.Split(strings.Trim(parsedURL.Path, "/"), "/")
if len(pathParts) != 3 {
return azureKMSKey{}, errors.New("invalid path format")
}
keyName := pathParts[1]
keyVersion := pathParts[2]
return azureKMSKey{
keyName: keyName,
keyVaultName: keyVaultName,
keyVersion: keyVersion,
}, nil
}
func getHCPatchForAzureKMS(activeKey, backupKey *azureKMSKey) (string, error) {
if activeKey == nil && backupKey == nil {
return "", errors.New("at least one of activeKey or backupKey must be non-nil")
}
patch := `
spec:
secretEncryption:
kms:
azure:
`
if activeKey != nil {
patch += fmt.Sprintf(` activeKey:
keyName: %s
keyVaultName: %s
keyVersion: %s
`, activeKey.keyName, activeKey.keyVaultName, activeKey.keyVersion)
}
if backupKey != nil {
patch += fmt.Sprintf(` backupKey:
keyName: %s
keyVaultName: %s
keyVersion: %s
`, backupKey.keyName, backupKey.keyVaultName, backupKey.keyVersion)
}
return patch, nil
}
type azureNodepoolImageType string
const (
azureNodepoolImageTypeMarketplace azureNodepoolImageType = "AzureMarketplace"
azureNodepoolImageTypeId azureNodepoolImageType = "ImageID"
)
type azureMarketplaceImage struct {
Offer string `param:"marketplace-offer"`
Publisher string `param:"marketplace-publisher"`
SKU string `param:"marketplace-sku"`
Version string `param:"marketplace-version"`
}
| package hypershift | ||||
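The two helpers above compose naturally: parse a vault key URI of the documented form, then render the HostedCluster merge patch. Below is a minimal usage sketch (not part of the source file), assuming it compiles in the same hypershift package where fmt is already imported; the vault name, key name, and key version are illustrative values only.
// Minimal usage sketch, assuming same-package access to the helpers above.
// All concrete values here are hypothetical.
func exampleAzureKMSPatch() {
// Parse a key URI of the documented form:
// https://<KEYVAULT_NAME>.vault.azure.net/keys/<KEYVAULT_KEY_NAME>/<KEYVAULT_KEY_VERSION>
key, err := parseAzureVaultKeyURI("https://myvault.vault.azure.net/keys/etcd-key/0123456789abcdef")
if err != nil {
panic(err) // sketch only; real callers should propagate the error
}
// key now holds: keyVaultName="myvault", keyName="etcd-key", keyVersion="0123456789abcdef"
// Build a merge patch that sets only the active KMS key on the HostedCluster.
patch, err := getHCPatchForAzureKMS(&key, nil)
if err != nil {
panic(err)
}
fmt.Println(patch) // YAML fragment under spec.secretEncryption.kms.azure.activeKey
}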
function | openshift/openshift-tests-private | b5f1ff56-716b-486f-b1b2-8cc9911e5c83 | parseAzureVaultKeyURI | ['"errors"', '"net/url"', '"strings"'] | ['azureKMSKey'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/azure.go | func parseAzureVaultKeyURI(vaultKeyURI string) (azureKMSKey, error) {
parsedURL, err := url.Parse(vaultKeyURI)
if err != nil {
return azureKMSKey{}, err
}
hostParts := strings.Split(parsedURL.Host, ".")
if len(hostParts) != 4 {
return azureKMSKey{}, errors.New("invalid host format")
}
keyVaultName := hostParts[0]
pathParts := strings.Split(strings.Trim(parsedURL.Path, "/"), "/")
if len(pathParts) != 3 {
return azureKMSKey{}, errors.New("invalid path format")
}
keyName := pathParts[1]
keyVersion := pathParts[2]
return azureKMSKey{
keyName: keyName,
keyVaultName: keyVaultName,
keyVersion: keyVersion,
}, nil
} | hypershift | |||
function | openshift/openshift-tests-private | cf5a3b0b-c0da-4f0c-84ff-df3c13f3874e | getHCPatchForAzureKMS | ['"errors"', '"fmt"'] | ['azureKMSKey'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/azure.go | func getHCPatchForAzureKMS(activeKey, backupKey *azureKMSKey) (string, error) {
if activeKey == nil && backupKey == nil {
return "", errors.New("at least one of activeKey or backupKey must be non-nil")
}
patch := `
spec:
secretEncryption:
kms:
azure:
`
if activeKey != nil {
patch += fmt.Sprintf(` activeKey:
keyName: %s
keyVaultName: %s
keyVersion: %s
`, activeKey.keyName, activeKey.keyVaultName, activeKey.keyVersion)
}
if backupKey != nil {
patch += fmt.Sprintf(` backupKey:
keyName: %s
keyVaultName: %s
keyVersion: %s
`, backupKey.keyName, backupKey.keyVaultName, backupKey.keyVersion)
}
return patch, nil
} | hypershift | |||
file | openshift/openshift-tests-private | aab91fed-4356-4b1d-88e3-9665aac24af7 | client-go-utils | import (
"context"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/client-go-utils.go | package hypershift
import (
"context"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type watchInfo struct {
// namespace of the service to watch
namespace string
resourceType K8SResource
// event handler func
addFunc func(obj interface{})
updateFunc func(oldObj interface{}, newObj interface{})
deleteFunc func(obj interface{})
//name of the resource; the informer watches every object of this type in the namespace, not a single name,
//so the event handler must compare the resource name to check whether it is the target
name string
}
func startWatch(ctx context.Context, kubeconfigPath string, info watchInfo) error {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return err
}
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return err
}
go staticWatch(ctx, clientSet, info)
return nil
}
func staticWatch(ctx context.Context, clientSet kubernetes.Interface, info watchInfo) {
fac := informers.NewSharedInformerFactoryWithOptions(clientSet, 0, informers.WithNamespace(info.namespace))
var informer cache.SharedIndexInformer
switch info.resourceType {
case Service:
informer = fac.Core().V1().Services().Informer()
default:
e2e.Logf("invalid resource type %s, return", string(info.resourceType))
return
}
_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: info.addFunc,
DeleteFunc: info.deleteFunc,
UpdateFunc: info.updateFunc,
})
if err != nil {
e2e.Logf("AddEventHandler err for resource %s %s in %s, err %s, return", string(info.resourceType), info.name, info.namespace, err.Error())
return
}
e2e.Logf("start informer event watch for %s: %s %s", string(info.resourceType), info.namespace, info.name)
informer.Run(ctx.Done())
e2e.Logf("ctx Done %s, exit watching %s: %s %s", ctx.Err(), string(info.resourceType), info.namespace, info.name)
}
type operatorWatchInfo struct {
	// GVR of the CRD to watch
group string
version string
resources string
// CR namespace to watch
namespace string
	// event handler funcs; the []byte parameter carries the object's JSON and can be
	// unmarshalled into the corresponding CRD struct
addFunc func(obj []byte)
updateFunc func(oldObj []byte, newObj []byte)
deleteFunc func(obj []byte)
	// name of the CR. The informer watches every CR of the given type in the namespace
	// rather than a single named object, so compare the resource name inside the event
	// handlers to check whether an event is for the target object.
	name string
}
func startWatchOperator(ctx context.Context, kubeconfigPath string, info operatorWatchInfo) error {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return err
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return err
}
go watchOperator(ctx, dynamicClient, info)
return nil
}
func watchOperator(ctx context.Context, client dynamic.Interface, info operatorWatchInfo) {
fac := dynamicinformer.NewFilteredDynamicSharedInformerFactory(client, 0, info.namespace, nil)
informer := fac.ForResource(schema.GroupVersionResource{
Group: info.group,
Version: info.version,
Resource: info.resources,
}).Informer()
eventHandler := cache.ResourceEventHandlerFuncs{}
	if info.addFunc != nil {
		eventHandler.AddFunc = func(obj interface{}) {
			typedObj := obj.(*unstructured.Unstructured)
			objBytes, err := typedObj.MarshalJSON()
			if err != nil {
				return
			}
			info.addFunc(objBytes)
		}
	}
	if info.deleteFunc != nil {
		eventHandler.DeleteFunc = func(obj interface{}) {
			typedObj := obj.(*unstructured.Unstructured)
			objBytes, err := typedObj.MarshalJSON()
			if err != nil {
				return
			}
			info.deleteFunc(objBytes)
		}
	}
if info.updateFunc != nil {
eventHandler.UpdateFunc = func(oldObj interface{}, newObj interface{}) {
typedObj := oldObj.(*unstructured.Unstructured)
oldObjBytes, err := typedObj.MarshalJSON()
if err != nil {
return
}
typedObj = newObj.(*unstructured.Unstructured)
newObjBytes, err := typedObj.MarshalJSON()
if err != nil {
return
}
info.updateFunc(oldObjBytes, newObjBytes)
}
}
_, err := informer.AddEventHandler(eventHandler)
if err != nil {
e2e.Logf("AddEventHandler err for %s %s in %s, err %s, return", info.resources, info.name, info.namespace, err.Error())
return
}
e2e.Logf("start informer event watch for %s.%s %s %s", info.resources, info.group, info.namespace, info.name)
informer.Run(ctx.Done())
e2e.Logf("ctx Done %s, exit watching %s.%s %s %s", ctx.Err(), info.resources, info.group, info.namespace, info.name)
}
| package hypershift | ||||
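Usage sketch for startWatchOperator: watching update events on a custom resource through the dynamic informer above. The GVR, namespace, CR name, kubeconfig path, and the "context", "time", and "encoding/json" imports are assumptions for illustration, not values from this repository.

// Hedged sketch, assuming the same package with "context", "time", and
// "encoding/json" imported.
func exampleWatchHostedCluster(kubeconfigPath string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	err := startWatchOperator(ctx, kubeconfigPath, operatorWatchInfo{
		group:     "hypershift.openshift.io", // assumed GVR
		version:   "v1beta1",
		resources: "hostedclusters",
		namespace: "clusters",
		name:      "demo",
		updateFunc: func(oldObj, newObj []byte) {
			// the informer is namespace-scoped, so filter by name here
			var cr struct {
				Metadata struct {
					Name string `json:"name"`
				} `json:"metadata"`
			}
			if json.Unmarshal(newObj, &cr) == nil && cr.Metadata.Name == "demo" {
				e2e.Logf("hostedcluster %s updated", cr.Metadata.Name)
			}
		},
	})
	if err != nil {
		return err
	}
	<-ctx.Done() // keep the watch alive until the timeout fires
	return nil
}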
function | openshift/openshift-tests-private | 4357a26f-975b-4cb4-a7d1-37a86ae324fc | startWatch | ['"context"', '"k8s.io/client-go/kubernetes"', '"k8s.io/client-go/tools/clientcmd"'] | ['watchInfo'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client-go-utils.go | func startWatch(ctx context.Context, kubeconfigPath string, info watchInfo) error {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return err
}
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return err
}
go staticWatch(ctx, clientSet, info)
return nil
} | hypershift |
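Usage sketch for startWatch: watching Service add events with the static informer. The namespace, service name, and kubeconfig path are illustrative, and corev1 "k8s.io/api/core/v1", "context", and "time" are assumed to be imported.

// Hedged sketch; names are assumptions, and the Service type assertion matches
// what the core/v1 Services informer delivers.
func exampleWatchAPIServerService(kubeconfigPath string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	err := startWatch(ctx, kubeconfigPath, watchInfo{
		namespace:    "clusters-demo",
		resourceType: Service,
		name:         "kube-apiserver",
		addFunc: func(obj interface{}) {
			// the informer sees every Service in the namespace; filter by name
			svc, ok := obj.(*corev1.Service)
			if !ok || svc.Name != "kube-apiserver" {
				return
			}
			e2e.Logf("service %s/%s added", svc.Namespace, svc.Name)
		},
	})
	if err != nil {
		return err
	}
	<-ctx.Done() // keep the informer running until the timeout fires
	return nil
}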