element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
function | openshift/openshift-tests-private | 33a0246d-d7e0-4f2d-aa47-1adeac3a8016 | getPodIPv4 | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func getPodIPv4(oc *exutil.CLI, podName string, ns string) string {
IPv4add, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-o=jsonpath={.status.podIP}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nPod IP address is %v", IPv4add)
return IPv4add
} | cet | |||||
function | openshift/openshift-tests-private | 7e23a71b-86bf-4a14-94fa-c29b163c5878 | pingIpaddr | ['"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func pingIpaddr(oc *exutil.CLI, ns string, podName string, cmd string) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, podName, "--", "/bin/bash", "-c", cmd).OutputToFile("pingipaddr.txt")
o.Expect(err).NotTo(o.HaveOccurred())
result, err1 := exec.Command("bash", "-c", "cat "+status+" | egrep '64 bytes from 8.8.8.8: icmp_seq'").Output()
if err1 != nil {
e2e.Failf("the result of ReadFile:%v", err1)
return false, nil
}
e2e.Logf("\nPing output is %s\n", result)
if strings.Contains(string(result), "64 bytes from 8.8.8.8: icmp_seq") {
e2e.Logf("\nPing Successful \n")
return true, nil
}
return false, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | 4b2501ca-5426-4163-a15f-24e90d2f3484 | checkProxy | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func checkProxy(oc *exutil.CLI) bool {
httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpsProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if httpProxy != "" || httpsProxy != "" {
return true
}
return false
} | cet | |||||
function | openshift/openshift-tests-private | b790ac64-5d6f-4eab-b35f-8151a6b34bec | getmcpStatus | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func getmcpStatus(oc *exutil.CLI, nodeSelector string) error {
return wait.Poll(10*time.Second, 15*time.Minute, func() (bool, error) {
mCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.machineCount}").Output()
unmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.unavailableMachineCount}").Output()
dmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.degradedMachineCount}").Output()
rmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.readyMachineCount}").Output()
e2e.Logf("MachineCount:%v unavailableMachineCount:%v degradedMachineCount:%v ReadyMachineCount:%v", mCount, unmCount, dmCount, rmCount)
if strings.Compare(mCount, rmCount) == 0 && strings.Compare(unmCount, dmCount) == 0 {
return true, nil
}
return false, nil
})
} | cet | ||||
file | openshift/openshift-tests-private | 8b51a151-6b1e-4125-8b8d-7befcf2c0fb3 | install | import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"text/template"
"github.com/openshift/openshift-tests-private/test/extended/testdata"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/csi/install.go | package csi
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"text/template"
"github.com/openshift/openshift-tests-private/test/extended/testdata"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
const (
csiBasePath = "test/extended/testdata/csi"
defaultImageFormat = "registry.svc.ci.openshift.org/origin/4.3:${component}"
imageFormatVariable = "IMAGE_FORMAT"
)
// InstallCSIDriver installs a CSI driver and defines its tests.
// It applies "test/extended/csi/<driverName>/install-template.yaml" and
// returns path to test manifest "test/extended/csi/<driverName>/manifest.yaml"
func InstallCSIDriver(driverName string, dryRun bool) (string, error) {
// The driver name comes from a user and we want a nice error message instead
// of panic in FixturePath().
templatePath := filepath.Join(csiBasePath, driverName, "install-template.yaml")
if _, err := testdata.AssetInfo(templatePath); err != nil {
return "", fmt.Errorf("failed to install CSI driver %q: %s", driverName, err)
}
manifestPath := filepath.Join(csiBasePath, driverName, "manifest.yaml")
if _, err := testdata.AssetInfo(manifestPath); err != nil {
return "", fmt.Errorf("failed to install CSI driver %q: %s", driverName, err)
}
// storageclass.yaml is optional, so we don't return an error if it's absent
scPath := filepath.Join(csiBasePath, driverName, "storageclass.yaml")
if _, err := testdata.AssetInfo(scPath); err == nil {
scFixturePath := strings.Split(scPath, string(os.PathSeparator))[2:]
exutil.FixturePath(scFixturePath...)
}
// Convert to array and cut "test/extended" for FixturePath()
templateFixturePath := strings.Split(templatePath, string(os.PathSeparator))[2:]
yamlPath, err := executeTemplate(exutil.FixturePath(templateFixturePath...))
defer os.Remove(yamlPath)
if err != nil {
return "", err
}
if !dryRun {
// Install the driver
oc := exutil.NewCLIWithoutNamespace("csi-install")
if err := oc.Run("apply").Args("-f", yamlPath).Execute(); err != nil {
return "", fmt.Errorf("failed to apply %s: %s", yamlPath, err)
}
}
// Cut "test/extended" for FixturePath()
manifestFixturePath := strings.Split(manifestPath, string(os.PathSeparator))[2:]
return exutil.FixturePath(manifestFixturePath...), nil
}
// ListCSIDrivers returns list of hardcoded CSI drivers, i.e. list of directories in "test/extended/csi".
func ListCSIDrivers() ([]string, error) {
return testdata.AssetDir(csiBasePath)
}
// Executes given golang template file and returns path to resulting file.
func executeTemplate(templatePath string) (string, error) {
tmpl, err := template.ParseFiles(templatePath)
if err != nil {
return "", err
}
yamlFile, err := ioutil.TempFile("", "openshift-tests-csi-*")
if err != nil {
return "", err
}
yamlPath := yamlFile.Name()
imageFormat := os.Getenv(imageFormatVariable)
if imageFormat == "" {
imageFormat = defaultImageFormat
}
variables := struct{ AttacherImage, ProvisionerImage, NodeDriverRegistrarImage, LivenessProbeImage, ImageFormat string }{
AttacherImage: strings.ReplaceAll(imageFormat, "${component}", "csi-external-attacher"),
ProvisionerImage: strings.ReplaceAll(imageFormat, "${component}", "csi-external-provisioner"),
NodeDriverRegistrarImage: strings.ReplaceAll(imageFormat, "${component}", "csi-node-driver-registrar"),
LivenessProbeImage: strings.ReplaceAll(imageFormat, "${component}", "csi-livenessprobe"),
ImageFormat: imageFormat,
}
err = tmpl.Execute(yamlFile, variables)
yamlFile.Close()
if err != nil {
os.Remove(yamlPath)
return "", err
}
return yamlPath, nil
}
| package csi | ||||
function | openshift/openshift-tests-private | ac46e3d5-0d5f-4246-8257-00843475000e | InstallCSIDriver | ['"fmt"', '"os"', '"path/filepath"', '"strings"', '"text/template"', '"github.com/openshift/openshift-tests-private/test/extended/testdata"'] | github.com/openshift/openshift-tests-private/test/extended/csi/install.go | func InstallCSIDriver(driverName string, dryRun bool) (string, error) {
// The driver name comes from a user and we want a nice error message instead
// of panic in FixturePath().
templatePath := filepath.Join(csiBasePath, driverName, "install-template.yaml")
if _, err := testdata.AssetInfo(templatePath); err != nil {
return "", fmt.Errorf("failed to install CSI driver %q: %s", driverName, err)
}
manifestPath := filepath.Join(csiBasePath, driverName, "manifest.yaml")
if _, err := testdata.AssetInfo(manifestPath); err != nil {
return "", fmt.Errorf("failed to install CSI driver %q: %s", driverName, err)
}
// storageclass.yaml is optional, so we don't return an error if it's absent
scPath := filepath.Join(csiBasePath, driverName, "storageclass.yaml")
if _, err := testdata.AssetInfo(scPath); err == nil {
scFixturePath := strings.Split(scPath, string(os.PathSeparator))[2:]
exutil.FixturePath(scFixturePath...)
}
// Convert to array and cut "test/extended" for FixturePath()
templateFixturePath := strings.Split(templatePath, string(os.PathSeparator))[2:]
yamlPath, err := executeTemplate(exutil.FixturePath(templateFixturePath...))
defer os.Remove(yamlPath)
if err != nil {
return "", err
}
if !dryRun {
// Install the driver
oc := exutil.NewCLIWithoutNamespace("csi-install")
if err := oc.Run("apply").Args("-f", yamlPath).Execute(); err != nil {
return "", fmt.Errorf("failed to apply %s: %s", yamlPath, err)
}
}
// Cut "test/extended" for FixturePath()
manifestFixturePath := strings.Split(manifestPath, string(os.PathSeparator))[2:]
return exutil.FixturePath(manifestFixturePath...), nil
} | csi | ||||
function | openshift/openshift-tests-private | d3b792e7-77bb-41a6-b7d0-5c46a6f05ca4 | ListCSIDrivers | ['"github.com/openshift/openshift-tests-private/test/extended/testdata"'] | github.com/openshift/openshift-tests-private/test/extended/csi/install.go | func ListCSIDrivers() ([]string, error) {
return testdata.AssetDir(csiBasePath)
} | csi | ||||
function | openshift/openshift-tests-private | 35817174-f844-4e38-bb6e-d9bb5df4c8c6 | executeTemplate | ['"io/ioutil"', '"os"', '"strings"', '"text/template"'] | github.com/openshift/openshift-tests-private/test/extended/csi/install.go | func executeTemplate(templatePath string) (string, error) {
tmpl, err := template.ParseFiles(templatePath)
if err != nil {
return "", err
}
yamlFile, err := ioutil.TempFile("", "openshift-tests-csi-*")
if err != nil {
return "", err
}
yamlPath := yamlFile.Name()
imageFormat := os.Getenv(imageFormatVariable)
if imageFormat == "" {
imageFormat = defaultImageFormat
}
variables := struct{ AttacherImage, ProvisionerImage, NodeDriverRegistrarImage, LivenessProbeImage, ImageFormat string }{
AttacherImage: strings.ReplaceAll(imageFormat, "${component}", "csi-external-attacher"),
ProvisionerImage: strings.ReplaceAll(imageFormat, "${component}", "csi-external-provisioner"),
NodeDriverRegistrarImage: strings.ReplaceAll(imageFormat, "${component}", "csi-node-driver-registrar"),
LivenessProbeImage: strings.ReplaceAll(imageFormat, "${component}", "csi-livenessprobe"),
ImageFormat: imageFormat,
}
err = tmpl.Execute(yamlFile, variables)
yamlFile.Close()
if err != nil {
os.Remove(yamlPath)
return "", err
}
return yamlPath, nil
} | csi | ||||
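executeTemplate combines two substitution steps: strings.ReplaceAll expands the ${component} placeholder of IMAGE_FORMAT into concrete sidecar image names, and text/template then renders those values into the driver's install manifest. A minimal stdlib-only sketch of the same flow, with a hypothetical inline template standing in for install-template.yaml and stdout instead of a temporary file:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	imageFormat := "registry.svc.ci.openshift.org/origin/4.3:${component}"

	// Expand the ${component} placeholder per sidecar image.
	vars := struct{ AttacherImage, ProvisionerImage string }{
		AttacherImage:    strings.ReplaceAll(imageFormat, "${component}", "csi-external-attacher"),
		ProvisionerImage: strings.ReplaceAll(imageFormat, "${component}", "csi-external-provisioner"),
	}

	// Hypothetical inline template standing in for install-template.yaml.
	tmpl := template.Must(template.New("install").Parse(
		"attacher: {{.AttacherImage}}\nprovisioner: {{.ProvisionerImage}}\n"))

	// Render to stdout instead of a temp YAML file.
	if err := tmpl.Execute(os.Stdout, vars); err != nil {
		panic(err)
	}
}
```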
file | openshift/openshift-tests-private | 90c1ed1d-e001-4f0e-80c1-b0c3ab41aaaa | compute_aws | import (
"fmt"
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_aws.go | package disasterrecovery
import (
"fmt"
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type awsInstance struct {
instance
client *exutil.AwsClient
}
// GetAwsNodes gets the nodes with the specified label and loads the cloud credentials.
func GetAwsNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
clusterinfra.GetAwsCredentialFromCluster(oc)
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newAwsInstance(oc, exutil.InitAwsSession(), nodeName))
}
return results, nil
}
func (a *awsInstance) GetInstanceID() (string, error) {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
if err != nil {
e2e.Logf("Get instance id failed with error :: %v.", err)
return "", err
}
return instanceID, nil
}
func newAwsInstance(oc *exutil.CLI, client *exutil.AwsClient, nodeName string) *awsInstance {
return &awsInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
}
}
func (a *awsInstance) Start() error {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
instanceState, err := a.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = a.client.StartInstance(instanceID)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", a.nodeName, instanceState)
}
return nil
}
func (a *awsInstance) Stop() error {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
instanceState, err := a.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = a.client.StopInstance(instanceID)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", a.nodeName, instanceState)
}
return nil
}
func (a *awsInstance) State() (string, error) {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
instanceState, err := a.client.GetAwsInstanceState(instanceID)
if err != nil {
e2e.Logf("Get instance state failed with error :: %v.", err)
return "", err
}
return strings.ToLower(instanceState), nil
}
| package disasterrecovery | ||||
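GetAwsNodes and the other provider constructors all return []ComputeNode, but the interface itself is defined elsewhere in the disasterrecovery package and is not shown in this listing. An assumed sketch, inferred from the methods every provider type here implements (treat the exact definition as unverified):

```go
// Assumed sketch of the ComputeNode interface returned by GetAwsNodes,
// GetAzureNodes, GetGcpNodes, etc.; the real definition lives elsewhere
// in this package.
package disasterrecovery

type ComputeNode interface {
	GetInstanceID() (string, error)
	Start() error
	Stop() error
	State() (string, error)
}
```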
function | openshift/openshift-tests-private | a22b7d4d-72c9-43d8-988d-bae48cebed76 | GetAwsNodes | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_aws.go | func GetAwsNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
clusterinfra.GetAwsCredentialFromCluster(oc)
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newAwsInstance(oc, exutil.InitAwsSession(), nodeName))
}
return results, nil
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | a92cfa82-100d-4d3e-8014-835bb2d5fc50 | GetInstanceID | ['awsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_aws.go | func (a *awsInstance) GetInstanceID() (string, error) {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
if err != nil {
e2e.Logf("Get instance id failed with error :: %v.", err)
return "", err
}
return instanceID, nil
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | e8f0f2ba-6f75-4f6f-a393-670ed08bcb59 | newAwsInstance | ['awsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_aws.go | func newAwsInstance(oc *exutil.CLI, client *exutil.AwsClient, nodeName string) *awsInstance {
return &awsInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | d1e99a7e-7127-4986-8e80-6f2343d63d10 | Start | ['"fmt"'] | ['awsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_aws.go | func (a *awsInstance) Start() error {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
instanceState, err := a.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = a.client.StartInstance(instanceID)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", a.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 5d55607e-e7b0-418f-a822-b4b97e079b04 | Stop | ['"fmt"'] | ['awsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_aws.go | func (a *awsInstance) Stop() error {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
instanceState, err := a.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = a.client.StopInstance(instanceID)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", a.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | b77bbd31-2e02-443d-a236-94ef3f46031f | State | ['"strings"'] | ['awsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_aws.go | func (a *awsInstance) State() (string, error) {
instanceID, err := a.client.GetAwsInstanceIDFromHostname(a.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
instanceState, err := a.client.GetAwsInstanceState(instanceID)
if err != nil {
e2e.Logf("Get instance state failed with error :: %v.", err)
return "", err
}
return strings.ToLower(instanceState), nil
} | disasterrecovery | |||
file | openshift/openshift-tests-private | e421acb0-716e-4c61-8e42-ef92b248cc22 | compute_azure | import (
"fmt"
"os"
"os/exec"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_azure.go | package disasterrecovery
import (
"fmt"
"os"
"os/exec"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type azureInstance struct {
instance
azureRGname string
client *exutil.AzureSession
azureCloudType string
}
// GetAzureNodes gets the nodes with the specified label and loads the cloud credentials.
func GetAzureNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
azureRGname, rgerr := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(rgerr).NotTo(o.HaveOccurred())
azureSession, sessErr := exutil.NewAzureSessionFromEnv()
o.Expect(sessErr).NotTo(o.HaveOccurred())
isAzureStack, cloudName := isAzureStackCluster(oc)
if isAzureStack {
var filePath string
filePath = os.Getenv("SHARED_DIR") + "/azurestack-login-script.sh"
if _, err := os.Stat(filePath); err == nil {
e2e.Logf("File %s exists.\n", filePath)
} else if os.IsNotExist(err) {
g.Skip(fmt.Sprintf("File %s does not exist.\n", filePath))
} else {
g.Skip(fmt.Sprintf("Error checking file: %v\n", err))
}
cmd := fmt.Sprintf(`source %s`, filePath)
cmdop := exec.Command("bash", "-c", cmd)
_, cmdErr := cmdop.CombinedOutput()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
os.Setenv("REQUESTS_CA_BUNDLE", "/tmp/ca.pem")
vmOutput, azcmdErr := exec.Command("bash", "-c", `az vm list --output table`).Output()
if azcmdErr == nil && string(vmOutput) != "" {
logger.Debugf("Able to run azure cli successfully :: %s :: %s", vmOutput, azcmdErr)
} else {
e2e.Failf("Not able to run azure cli successfully :: %s :: %s", string(vmOutput), azcmdErr)
}
}
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newAzureInstance(oc, azureSession, azureRGname, nodeName, strings.ToLower(cloudName)))
}
return results, nil
}
func newAzureInstance(oc *exutil.CLI, client *exutil.AzureSession, azureRGname, nodeName string, azureCloudType string) *azureInstance {
return &azureInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
azureRGname: azureRGname,
azureCloudType: azureCloudType,
}
}
func (az *azureInstance) GetInstanceID() (string, error) {
instanceID, err := exutil.GetAzureVMInstance(az.client, az.nodeName, az.azureRGname)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
}
func (az *azureInstance) Start() error {
if az.azureCloudType == "azurestackcloud" {
err := exutil.StartAzureStackVM(az.azureRGname, az.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return err
}
_, err := exutil.StartAzureVM(az.client, az.nodeName, az.azureRGname)
o.Expect(err).NotTo(o.HaveOccurred())
return err
}
func (az *azureInstance) Stop() error {
if az.azureCloudType == "azurestackcloud" {
err := exutil.StopAzureStackVM(az.azureRGname, az.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return err
}
_, err := exutil.StopAzureVM(az.client, az.nodeName, az.azureRGname)
o.Expect(err).NotTo(o.HaveOccurred())
return err
}
func (az *azureInstance) State() (string, error) {
if az.azureCloudType == "azurestackcloud" {
instanceState, err := exutil.GetAzureStackVMStatus(az.azureRGname, az.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return instanceState, err
}
instanceState, err := exutil.GetAzureVMInstanceState(az.client, az.nodeName, az.azureRGname)
o.Expect(err).NotTo(o.HaveOccurred())
return instanceState, err
}
| package disasterrecovery | ||||
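GetAzureNodes shells out to bash to source the Azure Stack login script and then verifies the CLI with `az vm list`. One subtlety worth noting: environment variables exported by a script sourced in a child bash do not propagate back to the Go process, which is why the code still sets REQUESTS_CA_BUNDLE explicitly via os.Setenv. A minimal sketch of the invocation pattern (the script path is hypothetical):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Sourcing happens inside the child bash; exported variables do not reach
	// this Go process. The script path here is hypothetical.
	out, err := exec.Command("bash", "-c",
		"source /tmp/azurestack-login-script.sh && az vm list --output table").CombinedOutput()
	if err != nil {
		fmt.Printf("command failed: %v\n%s\n", err, out)
		return
	}
	fmt.Printf("%s\n", out)
}
```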
function | openshift/openshift-tests-private | f7d7d959-3704-48a9-a85e-3cad9dcab1ae | GetAzureNodes | ['"fmt"', '"os"', '"os/exec"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_azure.go | func GetAzureNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
azureRGname, rgerr := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(rgerr).NotTo(o.HaveOccurred())
azureSession, sessErr := exutil.NewAzureSessionFromEnv()
o.Expect(sessErr).NotTo(o.HaveOccurred())
isAzureStack, cloudName := isAzureStackCluster(oc)
if isAzureStack {
var filePath string
filePath = os.Getenv("SHARED_DIR") + "/azurestack-login-script.sh"
if _, err := os.Stat(filePath); err == nil {
e2e.Logf("File %s exists.\n", filePath)
} else if os.IsNotExist(err) {
g.Skip(fmt.Sprintf("File %s does not exist.\n", filePath))
} else {
g.Skip(fmt.Sprintf("Error checking file: %v\n", err))
}
cmd := fmt.Sprintf(`source %s`, filePath)
cmdop := exec.Command("bash", "-c", cmd)
_, cmdErr := cmdop.CombinedOutput()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
os.Setenv("REQUESTS_CA_BUNDLE", "/tmp/ca.pem")
vmOutput, azcmdErr := exec.Command("bash", "-c", `az vm list --output table`).Output()
if azcmdErr == nil && string(vmOutput) != "" {
logger.Debugf("Able to run azure cli successfully :: %s :: %s", vmOutput, azcmdErr)
} else {
e2e.Failf("Not able to run azure cli successfully :: %s :: %s", string(vmOutput), azcmdErr)
}
}
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newAzureInstance(oc, azureSession, azureRGname, nodeName, strings.ToLower(cloudName)))
}
return results, nil
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | ec9bc7ec-ea11-44c0-9115-a05d7a0334c2 | newAzureInstance | ['azureInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_azure.go | func newAzureInstance(oc *exutil.CLI, client *exutil.AzureSession, azureRGname, nodeName string, azureCloudType string) *azureInstance {
return &azureInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
azureRGname: azureRGname,
azureCloudType: azureCloudType,
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 5bf8a384-57b3-49c5-bacd-10e59ab6ecd0 | GetInstanceID | ['azureInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_azure.go | func (az *azureInstance) GetInstanceID() (string, error) {
instanceID, err := exutil.GetAzureVMInstance(az.client, az.nodeName, az.azureRGname)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 656611b0-6bab-4a4a-adbf-a23e27fcb339 | Start | ['azureInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_azure.go | func (az *azureInstance) Start() error {
if az.azureCloudType == "azurestackcloud" {
err := exutil.StartAzureStackVM(az.azureRGname, az.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return err
}
_, err := exutil.StartAzureVM(az.client, az.nodeName, az.azureRGname)
o.Expect(err).NotTo(o.HaveOccurred())
return err
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 9b87704f-41fb-4249-9122-df11f475d0ad | Stop | ['azureInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_azure.go | func (az *azureInstance) Stop() error {
if az.azureCloudType == "azurestackcloud" {
err := exutil.StopAzureStackVM(az.azureRGname, az.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return err
}
_, err := exutil.StopAzureVM(az.client, az.nodeName, az.azureRGname)
o.Expect(err).NotTo(o.HaveOccurred())
return err
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | d136b386-9428-4ba4-9da5-5c70369a2d50 | State | ['azureInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_azure.go | func (az *azureInstance) State() (string, error) {
if az.azureCloudType == "azurestackcloud" {
instanceState, err := exutil.GetAzureStackVMStatus(az.azureRGname, az.nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return instanceState, err
}
instanceState, err := exutil.GetAzureVMInstanceState(az.client, az.nodeName, az.azureRGname)
o.Expect(err).NotTo(o.HaveOccurred())
return instanceState, err
} | disasterrecovery | ||||
file | openshift/openshift-tests-private | 7f882ef0-1b94-480d-9436-4edb93e8d13a | compute_baremetal | import (
"fmt"
"strings"
"time"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | package disasterrecovery
import (
"fmt"
"strings"
"time"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
type baremetalIPIInstance struct {
instance
}
func newBaremetalIPIInstance(oc *exutil.CLI, nodeName string) *baremetalIPIInstance {
return &baremetalIPIInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
}
}
// GetBaremetalNodes gets the nodes with the specified label.
func GetBaremetalNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newBaremetalIPIInstance(oc, nodeName))
}
return results, nil
}
func (ipi *baremetalIPIInstance) GetInstanceID() (string, error) {
var masterNodeMachineConfig string
bmhOutput, bmhErr := ipi.oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", "openshift-machine-api", "-o", `jsonpath='{.items[*].metadata.name}'`).Output()
o.Expect(bmhErr).NotTo(o.HaveOccurred())
machineConfigOutput := strings.Fields(bmhOutput)
for i := 0; i < len(machineConfigOutput); i++ {
if strings.Contains(machineConfigOutput[i], ipi.nodeName) {
masterNodeMachineConfig = strings.ReplaceAll(machineConfigOutput[i], "'", "")
}
}
return masterNodeMachineConfig, bmhErr
}
func (ipi *baremetalIPIInstance) Start() error {
errVM := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(ipi.oc, ipi.nodeName)
if err != nil {
return false, nil
}
patch := `[{"op": "replace", "path": "/spec/online", "value": true}]`
startErr := ipi.oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if startErr != nil {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errVM, fmt.Sprintf("Not able to start %s", ipi.nodeName))
return errVM
}
func (ipi *baremetalIPIInstance) Stop() error {
errVM := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(ipi.oc, ipi.nodeName)
if err != nil {
return false, nil
}
patch := `[{"op": "replace", "path": "/spec/online", "value": false}]`
stopErr := ipi.oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if stopErr != nil {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errVM, fmt.Sprintf("Not able to stop %s", ipi.nodeName))
return errVM
}
func (ipi *baremetalIPIInstance) State() (string, error) {
nodeStatus, statusErr := ipi.oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", ipi.nodeName, "-o", `jsonpath={.status.conditions[3].type}`).Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
return strings.ToLower(nodeStatus), statusErr
}
// GetBmhNodeMachineConfig get bmh machineconfig name
func GetBmhNodeMachineConfig(oc *exutil.CLI, vmInstance string) (string, error) {
var masterNodeMachineConfig string
bmhOutput, bmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", "openshift-machine-api", "-o", `jsonpath='{.items[*].metadata.name}'`).Output()
o.Expect(bmhErr).NotTo(o.HaveOccurred())
machineConfigOutput := strings.Fields(bmhOutput)
for i := 0; i < len(machineConfigOutput); i++ {
if strings.Contains(machineConfigOutput[i], vmInstance) {
masterNodeMachineConfig = strings.ReplaceAll(machineConfigOutput[i], "'", "")
}
}
return masterNodeMachineConfig, bmhErr
}
| package disasterrecovery | ||||
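Start and Stop toggle a BareMetalHost by patching .spec.online with a hand-written JSON Patch string. An equivalent sketch that builds the same document with encoding/json, which avoids quoting mistakes if the patch ever grows:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// jsonPatchOp models a single RFC 6902 operation.
type jsonPatchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value bool   `json:"value"`
}

// onlinePatch builds the same patch document the Start/Stop methods pass to "oc patch".
func onlinePatch(online bool) (string, error) {
	b, err := json.Marshal([]jsonPatchOp{{Op: "replace", Path: "/spec/online", Value: online}})
	return string(b), err
}

func main() {
	p, _ := onlinePatch(false)
	fmt.Println(p) // [{"op":"replace","path":"/spec/online","value":false}]
}
```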
function | openshift/openshift-tests-private | 9032f13f-451b-42a9-ad45-8dbe2770432a | newBaremetalIPIInstance | ['baremetalIPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | func newBaremetalIPIInstance(oc *exutil.CLI, nodeName string) *baremetalIPIInstance {
return &baremetalIPIInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | de6bd5d7-1e47-47de-95bc-5903836ea0ff | GetBaremetalNodes | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | func GetBaremetalNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newBaremetalIPIInstance(oc, nodeName))
}
return results, nil
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | b75b3d6f-06ff-497a-a1ba-53fe0a3e7ea9 | GetInstanceID | ['"strings"'] | ['baremetalIPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | func (ipi *baremetalIPIInstance) GetInstanceID() (string, error) {
var masterNodeMachineConfig string
bmhOutput, bmhErr := ipi.oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", "openshift-machine-api", "-o", `jsonpath='{.items[*].metadata.name}'`).Output()
o.Expect(bmhErr).NotTo(o.HaveOccurred())
machineConfigOutput := strings.Fields(bmhOutput)
for i := 0; i < len(machineConfigOutput); i++ {
if strings.Contains(machineConfigOutput[i], ipi.nodeName) {
masterNodeMachineConfig = strings.ReplaceAll(machineConfigOutput[i], "'", "")
}
}
return masterNodeMachineConfig, bmhErr
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 6e1b5cd6-763a-468c-b7ea-a9b6d45928b7 | Start | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['baremetalIPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | func (ipi *baremetalIPIInstance) Start() error {
errVM := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(ipi.oc, ipi.nodeName)
if err != nil {
return false, nil
}
patch := `[{"op": "replace", "path": "/spec/online", "value": true}]`
startErr := ipi.oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if startErr != nil {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errVM, fmt.Sprintf("Not able to start %s", ipi.nodeName))
return errVM
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 4ead54d3-3f91-447d-b859-10f2603d6938 | Stop | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['baremetalIPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | func (ipi *baremetalIPIInstance) Stop() error {
errVM := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(ipi.oc, ipi.nodeName)
if err != nil {
return false, nil
}
patch := `[{"op": "replace", "path": "/spec/online", "value": false}]`
stopErr := ipi.oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if stopErr != nil {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errVM, fmt.Sprintf("Not able to stop %s", ipi.nodeName))
return errVM
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 0a8044be-7195-4a05-b218-930616bb7a5f | State | ['"strings"'] | ['baremetalIPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | func (ipi *baremetalIPIInstance) State() (string, error) {
nodeStatus, statusErr := ipi.oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", ipi.nodeName, "-o", `jsonpath={.status.conditions[3].type}`).Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
return strings.ToLower(nodeStatus), statusErr
} | disasterrecovery | |||
function | openshift/openshift-tests-private | d66cb72e-9e2a-4833-8251-dca8165f8b91 | GetBmhNodeMachineConfig | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_baremetal.go | func GetBmhNodeMachineConfig(oc *exutil.CLI, vmInstance string) (string, error) {
var masterNodeMachineConfig string
bmhOutput, bmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", "openshift-machine-api", "-o", `jsonpath='{.items[*].metadata.name}'`).Output()
o.Expect(bmhErr).NotTo(o.HaveOccurred())
machineConfigOutput := strings.Fields(bmhOutput)
for i := 0; i < len(machineConfigOutput); i++ {
if strings.Contains(machineConfigOutput[i], vmInstance) {
masterNodeMachineConfig = strings.ReplaceAll(machineConfigOutput[i], "'", "")
}
}
return masterNodeMachineConfig, bmhErr
} | disasterrecovery | ||||
file | openshift/openshift-tests-private | 321eb823-37e9-47d9-8b3d-a4f5dd4181e5 | compute_gcp | import (
"fmt"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | package disasterrecovery
import (
"fmt"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type gcpInstance struct {
instance
projectID string
client *exutil.Gcloud
}
// GetGcpNodes gets the nodes with the specified label and loads the cloud credentials.
func GetGcpNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).ToNot(o.HaveOccurred())
client := client(projectID)
var results []ComputeNode
for _, node := range nodeNames {
results = append(results, newGcpInstance(oc, client, projectID, strings.Split(node, ".")[0]))
}
return results, nil
}
func (g *gcpInstance) GetInstanceID() (string, error) {
instanceID, err := g.client.GetGcpInstanceByNode(g.nodeName)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
}
func newGcpInstance(oc *exutil.CLI, client *exutil.Gcloud, projectID, nodeName string) *gcpInstance {
return &gcpInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
projectID: projectID,
}
}
// client logs in to the gcloud platform and returns a client.
func client(projectID string) *exutil.Gcloud {
if projectID != "openshift-qe" {
g.Skip("openshift-qe project is needed to execute this test case!")
}
gcloud := exutil.Gcloud{ProjectID: projectID}
return gcloud.Login()
}
func (g *gcpInstance) Start() error {
instanceState, err := g.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
nodeInstance := strings.Split(g.nodeName, ".")
zoneName, err := g.client.GetZone(g.nodeName, nodeInstance[0])
o.Expect(err).NotTo(o.HaveOccurred())
err = g.client.StartInstance(nodeInstance[0], zoneName)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", g.nodeName, instanceState)
}
return nil
}
func (g *gcpInstance) Stop() error {
instanceState, err := g.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
nodeInstance := strings.Split(g.nodeName, ".")
zoneName, err := g.client.GetZone(g.nodeName, nodeInstance[0])
o.Expect(err).NotTo(o.HaveOccurred())
err = g.client.StopInstanceAsync(nodeInstance[0], zoneName)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", g.nodeName, instanceState)
}
return nil
}
func (g *gcpInstance) State() (string, error) {
instanceState, err := g.client.GetGcpInstanceStateByNode(g.nodeName)
if err == nil {
e2e.Logf("VM %s is : %s", g.nodeName, strings.ToLower(instanceState))
return strings.ToLower(instanceState), nil
}
return "", err
}
| package disasterrecovery | ||||
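The AWS, GCP, and Nutanix Start/Stop methods gate on two maps, stopStates and startStates, that are defined elsewhere in the package and keyed by lower-cased instance states. A standalone sketch of that gating pattern; the concrete state names below are assumptions, not the package's actual values:

```go
package main

import "fmt"

// Assumed contents; the real stopStates/startStates maps are defined
// elsewhere in the disasterrecovery package.
var (
	stopStates  = map[string]struct{}{"stopped": {}}
	startStates = map[string]struct{}{"running": {}}
)

// startIfStopped mirrors the Start() gating: refuse to start unless the
// instance is currently in a stopped state.
func startIfStopped(name, state string) error {
	if _, ok := stopStates[state]; !ok {
		return fmt.Errorf("unable to start instance %s from status %s", name, state)
	}
	fmt.Printf("starting %s\n", name)
	return nil
}

func main() {
	fmt.Println(startIfStopped("node-a", "stopped")) // allowed
	fmt.Println(startIfStopped("node-b", "running")) // rejected
}
```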
function | openshift/openshift-tests-private | 75d52657-fcf2-4937-9d3c-e44b44276e2a | GetGcpNodes | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | func GetGcpNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).ToNot(o.HaveOccurred())
client := client(projectID)
var results []ComputeNode
for _, node := range nodeNames {
results = append(results, newGcpInstance(oc, client, projectID, strings.Split(node, ".")[0]))
}
return results, nil
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 007fb24a-8bcf-4e7c-a26a-ea297ece8577 | GetInstanceID | ['gcpInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | func (g *gcpInstance) GetInstanceID() (string, error) {
instanceID, err := g.client.GetGcpInstanceByNode(g.nodeName)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 0018e391-1086-4e14-9170-f155dac66948 | newGcpInstance | ['gcpInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | func newGcpInstance(oc *exutil.CLI, client *exutil.Gcloud, projectID, nodeName string) *gcpInstance {
return &gcpInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
projectID: projectID,
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | c7801ce4-38f1-4662-b93c-b7e2d6b644e8 | client | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | func client(projectID string) *exutil.Gcloud {
if projectID != "openshift-qe" {
g.Skip("openshift-qe project is needed to execute this test case!")
}
gcloud := exutil.Gcloud{ProjectID: projectID}
return gcloud.Login()
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | b12b02db-1f14-48d7-8009-b5505657cc60 | Start | ['"fmt"', '"strings"'] | ['gcpInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | func (g *gcpInstance) Start() error {
instanceState, err := g.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
nodeInstance := strings.Split(g.nodeName, ".")
zoneName, err := g.client.GetZone(g.nodeName, nodeInstance[0])
o.Expect(err).NotTo(o.HaveOccurred())
err = g.client.StartInstance(nodeInstance[0], zoneName)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", g.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 788ba183-fbb2-463e-af4a-aad756375f93 | Stop | ['"fmt"', '"strings"'] | ['gcpInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | func (g *gcpInstance) Stop() error {
instanceState, err := g.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
nodeInstance := strings.Split(g.nodeName, ".")
zoneName, err := g.client.GetZone(g.nodeName, nodeInstance[0])
o.Expect(err).NotTo(o.HaveOccurred())
err = g.client.StopInstanceAsync(nodeInstance[0], zoneName)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", g.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 01ff0bcb-2793-4e89-8491-bf54b5832c5e | State | ['"strings"'] | ['gcpInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_gcp.go | func (g *gcpInstance) State() (string, error) {
instanceState, err := g.client.GetGcpInstanceStateByNode(g.nodeName)
if err == nil {
e2e.Logf("VM %s is : %s", g.nodeName, strings.ToLower(instanceState))
return strings.ToLower(instanceState), nil
}
return "", err
} | disasterrecovery | |||
file | openshift/openshift-tests-private | 2ab8c57b-4a2f-41b6-9791-93735980e21c | compute_ibm | import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibm.go | package disasterrecovery
import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type ibmInstance struct {
instance
ibmRegion string
ibmVpcName string
client *exutil.IBMSession
baseDomain string
}
// Get the nodes with the specified label and load the cloud credentials.
func GetIbmNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
ibmApiKey, ibmRegion, ibmVpcName, credErr := exutil.GetIBMCredentialFromCluster(oc)
o.Expect(credErr).NotTo(o.HaveOccurred())
ibmSession, sessErr := exutil.NewIBMSessionFromEnv(ibmApiKey)
o.Expect(sessErr).NotTo(o.HaveOccurred())
baseDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-o=jsonpath={.spec.baseDomain}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newIbmInstance(oc, ibmSession, ibmRegion, ibmVpcName, nodeName, baseDomain))
}
return results, nil
}
func newIbmInstance(oc *exutil.CLI, client *exutil.IBMSession, ibmRegion, ibmVpcName, nodeName string, baseDomain string) *ibmInstance {
return &ibmInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
ibmRegion: ibmRegion,
ibmVpcName: ibmVpcName,
baseDomain: baseDomain,
}
}
func (ibm *ibmInstance) GetInstanceID() (string, error) {
instanceID, err := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
}
func (ibm *ibmInstance) Start() error {
instanceID, idErr := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.StartIBMInstance(ibm.client, instanceID)
}
func (ibm *ibmInstance) Stop() error {
instanceID, idErr := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.StopIBMInstance(ibm.client, instanceID)
}
func (ibm *ibmInstance) State() (string, error) {
instanceID, idErr := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.GetIBMInstanceStatus(ibm.client, instanceID)
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | 702941b5-1463-4478-a8e7-3d46bf96a127 | GetIbmNodes | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibm.go | func GetIbmNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
ibmApiKey, ibmRegion, ibmVpcName, credErr := exutil.GetIBMCredentialFromCluster(oc)
o.Expect(credErr).NotTo(o.HaveOccurred())
ibmSession, sessErr := exutil.NewIBMSessionFromEnv(ibmApiKey)
o.Expect(sessErr).NotTo(o.HaveOccurred())
baseDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-o=jsonpath={.spec.baseDomain}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newIbmInstance(oc, ibmSession, ibmRegion, ibmVpcName, nodeName, baseDomain))
}
return results, nil
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | e3f9d49e-a6ee-4f1f-852c-c251756fbbe9 | newIbmInstance | ['ibmInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibm.go | func newIbmInstance(oc *exutil.CLI, client *exutil.IBMSession, ibmRegion, ibmVpcName, nodeName string, baseDomain string) *ibmInstance {
return &ibmInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
ibmRegion: ibmRegion,
ibmVpcName: ibmVpcName,
baseDomain: baseDomain,
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 80b0ea52-b8b3-495e-ae41-cdfb1c2b70a3 | GetInstanceID | ['ibmInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibm.go | func (ibm *ibmInstance) GetInstanceID() (string, error) {
instanceID, err := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 9ed20384-313c-41b6-b1a1-307ad9cc0bb2 | Start | ['ibmInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibm.go | func (ibm *ibmInstance) Start() error {
instanceID, idErr := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.StartIBMInstance(ibm.client, instanceID)
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | a4da3521-7d06-422c-aadd-724b36dbf521 | Stop | ['ibmInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibm.go | func (ibm *ibmInstance) Stop() error {
instanceID, idErr := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.StopIBMInstance(ibm.client, instanceID)
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 4a438d5f-7d95-49ef-ad2b-a21c5174ac9a | State | ['ibmInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibm.go | func (ibm *ibmInstance) State() (string, error) {
instanceID, idErr := exutil.GetIBMInstanceID(ibm.client, ibm.oc, ibm.ibmRegion, ibm.ibmVpcName, ibm.nodeName, ibm.baseDomain)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.GetIBMInstanceStatus(ibm.client, instanceID)
} | disasterrecovery | ||||
file | openshift/openshift-tests-private | e746ed0d-9848-4f07-9dac-d58505fffa13 | compute_ibmpowervs | import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibmpowervs.go | package disasterrecovery
import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type ibmPowerVsInstance struct {
instance
ibmRegion string
ibmVpcName string
clientPowerVs *exutil.IBMPowerVsSession
}
// Get the nodes with the specified label and load the cloud credentials.
func GetIBMPowerNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
ibmApiKey, ibmRegion, ibmVpcName, credErr := exutil.GetIBMCredentialFromCluster(oc)
o.Expect(credErr).NotTo(o.HaveOccurred())
cloudID := exutil.GetIBMPowerVsCloudID(oc, nodeNames[0])
ibmSession, sessErr := exutil.LoginIBMPowerVsCloud(ibmApiKey, ibmRegion, ibmVpcName, cloudID)
o.Expect(sessErr).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newIBMPowerInstance(oc, ibmSession, ibmRegion, ibmVpcName, nodeName))
}
return results, nil
}
func newIBMPowerInstance(oc *exutil.CLI, clientPowerVs *exutil.IBMPowerVsSession, ibmRegion, ibmVpcName, nodeName string) *ibmPowerVsInstance {
return &ibmPowerVsInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
clientPowerVs: clientPowerVs,
ibmRegion: ibmRegion,
ibmVpcName: ibmVpcName,
}
}
func (ibmPws *ibmPowerVsInstance) GetInstanceID() (string, error) {
instanceID, _, err := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
if err == nil {
e2e.Logf("VM instance ID: %s", instanceID)
return instanceID, nil
}
return "", err
}
func (ibmPws *ibmPowerVsInstance) Start() error {
instanceID, _, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "start")
}
func (ibmPws *ibmPowerVsInstance) Stop() error {
instanceID, _, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "stop")
}
func (ibmPws *ibmPowerVsInstance) State() (string, error) {
_, status, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
return status, idErr
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | 6a48c140-4490-44b0-93e1-84c23e312396 | GetIBMPowerNodes | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibmpowervs.go | func GetIBMPowerNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
ibmApiKey, ibmRegion, ibmVpcName, credErr := exutil.GetIBMCredentialFromCluster(oc)
o.Expect(credErr).NotTo(o.HaveOccurred())
cloudID := exutil.GetIBMPowerVsCloudID(oc, nodeNames[0])
ibmSession, sessErr := exutil.LoginIBMPowerVsCloud(ibmApiKey, ibmRegion, ibmVpcName, cloudID)
o.Expect(sessErr).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newIBMPowerInstance(oc, ibmSession, ibmRegion, ibmVpcName, nodeName))
}
return results, nil
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | 67b7b533-e454-453e-a68f-7b482eb27253 | newIBMPowerInstance | ['ibmPowerVsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibmpowervs.go | func newIBMPowerInstance(oc *exutil.CLI, clientPowerVs *exutil.IBMPowerVsSession, ibmRegion, ibmVpcName, nodeName string) *ibmPowerVsInstance {
return &ibmPowerVsInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
clientPowerVs: clientPowerVs,
ibmRegion: ibmRegion,
ibmVpcName: ibmVpcName,
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 005a7726-32be-4a6d-83fc-857ae304ba0a | GetInstanceID | ['ibmPowerVsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibmpowervs.go | func (ibmPws *ibmPowerVsInstance) GetInstanceID() (string, error) {
instanceID, _, err := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
if err == nil {
e2e.Logf("VM instance ID: %s", instanceID)
return instanceID, nil
}
return "", err
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 4a257b1f-d34c-40b9-9dd2-8bdfd07130f5 | Start | ['ibmPowerVsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibmpowervs.go | func (ibmPws *ibmPowerVsInstance) Start() error {
instanceID, _, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "start")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | b3c417c4-ea69-45b0-9f13-411a401cea68 | Stop | ['ibmPowerVsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibmpowervs.go | func (ibmPws *ibmPowerVsInstance) Stop() error {
instanceID, _, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "stop")
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 020da187-8a0f-454d-bbb9-98eb88e155a8 | State | ['ibmPowerVsInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_ibmpowervs.go | func (ibmPws *ibmPowerVsInstance) State() (string, error) {
_, status, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
return status, idErr
} | disasterrecovery | ||||
file | openshift/openshift-tests-private | 7fd0759a-8a55-4901-b75a-b69ff8457cfe | compute_nutanix | import (
"fmt"
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_nutanix.go | package disasterrecovery
import (
"fmt"
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type nutanixInstance struct {
instance
client *exutil.NutanixSession
}
// Get the nodes with the specified label and load the cloud credentials.
func GetNutanixNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
nutanixUsername, nutanixPassword, nutanixEndpointURL, credErr := exutil.GetNutanixCredentialFromCluster(oc)
o.Expect(credErr).NotTo(o.HaveOccurred())
nutanixSession, sessErr := exutil.NewNutanixSession(nutanixUsername, nutanixPassword, nutanixEndpointURL)
o.Expect(sessErr).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newNutanixInstance(oc, nutanixSession, nodeName))
}
return results, nil
}
func newNutanixInstance(oc *exutil.CLI, client *exutil.NutanixSession, nodeName string) *nutanixInstance {
return &nutanixInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
}
}
func (nux *nutanixInstance) GetInstanceID() (string, error) {
instanceID, err := nux.client.GetNutanixInstanceID(nux.nodeName)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
}
func (nux *nutanixInstance) Start() error {
instanceID, idErr := nux.client.GetNutanixInstanceID(nux.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
instanceState, err := nux.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[strings.ToLower(instanceState)]; ok {
err = nux.client.SetNutanixInstanceState("ON", instanceID)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", nux.nodeName, instanceState)
}
return nil
}
func (nux *nutanixInstance) Stop() error {
instanceID, idErr := nux.client.GetNutanixInstanceID(nux.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
instanceState, err := nux.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[strings.ToLower(instanceState)]; ok {
err = nux.client.SetNutanixInstanceState("OFF", instanceID)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", nux.nodeName, instanceState)
}
return nil
}
func (nux *nutanixInstance) State() (string, error) {
return nux.client.GetNutanixInstanceState(nux.nodeName)
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | 8494696b-77c1-4a2a-b158-9136b1b2d9fe | GetNutanixNodes | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_nutanix.go | func GetNutanixNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
nutanixUsername, nutanixPassword, nutanixEndpointURL, credErr := exutil.GetNutanixCredentialFromCluster(oc)
o.Expect(credErr).NotTo(o.HaveOccurred())
nutanixSession, sessErr := exutil.NewNutanixSession(nutanixUsername, nutanixPassword, nutanixEndpointURL)
o.Expect(sessErr).NotTo(o.HaveOccurred())
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newNutanixInstance(oc, nutanixSession, nodeName))
}
return results, nil
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | c4fb1e5a-5108-462f-b84a-359c3e4b7c37 | newNutanixInstance | ['nutanixInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_nutanix.go | func newNutanixInstance(oc *exutil.CLI, client *exutil.NutanixSession, nodeName string) *nutanixInstance {
return &nutanixInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
client: client,
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | ce059af9-cc34-4e4e-91b1-1be318c9a54f | GetInstanceID | ['nutanixInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_nutanix.go | func (nux *nutanixInstance) GetInstanceID() (string, error) {
instanceID, err := nux.client.GetNutanixInstanceID(nux.nodeName)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return instanceID, nil
}
return "", err
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | bf509f1b-c00e-456a-8e6c-e8f3a6f08262 | Start | ['"fmt"', '"strings"'] | ['nutanixInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_nutanix.go | func (nux *nutanixInstance) Start() error {
instanceID, idErr := nux.client.GetNutanixInstanceID(nux.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
instanceState, err := nux.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[strings.ToLower(instanceState)]; ok {
err = nux.client.SetNutanixInstanceState("ON", instanceID)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", nux.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 2530cd7b-a7cf-4949-a1a2-cec3f70b2faa | Stop | ['"fmt"', '"strings"'] | ['nutanixInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_nutanix.go | func (nux *nutanixInstance) Stop() error {
instanceID, idErr := nux.client.GetNutanixInstanceID(nux.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
instanceState, err := nux.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[strings.ToLower(instanceState)]; ok {
err = nux.client.SetNutanixInstanceState("OFF", instanceID)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", nux.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 0674c020-806c-4d5d-a835-49863c60f064 | State | ['nutanixInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_nutanix.go | func (nux *nutanixInstance) State() (string, error) {
return nux.client.GetNutanixInstanceState(nux.nodeName)
} | disasterrecovery | ||||
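The Start and Stop methods above gate the Prism power action on the node's current power state via the package-level startStates/stopStates sets. A minimal, hypothetical usage sketch follows; the function name is illustrative and it assumes the disasterrecovery package context, so GetNutanixNodes, startStates/stopStates and the fmt/exutil imports are available.

// Illustrative sketch (not part of the repository): power-cycle the first Nutanix
// master node through the helpers above.
func powerCycleFirstNutanixMaster(oc *exutil.CLI) error {
	nodes, _ := GetNutanixNodes(oc, "master")
	if len(nodes) == 0 {
		return fmt.Errorf("no master nodes found")
	}
	node := nodes[0]
	// Stop validates the current power state against startStates before issuing "OFF".
	if err := node.Stop(); err != nil {
		return err
	}
	// A real test would poll State() until a stopStates entry is reported before
	// starting again; Start re-validates the power state before issuing "ON".
	return node.Start()
}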
file | openshift/openshift-tests-private | 92e9b004-01c9-4a0a-8b12-5b6280f4f401 | compute_upibaremetal | import (
"fmt"
"gopkg.in/yaml.v3"
"os"
"path"
"strings"
"time"
ipmi "github.com/vmware/goipmi"
"k8s.io/apimachinery/pkg/util/wait"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_upibaremetal.go | package disasterrecovery
import (
"fmt"
"gopkg.in/yaml.v3"
"os"
"path"
"strings"
"time"
ipmi "github.com/vmware/goipmi"
"k8s.io/apimachinery/pkg/util/wait"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
type UPIInstance struct {
instance
upiObj *exutil.RDU2Host
upiClient *ipmi.Client
buildid string
}
const RDU2BaseDomain = "arm.eng.rdu2.redhat.com"
// GetUPIBaremetalNodes gets nodes by label and returns a list of ComputeNode objects with the required information to
// control the nodes.
func GetUPIBaremetalNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(nodeNames[0], RDU2BaseDomain) {
g.Skip("Currently, UPI baremetal DR is only supported on RDU2 clusters.")
}
// Get Bastion Host Address
bastionHost := os.Getenv("QE_BASTION_PUBLIC_ADDRESS")
if bastionHost == "" {
g.Fail("Failed to get the RDU2 bastion address, failing.")
}
// Parse the hosts.yaml file to get the RDU2Host objects.
hostsFilePath := path.Join(os.Getenv("SHARED_DIR"), "hosts.yaml")
_, err = os.Stat(hostsFilePath)
o.Expect(err).NotTo(o.HaveOccurred())
yamlBytes, err := os.ReadFile(hostsFilePath)
o.Expect(err).NotTo(o.HaveOccurred())
// Unmarshal the yaml into a slice of RDU2Host objects
var yamlData []exutil.RDU2Host
err = yaml.Unmarshal(yamlBytes, &yamlData)
o.Expect(err).NotTo(o.HaveOccurred())
// Convert slice to map of name to RDU2Host objects to allow lookup by name
var hostsMap = make(map[string]*exutil.RDU2Host)
for n := range yamlData {
yamlData[n].JumpHost = bastionHost
hostsMap[yamlData[n].Name] = &yamlData[n]
}
// Create the UPIInstance objects and the results slice
var results []ComputeNode
for _, nodeName := range nodeNames {
o.Expect(err).NotTo(o.HaveOccurred())
results = append(results, newUPIbaremetalInstance(oc, nodeName,
hostsMap[strings.Split(nodeName, ".")[0]]))
}
return results, nil
}
func newUPIbaremetalInstance(oc *exutil.CLI, nodeName string, host *exutil.RDU2Host) *UPIInstance {
return &UPIInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
upiObj: host,
}
}
// GetInstanceID returns the instance ID of the node (the host key's value), but it's not used for UPI baremetal.
// Previously, it was returning the bmc_address, but it is not the instance ID and is now meaningless/unreachable.
func (upi *UPIInstance) GetInstanceID() (string, error) {
return upi.upiObj.Host, nil
}
// Start starts the instance
func (upi *UPIInstance) Start() error {
instanceState, err := upi.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = upi.upiObj.StartUPIbaremetalInstance()
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", upi.nodeName, instanceState)
}
return nil
}
// Stop stops the instance
func (upi *UPIInstance) Stop() error {
instanceState, err := upi.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = upi.upiObj.StopUPIbaremetalInstance()
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", upi.nodeName, instanceState)
}
return nil
}
// State returns the state of the instance
func (upi *UPIInstance) State() (string, error) {
var (
instanceState string
statusErr error
)
errVmstate := wait.Poll(10*time.Second, 200*time.Second, func() (bool, error) {
instanceState, statusErr = upi.upiObj.GetMachinePowerStatus()
if statusErr != nil {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errVmstate,
fmt.Sprintf("Failed to get power status for master node: %s, error: %s", upi.nodeName, statusErr))
return strings.ToLower(instanceState), statusErr
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | 0a1414db-f763-4050-992d-d808f3375fd9 | GetUPIBaremetalNodes | ['"os"', '"path"', '"strings"'] | ['UPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_upibaremetal.go | func GetUPIBaremetalNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(nodeNames[0], RDU2BaseDomain) {
g.Skip("Currently, UPI baremetal DR is only supported on RDU2 clusters.")
}
// Get Bastion Host Address
bastionHost := os.Getenv("QE_BASTION_PUBLIC_ADDRESS")
if bastionHost == "" {
g.Fail("Failed to get the RDU2 bastion address, failing.")
}
// Parse the hosts.yaml file to get the RDU2Host objects.
hostsFilePath := path.Join(os.Getenv("SHARED_DIR"), "hosts.yaml")
_, err = os.Stat(hostsFilePath)
o.Expect(err).NotTo(o.HaveOccurred())
yamlBytes, err := os.ReadFile(hostsFilePath)
o.Expect(err).NotTo(o.HaveOccurred())
// Unmarshal the yaml into a slice of RDU2Host objects
var yamlData []exutil.RDU2Host
err = yaml.Unmarshal(yamlBytes, &yamlData)
o.Expect(err).NotTo(o.HaveOccurred())
// Convert slice to map of name to RDU2Host objects to allow lookup by name
var hostsMap = make(map[string]*exutil.RDU2Host)
for n := range yamlData {
yamlData[n].JumpHost = bastionHost
hostsMap[yamlData[n].Name] = &yamlData[n]
}
// Create the UPIInstance objects and the results slice
var results []ComputeNode
for _, nodeName := range nodeNames {
o.Expect(err).NotTo(o.HaveOccurred())
results = append(results, newUPIbaremetalInstance(oc, nodeName,
hostsMap[strings.Split(nodeName, ".")[0]]))
}
return results, nil
} | disasterrecovery | |||
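GetUPIBaremetalNodes parses SHARED_DIR/hosts.yaml into exutil.RDU2Host values, injects the bastion as the jump host, and indexes the entries by short host name. The YAML field names of RDU2Host are not shown here, so the struct tags in the standalone sketch below are assumptions made purely for illustration; exutil.RDU2Host defines the authoritative mapping.

// Illustrative sketch (not part of the repository): the hosts.yaml handling above,
// shown standalone with an assumed rduHost struct.
package main

import (
	"fmt"
	"os"
	"path"

	"gopkg.in/yaml.v3"
)

type rduHost struct {
	Name     string `yaml:"name"`
	Host     string `yaml:"host"`
	JumpHost string `yaml:"jump_host,omitempty"`
}

// loadHosts reads SHARED_DIR/hosts.yaml, sets the bastion as the jump host and
// indexes the entries by short host name, mirroring GetUPIBaremetalNodes above.
func loadHosts(sharedDir, bastion string) (map[string]*rduHost, error) {
	raw, err := os.ReadFile(path.Join(sharedDir, "hosts.yaml"))
	if err != nil {
		return nil, err
	}
	var hosts []rduHost
	if err := yaml.Unmarshal(raw, &hosts); err != nil {
		return nil, err
	}
	byName := make(map[string]*rduHost, len(hosts))
	for i := range hosts {
		hosts[i].JumpHost = bastion
		byName[hosts[i].Name] = &hosts[i]
	}
	return byName, nil
}

func main() {
	hosts, err := loadHosts(os.Getenv("SHARED_DIR"), os.Getenv("QE_BASTION_PUBLIC_ADDRESS"))
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("loaded %d hosts\n", len(hosts))
}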
function | openshift/openshift-tests-private | 06b2e308-76fe-4248-b16d-4c0c8d81f2c3 | newUPIbaremetalInstance | ['UPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_upibaremetal.go | func newUPIbaremetalInstance(oc *exutil.CLI, nodeName string, host *exutil.RDU2Host) *UPIInstance {
return &UPIInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
upiObj: host,
}
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 7c9fb671-4e6c-41f6-be50-279c76c62084 | GetInstanceID | ['UPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_upibaremetal.go | func (upi *UPIInstance) GetInstanceID() (string, error) {
return upi.upiObj.Host, nil
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 3b25557a-19da-4472-a2e1-1c3e5f6111c5 | Start | ['"fmt"'] | ['UPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_upibaremetal.go | func (upi *UPIInstance) Start() error {
instanceState, err := upi.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = upi.upiObj.StartUPIbaremetalInstance()
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", upi.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 8aae920c-fcde-4535-ab98-1af9ed3faf31 | Stop | ['"fmt"'] | ['UPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_upibaremetal.go | func (upi *UPIInstance) Stop() error {
instanceState, err := upi.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = upi.upiObj.StopUPIbaremetalInstance()
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", upi.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | f8470b05-c468-43d9-8c0f-20eb68630845 | State | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['UPIInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_upibaremetal.go | func (upi *UPIInstance) State() (string, error) {
var (
instanceState string
statusErr error
)
errVmstate := wait.Poll(10*time.Second, 200*time.Second, func() (bool, error) {
instanceState, statusErr = upi.upiObj.GetMachinePowerStatus()
if statusErr != nil {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errVmstate,
fmt.Sprintf("Failed to get power status for master node: %s, error: %s", upi.nodeName, statusErr))
return strings.ToLower(instanceState), statusErr
} | disasterrecovery | |||
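State() above retries GetMachinePowerStatus with the older wait.Poll helper, while the apiserver test later in this package already uses the context-aware wait.PollUntilContextTimeout for similar retries. A hedged sketch of the same retry written with that helper follows; the method name is hypothetical and a context import would need to be added to compute_upibaremetal.go.

// Illustrative sketch (not part of the repository): the same power-status retry as
// State(), written with wait.PollUntilContextTimeout.
func (upi *UPIInstance) stateWithContext(ctx context.Context) (string, error) {
	var instanceState string
	err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 200*time.Second, false, func(ctx context.Context) (bool, error) {
		state, statusErr := upi.upiObj.GetMachinePowerStatus()
		if statusErr != nil {
			// Treat BMC errors as transient and keep retrying until the timeout.
			return false, nil
		}
		instanceState = state
		return true, nil
	})
	return strings.ToLower(instanceState), err
}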
file | openshift/openshift-tests-private | eaee3f28-cfe6-44dc-ba2d-6f18fba91327 | compute | import (
"fmt"
"os/exec"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
cvers "github.com/openshift/openshift-tests-private/test/extended/mco"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute.go | package disasterrecovery
import (
"fmt"
"os/exec"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
cvers "github.com/openshift/openshift-tests-private/test/extended/mco"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// ComputeNode interface to handle compute node e.g. start or stop
type ComputeNode interface {
GetName() string
GetInstanceID() (string, error)
Start() error
Stop() error
State() (string, error)
}
type instance struct {
nodeName string
oc *exutil.CLI
}
func (i *instance) GetName() string {
return i.nodeName
}
// ComputeNodes handles ComputeNode interface
type ComputeNodes []ComputeNode
// GetNodes gets master nodes according to the platform and credentials with the specified label.
func GetNodes(oc *exutil.CLI, label string) (ComputeNodes, func()) {
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
e2e.Logf("\n AWS is detected, running the case on AWS\n")
return GetAwsNodes(oc, label)
case "gcp":
e2e.Logf("\n GCP is detected, running the case on gcp\n")
return GetGcpNodes(oc, label)
case "vsphere":
e2e.Logf("\n vsphere is detected, running the case on vsphere\n")
return GetVsphereNodes(oc, label)
case "openstack":
e2e.Logf("\n OSP is detected, running the case on osp\n")
return GetOspNodes(oc, label)
case "azure":
e2e.Logf("\n Azure is detected, running the case on azure\n")
return GetAzureNodes(oc, label)
case "baremetal":
e2e.Logf("\n IPI Baremetal is detected, running the case on baremetal\n")
return GetBaremetalNodes(oc, label)
case "none":
e2e.Logf("\n UPI Baremetal is detected, running the case on baremetal\n")
return GetUPIBaremetalNodes(oc, label)
case "ibmcloud":
e2e.Logf("\n IBM is detected, running the case on IBM\n")
return GetIbmNodes(oc, label)
case "nutanix":
e2e.Logf("\n Nutanix is detected, running the case on nutanix\n")
return GetNutanixNodes(oc, label)
case "powervs":
e2e.Logf("\n IBM Powervs is detected, running the case on PowerVs\n")
return GetIBMPowerNodes(oc, label)
default:
g.Skip("Not support cloud provider for DR cases for now. Test cases should be run on IBM or vsphere or aws or gcp or openstack or azure or baremetal, skip for other platforms!!")
}
return nil, nil
}
func (n ComputeNodes) leaderMasterNodeName(oc *exutil.CLI) ComputeNode {
// get clusterversion
e2e.Logf("Checking clusterversion")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Get leader node")
var leaderNode string
if cvers.CompareVersions(clusterVersion, ">", "4.9") {
leaderNode, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("leases", "kube-controller-manager", "-n", "kube-system", "-o=jsonpath={.spec.holderIdentity}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
masterStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "kube-controller-manager", "-n", "kube-system", "-o", "jsonpath='{.metadata.annotations}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
jqCmd := fmt.Sprintf(`echo %s | jq -r .'"control-plane.alpha.kubernetes.io/leader"'| jq -r .holderIdentity`, masterStr)
masterNode, err := exec.Command("bash", "-c", jqCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
leaderNode = string(masterNode)
}
masterNodeStr := strings.Split(leaderNode, "_")
//Changing format for gcp
if exutil.CheckPlatform(oc) == "gcp" || exutil.CheckPlatform(oc) == "openstack" {
masterNodeStr = strings.Split(masterNodeStr[0], ".")
}
for _, node := range n {
if strings.Contains(node.GetName(), masterNodeStr[0]) {
e2e.Logf("Leader master node is :: %v", node.GetName())
return node
}
}
return nil
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | b32772b0-a0d8-4791-a049-8a7f7e4f4f64 | GetName | ['instance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute.go | func (i *instance) GetName() string {
return i.nodeName
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 52cf3a42-9ecc-45c6-8785-5bc7318018fc | GetNodes | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute.go | func GetNodes(oc *exutil.CLI, label string) (ComputeNodes, func()) {
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
e2e.Logf("\n AWS is detected, running the case on AWS\n")
return GetAwsNodes(oc, label)
case "gcp":
e2e.Logf("\n GCP is detected, running the case on gcp\n")
return GetGcpNodes(oc, label)
case "vsphere":
e2e.Logf("\n vsphere is detected, running the case on vsphere\n")
return GetVsphereNodes(oc, label)
case "openstack":
e2e.Logf("\n OSP is detected, running the case on osp\n")
return GetOspNodes(oc, label)
case "azure":
e2e.Logf("\n Azure is detected, running the case on azure\n")
return GetAzureNodes(oc, label)
case "baremetal":
e2e.Logf("\n IPI Baremetal is detected, running the case on baremetal\n")
return GetBaremetalNodes(oc, label)
case "none":
e2e.Logf("\n UPI Baremetal is detected, running the case on baremetal\n")
return GetUPIBaremetalNodes(oc, label)
case "ibmcloud":
e2e.Logf("\n IBM is detected, running the case on IBM\n")
return GetIbmNodes(oc, label)
case "nutanix":
e2e.Logf("\n Nutanix is detected, running the case on nutanix\n")
return GetNutanixNodes(oc, label)
case "powervs":
e2e.Logf("\n IBM Powervs is detected, running the case on PowerVs\n")
return GetIBMPowerNodes(oc, label)
default:
g.Skip("Not support cloud provider for DR cases for now. Test cases should be run on IBM or vsphere or aws or gcp or openstack or azure or baremetal, skip for other platforms!!")
}
return nil, nil
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | f685ccec-43fb-4c6d-9047-a07cff65a78e | leaderMasterNodeName | ['"fmt"', '"os/exec"', '"strings"'] | ['ComputeNode'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute.go | func (n ComputeNodes) leaderMasterNodeName(oc *exutil.CLI) ComputeNode {
// get clusterversion
e2e.Logf("Checking clusterversion")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Get leader node")
var leaderNode string
if cvers.CompareVersions(clusterVersion, ">", "4.9") {
leaderNode, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("leases", "kube-controller-manager", "-n", "kube-system", "-o=jsonpath={.spec.holderIdentity}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
masterStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "kube-controller-manager", "-n", "kube-system", "-o", "jsonpath='{.metadata.annotations}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
jqCmd := fmt.Sprintf(`echo %s | jq -r .'"control-plane.alpha.kubernetes.io/leader"'| jq -r .holderIdentity`, masterStr)
masterNode, err := exec.Command("bash", "-c", jqCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
leaderNode = string(masterNode)
}
masterNodeStr := strings.Split(leaderNode, "_")
//Changing format for gcp
if exutil.CheckPlatform(oc) == "gcp" || exutil.CheckPlatform(oc) == "openstack" {
masterNodeStr = strings.Split(masterNodeStr[0], ".")
}
for _, node := range n {
if strings.Contains(node.GetName(), masterNodeStr[0]) {
e2e.Logf("Leader master node is :: %v", node.GetName())
return node
}
}
return nil
} | disasterrecovery | |||
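leaderMasterNodeName resolves the holder of the kube-controller-manager lease (or, on pre-4.10 clusters, the leader annotation on the config map) and matches it against the node list returned by GetNodes. A minimal, hypothetical sketch of how the DR test below combines the two follows; the function name is illustrative and assumes the disasterrecovery package context, so startStates and the fmt/exutil imports are available.

// Illustrative sketch (not part of the repository): stop the control-plane node
// that currently holds the kube-controller-manager lease.
func stopLeaderMaster(oc *exutil.CLI) error {
	nodes, cleanup := GetNodes(oc, "master")
	if cleanup != nil {
		defer cleanup()
	}
	leader := nodes.leaderMasterNodeName(oc)
	if leader == nil {
		return fmt.Errorf("could not determine the leader master node")
	}
	state, err := leader.State()
	if err != nil {
		return err
	}
	if _, running := startStates[state]; !running {
		return fmt.Errorf("leader %s is not running (state %s)", leader.GetName(), state)
	}
	return leader.Stop()
}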
test | openshift/openshift-tests-private | 463bc90a-fad3-4277-addc-794a51e3bf02 | apiserver_dr | import (
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_dr.go | package disasterrecovery
import (
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-disasterrecovery] DR_Testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIWithoutNamespace("default")
)
// author: [email protected]
g.It("LEVEL0-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Author:rgangwar-High-19941-[Apiserver] [failure inject] when 1 master is down the cluster should continue serving well without unavailable more than 30s [Disruptive]", func() {
var (
// Adding a wait time of 90s here because the wait poll sometimes takes more than 30s to complete on the aws, gcp and vsphere platforms.
expectedOutageTime = 90
randProject1 = "test-ocp19941-project"
dirname = "/tmp/-OCP-19941/"
nodeName string
)
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
workaroundForOCPBUGS44608 := func() {
// Check if the cluster is abnormal; OCPBUGS-44608 may show up with some delay after the case's last step
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=120s", "--timeout=3m").Execute()
if err == nil {
return
}
// Define the etcd CO status for the degraded state
etcdCoStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "True"}
currentEtcdCoStatus := getCoStatus(oc, "etcd", etcdCoStatus)
// If etcd CO is degraded
if reflect.DeepEqual(currentEtcdCoStatus, etcdCoStatus) {
etcdPodList, err := exutil.GetAllPodsWithLabel(oc, "openshift-etcd", "etcd")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(etcdPodList).ShouldNot(o.BeEmpty())
// Identify the abnormal etcd pod and check if it runs into the bug https://issues.redhat.com/browse/OCPBUGS-44608
errMsg := "dataDir has been destroyed and must be removed from the cluster"
var abnormalEtcdNode, abnormalEtcdPod string
for _, etcdPod := range etcdPodList {
etcdPodName := string(etcdPod)
containerLastState, errEtcd := oc.WithoutNamespace().Run("get").Args("pod", etcdPodName, "-n", "openshift-etcd", "-o", `jsonpath={.status.containerStatuses[0].lastState}`).Output()
o.Expect(errEtcd).NotTo(o.HaveOccurred())
if len(containerLastState) != 0 && strings.Contains(containerLastState, errMsg) {
abnormalEtcdPod = etcdPodName
abnormalEtcdNode, err = oc.WithoutNamespace().Run("get").Args("pod", abnormalEtcdPod, "-n", "openshift-etcd", "-o", `jsonpath={.spec.nodeName}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("[Warning:] The cluster ran into the bug OCPBUGS-44608, fixing with workaround ...")
break
}
}
// Apply the workaround: https://access.redhat.com/solutions/6962106
for _, etcdPod := range etcdPodList {
if etcdPod != abnormalEtcdPod {
removeEtcdMember(oc, etcdPod, abnormalEtcdNode)
break
}
}
// Force redeploy etcd
patch := fmt.Sprintf(`[ {"op": "replace", "path": "/spec/forceRedeploymentReason", "value": "Force Redeploy %v" } ]`, time.Now().UnixNano())
patchForceRedeploymentError := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd/cluster", "--type=json", "-p", patch).Execute()
o.Expect(patchForceRedeploymentError).NotTo(o.HaveOccurred())
// Wait for cluster stability
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
e2e.Logf("Cluster should be healthy befor running dr case.")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before running case :: %s ", err))
}
platform := exutil.CheckPlatform(oc)
isAzureStack, _ := isAzureStackCluster(oc)
exutil.By("1. Get the leader master node of cluster")
nodes, cleanup := GetNodes(oc, "master")
if cleanup != nil {
defer cleanup()
}
// we're only interested in the leader
node := nodes.leaderMasterNodeName(oc)
if node != nil {
nodeName = node.GetName()
} else {
e2e.Failf("Failed to get the leader master node of cluster!")
}
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", randProject1, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
defer func() {
contextErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", "admin").Execute()
o.Expect(contextErr).NotTo(o.HaveOccurred())
contextOutput, contextErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-context").Output()
o.Expect(contextErr).NotTo(o.HaveOccurred())
e2e.Logf("Context after rollack :: %v", contextOutput)
}()
defer func() {
e2e.Logf("Recovering cluster")
vmState, err := node.State()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vmState).ShouldNot(o.BeEmpty(), fmt.Sprintf("Not able to get leader_master_node %s machine instance state", nodeName))
if _, ok := stopStates[vmState]; ok {
e2e.Logf("Starting leader master node %s", nodeName)
err = node.Start()
o.Expect(err).NotTo(o.HaveOccurred())
time.Sleep(10 * time.Second)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 240*time.Second, false, func(cxt context.Context) (bool, error) {
vmState, stateErr := node.State()
if stateErr != nil {
return false, stateErr
}
if _, ok := startStates[vmState]; ok {
e2e.Logf("The leader master node %s has been started completely!", nodeName)
return true, nil
} else {
e2e.Logf("The leader master node %s is in %s vmState!", nodeName, vmState)
return false, nil
}
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The leader master node %s was unable to start with error %v!", nodeName, err))
err = ClusterHealthcheck(oc, "OCP-19941/log")
o.Expect(err).NotTo(o.HaveOccurred())
} else if _, ok := startStates[vmState]; ok {
e2e.Logf("leader master node %s state is already %s", nodeName, vmState)
}
}()
exutil.By("2. Shut down a leader master node to simulate a user failure.")
e2e.Logf("Checking leader_master_node instance state.")
vmState, stateErr := node.State()
o.Expect(stateErr).NotTo(o.HaveOccurred())
o.Expect(vmState).ShouldNot(o.BeEmpty(), fmt.Sprintf("Not able to get leader_master_node %s machine instance state", nodeName))
if _, ok := startStates[vmState]; ok {
e2e.Logf("Bringing down leader master node: %s", nodeName)
err = node.Stop()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node %s is stopping ...", nodeName)
} else {
e2e.Failf("leader_master_node %s instance state is already %s....before running case, so exiting from case run as cluster not ready.", nodeName, vmState)
}
exutil.By("3. When the leader master node is unavailable, apiservers continue to serve after a short interruption.")
// Adding a wait time of 240s here because the wait poll sometimes takes more than 30s to complete on the osp, azurestack and vsphere platforms.
if platform == "openstack" || isAzureStack || platform == "vsphere" {
expectedOutageTime = 240
}
waitTime := expectedOutageTime + 30
timeFirstServiceDisruption := time.Now()
isFirstServiceDisruption := false
anyDisruptionOccurred := false
e2e.Logf("#### Watching start time(s) :: %v ####\n", time.Now().Format(time.RFC3339))
apiserverOutageWatcher := wait.Poll(3*time.Second, time.Duration(waitTime)*time.Second, func() (bool, error) {
checkHealth := func(description string, command []string) error {
_, err := exec.Command(command[0], command[1:]...).Output()
if err != nil {
e2e.Logf("%v :: %s failed :: %s\n", time.Now().Format(time.RFC3339), description, err)
if !isFirstServiceDisruption {
isFirstServiceDisruption = true
timeFirstServiceDisruption = time.Now()
}
return err
}
e2e.Logf("%v :: %s succeeded\n", time.Now().Format(time.RFC3339), description)
return nil
}
getNodeError := checkHealth("KAS health check: obtaining the status of nodes", []string{"oc", "get", "node"})
loginError := checkHealth("OAUTH health check: user admin login", []string{"oc", "login", "-u", "system:admin", "-n", "default"})
getProjectError := checkHealth("OAS health check: obtaining the status of project openshift-apiserver", []string{"bash", "-c", "oc get project/openshift-apiserver 2>&1"})
if isFirstServiceDisruption {
anyDisruptionOccurred = true
e2e.Logf("The first disruption of openshift-apiserver occurred :: %v", timeFirstServiceDisruption.Format(time.RFC3339))
// Check if all apiservers are ready.
if getNodeError == nil && loginError == nil && getProjectError == nil {
if checkHealth("Re-checking node status for KAS health", []string{"oc", "get", "node"}) == nil &&
checkHealth("Re-checking user admin login for OAUTH health", []string{"oc", "login", "-u", "system:admin", "-n", "default"}) == nil &&
checkHealth("Re-checking project openshift-apiserver status for OAS health", []string{"bash", "-c", "oc get project/openshift-apiserver 2>&1"}) == nil {
serviceRecoveryTime := time.Now()
e2e.Logf("#### The cluster apiservers have been recovered at time :: %v ####\n", serviceRecoveryTime.Format("2006-01-02 15:04:05"))
diff := serviceRecoveryTime.Sub(timeFirstServiceDisruption)
e2e.Logf("#### Apiservers outage time(s) :: %f ####\n", diff.Seconds())
if int(diff.Seconds()) > expectedOutageTime {
return false, fmt.Errorf("service of apiserver disruption time is %d seconds", int(diff.Seconds()))
}
return true, nil
}
}
}
return false, nil
})
if !anyDisruptionOccurred {
e2e.Logf("No disruptions occurred during the test.")
} else {
exutil.AssertWaitPollNoErr(apiserverOutageWatcher, fmt.Sprintf("%v, expected time: %v", apiserverOutageWatcher, expectedOutageTime))
}
exutil.By("4. During the leader master node is unavailable, verify the cluster availability")
err = ClusterSanitycheck(oc, randProject1)
if err == nil {
e2e.Logf("Post down leader master node, cluster availability sanity check passed")
} else {
e2e.Failf("Post down leader master node, cluster availability sanity check failed :: %s ", err)
}
e2e.Logf("Ensure that leader master node has been stopped completedly.")
waitTime = 240
err = wait.Poll(10*time.Second, time.Duration(waitTime)*time.Second, func() (bool, error) {
vmState, stateErr := node.State()
o.Expect(stateErr).NotTo(o.HaveOccurred())
if _, ok := stopStates[vmState]; ok {
e2e.Logf("The leader master node %s has been stopped completely!", nodeName)
return true, nil
} else {
e2e.Logf("The leader master node %s is in %s vmState!", nodeName, vmState)
return false, nil
}
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The leader master node %s was unable to stop!", nodeName))
e2e.Logf("Starting leader master node")
err = node.Start()
o.Expect(err).NotTo(o.HaveOccurred())
// Wait for some time and then check the status to avoid a fake start
time.Sleep(10 * time.Second)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 240*time.Second, false, func(cxt context.Context) (bool, error) {
vmState, stateErr := node.State()
if stateErr != nil {
return false, stateErr
}
if _, ok := startStates[vmState]; ok {
e2e.Logf("The leader master node %s has been started completely!", nodeName)
return true, nil
} else {
e2e.Logf("The leader master node %s is in %s vmState!", nodeName, vmState)
return false, nil
}
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The leader master node %s was unable to start!", nodeName))
exutil.By("5. After restarted the leader master node, verify the cluster availability")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err == nil {
e2e.Logf("Post restarting the leader master node, cluster health check passed")
} else {
e2e.Failf("Post restarting the leader master node, cluster health check failed :: %s ", err)
}
if platform == "openstack" {
workaroundForOCPBUGS44608()
}
})
// author: [email protected]
g.It("ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Author:kewang-Medium-67718-[Apiserver] The cluster still works well after restarted frequently multiple times [Disruptive]", func() {
e2e.Logf(">> Restart cluster reliability test <<")
restartNum := 1
// The number of restarts depends on the value of the ENV var TEST_TIMEOUT_DISASTERRECOVERY.
// Some reliability test profiles of Prow CI define the ENV var TEST_TIMEOUT_DISASTERRECOVERY.
// For the reliability test, the number of restarts is in the range (20, 50).
testTimeout, exists := os.LookupEnv("TEST_TIMEOUT_DISASTERRECOVERY")
if exists && testTimeout != "" {
t, err := strconv.Atoi(testTimeout)
o.Expect(err).NotTo(o.HaveOccurred())
if t >= 900 {
restartNum = int(getRandomNum(20, 50))
}
}
e2e.Logf("#### Total restart num:%d ####", restartNum)
restartCluster := func() bool {
var (
dirname = "/tmp/-OCP-67718/"
n = 0
)
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=60s", "--timeout=30m").Output()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before restart cluster :: %s ", err))
}
exutil.By("1. Get nodes of cluster")
masterNodes, cleanup := GetNodes(oc.NotShowInfo(), "master")
if cleanup != nil {
defer cleanup()
}
workerNodes, cleanup := GetNodes(oc.NotShowInfo(), "worker")
if cleanup != nil {
defer cleanup()
}
exutil.By("2. Shut down nodes to stop cluster.")
stopNodesOfCluster := func(nodes ComputeNodes, shutdownType int) {
// The method GetNodes returns a short-name list on GCP, which has to be handled separately
var gcpNodeFullName []string
if exutil.CheckPlatform(oc) == "gcp" && shutdownType == 2 {
gcpMasters := getNodeListByLabel(oc.NotShowInfo(), "node-role.kubernetes.io/master=")
gcpWorkers := getNodeListByLabel(oc.NotShowInfo(), "node-role.kubernetes.io/worker=")
gcpNodeFullName = append(gcpMasters, gcpWorkers...)
for _, nodeName := range gcpNodeFullName {
_, err = exutil.DebugNodeWithChroot(oc.NotShowInfo(), nodeName, "shutdown", "-h", "1")
}
return
}
for _, node := range nodes {
vmState, stateErr := node.State()
nodeName := node.GetName()
o.Expect(stateErr).NotTo(o.HaveOccurred())
o.Expect(vmState).ShouldNot(o.BeEmpty(), fmt.Sprintf("Not able to get node %s machine instance state", nodeName))
if _, ok := startStates[vmState]; ok {
if shutdownType == 1 {
stateErr = node.Stop()
} else {
_, stateErr = exutil.DebugNodeRetryWithOptionsAndChroot(oc.NotShowInfo(), nodeName, []string{"--to-namespace=openshift-kube-apiserver"}, "shutdown", "-h", "1")
}
o.Expect(stateErr).NotTo(o.HaveOccurred())
} else {
e2e.Logf("The node %s are not active :: %s", nodeName, err)
}
}
}
// Number 1 indicates force shutdown, 2 indicates soft shutdown
shutdownType := rand.Intn(2-1+1) + 1
if shutdownType == 1 {
e2e.Logf("Force node shutdown ...")
} else {
e2e.Logf("Nodes are being soft shutdown ...")
}
// Keep this order, worker nodes first, then master nodes, especially soft shutdown
stopNodesOfCluster(workerNodes, shutdownType)
stopNodesOfCluster(masterNodes, shutdownType)
exutil.By("3. Waiting for the cluster to shutdown completely...")
nodes := append(masterNodes, workerNodes...)
numOfNodes := len(nodes)
duration := time.Duration(300)
if shutdownType == 2 {
duration = time.Duration(480)
}
err = wait.PollUntilContextTimeout(context.Background(), 25*time.Second, duration*time.Second, false, func(cxt context.Context) (bool, error) {
poweroffDone := false
for i := 0; i < len(nodes); i++ {
vmState, stateErr := nodes[i].State()
if stateErr != nil {
return false, stateErr
}
if _, ok := stopStates[vmState]; ok {
n += 1
// Remove completely stopped node
nodes = append(nodes[:i], nodes[i+1:]...)
i--
}
}
if n == numOfNodes {
poweroffDone = true
}
e2e.Logf("%d/%d nodes have been stopped completely!", n, numOfNodes)
return poweroffDone, nil
})
exutil.AssertWaitPollNoErr(err, "The clsuter was unable to stop!")
exutil.By("4. Start nodes again after the cluster has been shut down completely")
n = 0
nodes = append(masterNodes, workerNodes...)
for _, node := range nodes {
err = node.Start()
if err != nil {
e2e.Failf("Failed to start the node %s", node.GetName())
}
}
err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, duration*time.Second, false, func(cxt context.Context) (bool, error) {
poweronDone := false
for i := 0; i < len(nodes); i++ {
vmState, stateErr := nodes[i].State()
if stateErr != nil {
return false, stateErr
}
if _, ok := startStates[vmState]; ok {
n += 1
// Remove completely started node
nodes = append(nodes[:i], nodes[i+1:]...)
i--
}
}
if n == numOfNodes {
poweronDone = true
}
e2e.Logf("%d/%d nodes have been started completely!", n, numOfNodes)
return poweronDone, nil
})
exutil.AssertWaitPollNoErr(err, "The clsuter was unable to start up!")
exutil.By("5. After restarted nodes of the cluster, verify the cluster availability")
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=60s", "--timeout=30m").Output()
if err != nil {
e2e.Logf("Post restarting the cluster, cluster health check failed :: %s ", err)
return false
}
return true
}
for i := 0; i < restartNum; i++ {
if ok := restartCluster(); ok {
e2e.Logf("The cluster restart %d: Succeeded", i+1)
}
}
})
})
| package disasterrecovery | ||||
test case | openshift/openshift-tests-private | 8f35192c-8523-41d5-875a-a72cf96c8562 | LEVEL0-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Author:rgangwar-High-19941-[Apiserver] [failure inject] when 1 master is down the cluster should continue serving well without unavailable more than 30s [Disruptive] | ['"context"', '"fmt"', '"os"', '"os/exec"', '"reflect"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_dr.go | g.It("LEVEL0-ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Author:rgangwar-High-19941-[Apiserver] [failure inject] when 1 master is down the cluster should continue serving well without unavailable more than 30s [Disruptive]", func() {
var (
// Adding a wait time of 90s here because the wait poll sometimes takes more than 30s to complete on the aws, gcp and vsphere platforms.
expectedOutageTime = 90
randProject1 = "test-ocp19941-project"
dirname = "/tmp/-OCP-19941/"
nodeName string
)
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
workaroundForOCPBUGS44608 := func() {
// Check if the cluster is abnormal; OCPBUGS-44608 may show up with some delay after the case's last step
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=120s", "--timeout=3m").Execute()
if err == nil {
return
}
// Define the etcd CO status for the degraded state
etcdCoStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "True"}
currentEtcdCoStatus := getCoStatus(oc, "etcd", etcdCoStatus)
// If etcd CO is degraded
if reflect.DeepEqual(currentEtcdCoStatus, etcdCoStatus) {
etcdPodList, err := exutil.GetAllPodsWithLabel(oc, "openshift-etcd", "etcd")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(etcdPodList).ShouldNot(o.BeEmpty())
// Identify the abnormal etcd pod and check if it runs into the bug https://issues.redhat.com/browse/OCPBUGS-44608
errMsg := "dataDir has been destroyed and must be removed from the cluster"
var abnormalEtcdNode, abnormalEtcdPod string
for _, etcdPod := range etcdPodList {
etcdPodName := string(etcdPod)
containerLastState, errEtcd := oc.WithoutNamespace().Run("get").Args("pod", etcdPodName, "-n", "openshift-etcd", "-o", `jsonpath={.status.containerStatuses[0].lastState}`).Output()
o.Expect(errEtcd).NotTo(o.HaveOccurred())
if len(containerLastState) != 0 && strings.Contains(containerLastState, errMsg) {
abnormalEtcdPod = etcdPodName
abnormalEtcdNode, err = oc.WithoutNamespace().Run("get").Args("pod", abnormalEtcdPod, "-n", "openshift-etcd", "-o", `jsonpath={.spec.nodeName}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("[Warning:] The cluster ran into the bug OCPBUGS-44608, fixing with workaround ...")
break
}
}
// Apply the workaround: https://access.redhat.com/solutions/6962106
for _, etcdPod := range etcdPodList {
if etcdPod != abnormalEtcdPod {
removeEtcdMember(oc, etcdPod, abnormalEtcdNode)
break
}
}
// Force redeploy etcd
patch := fmt.Sprintf(`[ {"op": "replace", "path": "/spec/forceRedeploymentReason", "value": "Force Redeploy %v" } ]`, time.Now().UnixNano())
patchForceRedeploymentError := oc.AsAdmin().WithoutNamespace().Run("patch").Args("etcd/cluster", "--type=json", "-p", patch).Execute()
o.Expect(patchForceRedeploymentError).NotTo(o.HaveOccurred())
// Wait for cluster stability
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
e2e.Logf("Cluster should be healthy befor running dr case.")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before running case :: %s ", err))
}
platform := exutil.CheckPlatform(oc)
isAzureStack, _ := isAzureStackCluster(oc)
exutil.By("1. Get the leader master node of cluster")
nodes, cleanup := GetNodes(oc, "master")
if cleanup != nil {
defer cleanup()
}
// we're only interested in the leader
node := nodes.leaderMasterNodeName(oc)
if node != nil {
nodeName = node.GetName()
} else {
e2e.Failf("Failed to get the leader master node of cluster!")
}
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", randProject1, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
defer func() {
contextErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", "admin").Execute()
o.Expect(contextErr).NotTo(o.HaveOccurred())
contextOutput, contextErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-context").Output()
o.Expect(contextErr).NotTo(o.HaveOccurred())
e2e.Logf("Context after rollack :: %v", contextOutput)
}()
defer func() {
e2e.Logf("Recovering cluster")
vmState, err := node.State()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vmState).ShouldNot(o.BeEmpty(), fmt.Sprintf("Not able to get leader_master_node %s machine instance state", nodeName))
if _, ok := stopStates[vmState]; ok {
e2e.Logf("Starting leader master node %s", nodeName)
err = node.Start()
o.Expect(err).NotTo(o.HaveOccurred())
time.Sleep(10 * time.Second)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 240*time.Second, false, func(cxt context.Context) (bool, error) {
vmState, stateErr := node.State()
if stateErr != nil {
return false, stateErr
}
if _, ok := startStates[vmState]; ok {
e2e.Logf("The leader master node %s has been started completely!", nodeName)
return true, nil
} else {
e2e.Logf("The leader master node %s is in %s vmState!", nodeName, vmState)
return false, nil
}
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The leader master node %s was unable to start with error %v!", nodeName, err))
err = ClusterHealthcheck(oc, "OCP-19941/log")
o.Expect(err).NotTo(o.HaveOccurred())
} else if _, ok := startStates[vmState]; ok {
e2e.Logf("leader master node %s state is already %s", nodeName, vmState)
}
}()
exutil.By("2. Shut down a leader master node to simulate a user failure.")
e2e.Logf("Checking leader_master_node instance state.")
vmState, stateErr := node.State()
o.Expect(stateErr).NotTo(o.HaveOccurred())
o.Expect(vmState).ShouldNot(o.BeEmpty(), fmt.Sprintf("Not able to get leader_master_node %s machine instance state", nodeName))
if _, ok := startStates[vmState]; ok {
e2e.Logf("Bringing down leader master node: %s", nodeName)
err = node.Stop()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node %s is stopping ...", nodeName)
} else {
e2e.Failf("leader_master_node %s instance state is already %s....before running case, so exiting from case run as cluster not ready.", nodeName, vmState)
}
exutil.By("3. When the leader master node is unavailable, apiservers continue to serve after a short interruption.")
// Adding a wait time of 240s here because the wait poll sometimes takes more than 30s to complete on the osp, azurestack and vsphere platforms.
if platform == "openstack" || isAzureStack || platform == "vsphere" {
expectedOutageTime = 240
}
waitTime := expectedOutageTime + 30
timeFirstServiceDisruption := time.Now()
isFirstServiceDisruption := false
anyDisruptionOccurred := false
e2e.Logf("#### Watching start time(s) :: %v ####\n", time.Now().Format(time.RFC3339))
apiserverOutageWatcher := wait.Poll(3*time.Second, time.Duration(waitTime)*time.Second, func() (bool, error) {
checkHealth := func(description string, command []string) error {
_, err := exec.Command(command[0], command[1:]...).Output()
if err != nil {
e2e.Logf("%v :: %s failed :: %s\n", time.Now().Format(time.RFC3339), description, err)
if !isFirstServiceDisruption {
isFirstServiceDisruption = true
timeFirstServiceDisruption = time.Now()
}
return err
}
e2e.Logf("%v :: %s succeeded\n", time.Now().Format(time.RFC3339), description)
return nil
}
getNodeError := checkHealth("KAS health check: obtaining the status of nodes", []string{"oc", "get", "node"})
loginError := checkHealth("OAUTH health check: user admin login", []string{"oc", "login", "-u", "system:admin", "-n", "default"})
getProjectError := checkHealth("OAS health check: obtaining the status of project openshift-apiserver", []string{"bash", "-c", "oc get project/openshift-apiserver 2>&1"})
if isFirstServiceDisruption {
anyDisruptionOccurred = true
e2e.Logf("The first disruption of openshift-apiserver occurred :: %v", timeFirstServiceDisruption.Format(time.RFC3339))
// Check if all apiservers are ready.
if getNodeError == nil && loginError == nil && getProjectError == nil {
if checkHealth("Re-checking node status for KAS health", []string{"oc", "get", "node"}) == nil &&
checkHealth("Re-checking user admin login for OAUTH health", []string{"oc", "login", "-u", "system:admin", "-n", "default"}) == nil &&
checkHealth("Re-checking project openshift-apiserver status for OAS health", []string{"bash", "-c", "oc get project/openshift-apiserver 2>&1"}) == nil {
serviceRecoveryTime := time.Now()
e2e.Logf("#### The cluster apiservers have been recovered at time :: %v ####\n", serviceRecoveryTime.Format("2006-01-02 15:04:05"))
diff := serviceRecoveryTime.Sub(timeFirstServiceDisruption)
e2e.Logf("#### Apiservers outage time(s) :: %f ####\n", diff.Seconds())
if int(diff.Seconds()) > expectedOutageTime {
return false, fmt.Errorf("service of apiserver disruption time is %d seconds", int(diff.Seconds()))
}
return true, nil
}
}
}
return false, nil
})
if !anyDisruptionOccurred {
e2e.Logf("No disruptions occurred during the test.")
} else {
exutil.AssertWaitPollNoErr(apiserverOutageWatcher, fmt.Sprintf("%v, expected time: %v", apiserverOutageWatcher, expectedOutageTime))
}
exutil.By("4. During the leader master node is unavailable, verify the cluster availability")
err = ClusterSanitycheck(oc, randProject1)
if err == nil {
e2e.Logf("Post down leader master node, cluster availability sanity check passed")
} else {
e2e.Failf("Post down leader master node, cluster availability sanity check failed :: %s ", err)
}
e2e.Logf("Ensure that leader master node has been stopped completedly.")
waitTime = 240
err = wait.Poll(10*time.Second, time.Duration(waitTime)*time.Second, func() (bool, error) {
vmState, stateErr := node.State()
o.Expect(stateErr).NotTo(o.HaveOccurred())
if _, ok := stopStates[vmState]; ok {
e2e.Logf("The leader master node %s has been stopped completely!", nodeName)
return true, nil
} else {
e2e.Logf("The leader master node %s is in %s vmState!", nodeName, vmState)
return false, nil
}
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The leader master node %s was unable to stop!", nodeName))
e2e.Logf("Starting leader master node")
err = node.Start()
o.Expect(err).NotTo(o.HaveOccurred())
// Wait for some time and then check the status to avoid a fake start
time.Sleep(10 * time.Second)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 240*time.Second, false, func(cxt context.Context) (bool, error) {
vmState, stateErr := node.State()
if stateErr != nil {
return false, stateErr
}
if _, ok := startStates[vmState]; ok {
e2e.Logf("The leader master node %s has been started completely!", nodeName)
return true, nil
} else {
e2e.Logf("The leader master node %s is in %s vmState!", nodeName, vmState)
return false, nil
}
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The leader master node %s was unable to start!", nodeName))
exutil.By("5. After restarted the leader master node, verify the cluster availability")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=30s", "--timeout=20m").Execute()
if err == nil {
e2e.Logf("Post restarting the leader master node, cluster health check passed")
} else {
e2e.Failf("Post restarting the leader master node, cluster health check failed :: %s ", err)
}
if platform == "openstack" {
workaroundForOCPBUGS44608()
}
}) | |||||
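The core of the outage check in the test above is the window between the first failed health probe and the first probe cycle in which every check succeeds again; the test then compares that window with expectedOutageTime. A generic, hypothetical sketch of the measurement, decoupled from oc, follows; probe stands in for the three health checks and the names and signature are assumptions for the example.

// Illustrative sketch (not part of the repository): measure the first apiserver
// disruption window. probe returns nil when all health checks pass; the result is
// the length of the first disruption, or zero if no disruption was observed.
func measureOutage(probe func() error, interval, timeout time.Duration) (time.Duration, error) {
	var firstFailure time.Time
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if err := probe(); err != nil {
			if firstFailure.IsZero() {
				firstFailure = time.Now() // remember when the disruption started
			}
		} else if !firstFailure.IsZero() {
			// First fully successful probe after a failure: report the outage length.
			return time.Since(firstFailure), nil
		}
		time.Sleep(interval)
	}
	if firstFailure.IsZero() {
		return 0, nil
	}
	return 0, fmt.Errorf("apiservers did not recover within %v", timeout)
}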
test case | openshift/openshift-tests-private | b1bebcb4-0dfa-44ee-9f3e-03b5dc191a0c | ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Author:kewang-Medium-67718-[Apiserver] The cluster still works well after restarted frequently multiple times [Disruptive] | ['"context"', '"fmt"', '"math/rand"', '"os"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_dr.go | g.It("ROSA-ARO-OSD_CCS-NonPreRelease-Longduration-Author:kewang-Medium-67718-[Apiserver] The cluster still works well after restarted frequently multiple times [Disruptive]", func() {
e2e.Logf(">> Restart cluster reliability test <<")
restartNum := 1
// The number of restarts depends on the value of the ENV var TEST_TIMEOUT_DISASTERRECOVERY.
// Some reliability test profiles of Prow CI define the ENV var TEST_TIMEOUT_DISASTERRECOVERY.
// For the reliability test, the number of restarts is in the range (20, 50).
testTimeout, exists := os.LookupEnv("TEST_TIMEOUT_DISASTERRECOVERY")
if exists && testTimeout != "" {
t, err := strconv.Atoi(testTimeout)
o.Expect(err).NotTo(o.HaveOccurred())
if t >= 900 {
restartNum = int(getRandomNum(20, 50))
}
}
e2e.Logf("#### Total restart num:%d ####", restartNum)
restartCluster := func() bool {
var (
dirname = "/tmp/-OCP-67718/"
n = 0
)
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=60s", "--timeout=30m").Output()
if err != nil {
g.Skip(fmt.Sprintf("Cluster health check failed before restart cluster :: %s ", err))
}
exutil.By("1. Get nodes of cluster")
masterNodes, cleanup := GetNodes(oc.NotShowInfo(), "master")
if cleanup != nil {
defer cleanup()
}
workerNodes, cleanup := GetNodes(oc.NotShowInfo(), "worker")
if cleanup != nil {
defer cleanup()
}
exutil.By("2. Shut down nodes to stop cluster.")
stopNodesOfCluster := func(nodes ComputeNodes, shutdownType int) {
// The method GetNodes returns a short-name list on GCP, which has to be handled separately
var gcpNodeFullName []string
if exutil.CheckPlatform(oc) == "gcp" && shutdownType == 2 {
gcpMasters := getNodeListByLabel(oc.NotShowInfo(), "node-role.kubernetes.io/master=")
gcpWorkers := getNodeListByLabel(oc.NotShowInfo(), "node-role.kubernetes.io/worker=")
gcpNodeFullName = append(gcpMasters, gcpWorkers...)
for _, nodeName := range gcpNodeFullName {
_, err = exutil.DebugNodeWithChroot(oc.NotShowInfo(), nodeName, "shutdown", "-h", "1")
}
return
}
for _, node := range nodes {
vmState, stateErr := node.State()
nodeName := node.GetName()
o.Expect(stateErr).NotTo(o.HaveOccurred())
o.Expect(vmState).ShouldNot(o.BeEmpty(), fmt.Sprintf("Not able to get node %s machine instance state", nodeName))
if _, ok := startStates[vmState]; ok {
if shutdownType == 1 {
stateErr = node.Stop()
} else {
_, stateErr = exutil.DebugNodeRetryWithOptionsAndChroot(oc.NotShowInfo(), nodeName, []string{"--to-namespace=openshift-kube-apiserver"}, "shutdown", "-h", "1")
}
o.Expect(stateErr).NotTo(o.HaveOccurred())
} else {
e2e.Logf("The node %s are not active :: %s", nodeName, err)
}
}
}
// Number 1 indicates force shutdown, 2 indicates soft shutdown
shutdownType := rand.Intn(2-1+1) + 1
if shutdownType == 1 {
e2e.Logf("Force node shutdown ...")
} else {
e2e.Logf("Nodes are being soft shutdown ...")
}
// Keep this order, worker nodes first, then master nodes, especially soft shutdown
stopNodesOfCluster(workerNodes, shutdownType)
stopNodesOfCluster(masterNodes, shutdownType)
exutil.By("3. Waiting for the cluster to shutdown completely...")
nodes := append(masterNodes, workerNodes...)
numOfNodes := len(nodes)
duration := time.Duration(300)
if shutdownType == 2 {
duration = time.Duration(480)
}
err = wait.PollUntilContextTimeout(context.Background(), 25*time.Second, duration*time.Second, false, func(cxt context.Context) (bool, error) {
poweroffDone := false
for i := 0; i < len(nodes); i++ {
vmState, stateErr := nodes[i].State()
if stateErr != nil {
return false, stateErr
}
if _, ok := stopStates[vmState]; ok {
n += 1
// Remove completely stopped node
nodes = append(nodes[:i], nodes[i+1:]...)
i--
}
}
if n == numOfNodes {
poweroffDone = true
}
e2e.Logf("%d/%d nodes have been stopped completely!", n, numOfNodes)
return poweroffDone, nil
})
exutil.AssertWaitPollNoErr(err, "The clsuter was unable to stop!")
exutil.By("4. Start nodes again after the cluster has been shut down completely")
n = 0
nodes = append(masterNodes, workerNodes...)
for _, node := range nodes {
err = node.Start()
if err != nil {
e2e.Failf("Failed to start the node %s", node.GetName())
}
}
err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, duration*time.Second, false, func(cxt context.Context) (bool, error) {
poweronDone := false
for i := 0; i < len(nodes); i++ {
vmState, stateErr := nodes[i].State()
if stateErr != nil {
return false, stateErr
}
if _, ok := startStates[vmState]; ok {
n += 1
					// Remove completely started node
nodes = append(nodes[:i], nodes[i+1:]...)
i--
}
}
if n == numOfNodes {
poweronDone = true
}
e2e.Logf("%d/%d nodes have been started completely!", n, numOfNodes)
return poweronDone, nil
})
exutil.AssertWaitPollNoErr(err, "The clsuter was unable to start up!")
exutil.By("5. After restarted nodes of the cluster, verify the cluster availability")
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period=60s", "--timeout=30m").Output()
if err != nil {
e2e.Logf("Post restarting the cluster, cluster health check failed :: %s ", err)
return false
}
return true
}
for i := 0; i < restartNum; i++ {
if ok := restartCluster(); ok {
e2e.Logf("The cluster restart %d: Succeeded", i+1)
}
}
}) | |||||
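// Illustrative sketch (not part of the test above): the two poll loops remove a
// node from the working slice once it reaches the desired power state, using the
// remove-and-decrement pattern shown here on a plain string slice.
reached := map[string]bool{"worker-0": true}
nodesLeft := []string{"master-0", "master-1", "worker-0"}
for i := 0; i < len(nodesLeft); i++ {
	if reached[nodesLeft[i]] {
		nodesLeft = append(nodesLeft[:i], nodesLeft[i+1:]...)
		i-- // compensate for the element that was just removed
	}
}
// nodesLeft now holds only the nodes that still need to be polled.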
file | openshift/openshift-tests-private | e6cd0625-1a2d-49bf-8645-a0fc3966e7fd | apiserver_util | import (
"context"
"fmt"
"math/rand"
"os/exec"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | package disasterrecovery
import (
"context"
"fmt"
"math/rand"
"os/exec"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
const (
asAdmin = true
withoutNamespace = true
contain = false
ok = true
defaultRegistryServiceURL = "image-registry.openshift-image-registry.svc:5000"
)
var (
startStates = map[string]bool{
exutil.BMPoweredOn: true,
"running": true,
"active": true,
"ready": true,
}
stopStates = map[string]bool{
exutil.BMPoweredOff: true,
"stopped": true,
"shutoff": true,
"terminated": true,
"paused": true,
"deallocated": true,
"notready": true,
}
)
// ClusterSanitycheck does a sanity check on the cluster by deploying and exercising a test application.
func ClusterSanitycheck(oc *exutil.CLI, projectName string) error {
e2e.Logf("Running cluster sanity")
// Helper function for polling
pollAndLog := func(interval, timeout time.Duration, action func() error, successMsg, errorMsg string) error {
err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(cxt context.Context) (bool, error) {
if errDo := action(); errDo != nil {
return false, nil
}
e2e.Logf(successMsg)
return true, nil
})
if err != nil {
return fmt.Errorf(errorMsg)
}
return nil
}
// Create or switch to the project
err := pollAndLog(10*time.Second, 600*time.Second, func() error {
cmd := fmt.Sprintf(`oc new-project %s --skip-config-write || oc project %s`, projectName, projectName)
_, err := exec.Command("bash", "-c", cmd).Output()
return err
}, fmt.Sprintf("oc new-project %s succeeded", projectName), fmt.Sprintf("oc new-project %s failed", projectName))
if err != nil {
return err
}
err = pollAndLog(10*time.Second, 600*time.Second, func() error {
// Run the oc new-app command
output, err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", projectName).Output()
// Check for specific errors related to existing resources
if err != nil && strings.Contains(output, "already exists") {
// Log the conflict
e2e.Logf("Resource already exists, attempting to clean up...")
// Delete existing deployment and service
errDel := oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment", "hello-openshift", "-n", projectName).Execute()
if errDel != nil {
return fmt.Errorf("failed to delete existing deployment: %v", errDel)
}
errDel = oc.AsAdmin().WithoutNamespace().Run("delete").Args("service", "hello-openshift", "-n", projectName).Execute()
if errDel != nil {
return fmt.Errorf("failed to delete existing service: %v", errDel)
}
// Retry the oc new-app command after cleanup
err = oc.AsAdmin().WithoutNamespace().Run("new-app").
Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", projectName).Execute()
if err != nil {
return fmt.Errorf("failed to create new app after cleanup: %v", err)
}
}
// If there are no errors or the app was created successfully
return err
}, "oc new app succeeded", "oc new app failed")
// Return the error if the process fails
if err != nil {
return err
}
// Check deployment logs
err = pollAndLog(15*time.Second, 900*time.Second, func() error {
return oc.AsAdmin().WithoutNamespace().Run("logs").
Args("deployment/hello-openshift", "-n", projectName).Execute()
}, "oc log deployment succeeded", "oc log deployment failed")
o.Expect(err).NotTo(o.HaveOccurred())
if err != nil {
return err
}
// Get test pod name
gettestpod, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args("pod", "-n", projectName, "--no-headers", "-o", `jsonpath={.items[0].metadata.name}`).Output()
if err != nil {
return err
}
// Execute command in test pod
err = pollAndLog(5*time.Second, 60*time.Second, func() error {
return oc.AsAdmin().WithoutNamespace().Run("exec").
Args("-n", projectName, gettestpod, "--", "/bin/sh", "-c", `echo 'Test'`).Execute()
}, "oc exec succeeded", "oc exec failed")
return err
}
// ClusterHealthcheck performs cluster health checks covering nodes, operators and pods.
func ClusterHealthcheck(oc *exutil.CLI, dirname string) error {
err := ClusterNodesHealthcheck(oc, 900, dirname)
if err != nil {
return fmt.Errorf("%s: %w", "Failed to cluster health check::Abnormal nodes found ", err)
}
err = ClusterOperatorHealthcheck(oc, 1500, dirname)
if err != nil {
return fmt.Errorf("%s: %w", "Failed to cluster health check::Abnormal cluster operators found", err)
}
// Check the load of cluster nodes
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, false, func(cxt context.Context) (bool, error) {
_, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes.metrics").Output()
if err1 != nil {
e2e.Logf("Nodes metrics are not ready!")
return false, nil
}
e2e.Logf("Nodes metrics are ready!")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Unable to get nodes metrics!")
outTop, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "nodes").Output()
e2e.Logf("#### Output load of cluster nodes ####\n%s", outTop)
err = ClusterPodsHealthcheck(oc, 900, dirname)
if err != nil {
return fmt.Errorf("%s: %w", "Failed to cluster health check::Abnormal pods found", err)
}
return nil
}
// ClusterOperatorHealthcheck checks for abnormal cluster operators.
func ClusterOperatorHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
e2e.Logf("Check the abnormal operators")
errCo := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
coLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "--no-headers").OutputToFile(dirname)
if err == nil {
cmd := fmt.Sprintf(`cat %v | grep -v '.True.*False.*False' || true`, coLogFile)
coLogs, err := exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(coLogs) > 0 {
return false, nil
}
} else {
return false, nil
}
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("No abnormality found in cluster operators...")
return true, nil
})
if errCo != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errCo
}
// ClusterPodsHealthcheck checks for abnormal pods.
func ClusterPodsHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
e2e.Logf("Check the abnormal pods")
var podLogs []byte
errPod := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
podLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-A").OutputToFile(dirname)
if err == nil {
cmd := fmt.Sprintf(`cat %v | grep -ivE 'Running|Completed|namespace|installer' || true`, podLogFile)
podLogs, err = exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(podLogs) > 0 {
return false, nil
}
} else {
return false, nil
}
e2e.Logf("No abnormality found in pods...")
return true, nil
})
if errPod != nil {
e2e.Logf("%s", podLogs)
}
return errPod
}
// ClusterNodesHealthcheck checks for abnormal nodes.
func ClusterNodesHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
errNode := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
if err == nil {
if strings.Contains(output, "NotReady") || strings.Contains(output, "SchedulingDisabled") {
return false, nil
}
} else {
return false, nil
}
e2e.Logf("Nodes are normal...")
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
})
if errNode != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errNode
}
func isAzureStackCluster(oc *exutil.CLI) (bool, string) {
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.ToLower(cloudName) == "azurestackcloud" {
e2e.Logf("This is Azure Stack cluster.")
return true, cloudName
}
return false, ""
}
// Get a random number of int32 type [m,n], n > m
func getRandomNum(m int32, n int32) int32 {
rand.Seed(time.Now().UnixNano())
return rand.Int31n(n-m+1) + m
}
// doAction runs the given oc verb with the requested admin and namespace options.
func doAction(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) {
if asAdmin && withoutNamespace {
return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output()
}
if asAdmin && !withoutNamespace {
return oc.AsAdmin().Run(action).Args(parameters...).Output()
}
if !asAdmin && withoutNamespace {
return oc.WithoutNamespace().Run(action).Args(parameters...).Output()
}
if !asAdmin && !withoutNamespace {
return oc.Run(action).Args(parameters...).Output()
}
return "", nil
}
// getResource gets an existing resource via "oc get".
func getResource(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) {
return doAction(oc, "get", asAdmin, withoutNamespace, parameters...)
}
func getCoStatus(oc *exutil.CLI, coName string, statusToCompare map[string]string) map[string]string {
newStatusToCompare := make(map[string]string)
for key := range statusToCompare {
args := fmt.Sprintf(`-o=jsonpath={.status.conditions[?(.type == '%s')].status}`, key)
status, _ := getResource(oc, asAdmin, withoutNamespace, "co", coName, args)
newStatusToCompare[key] = status
}
return newStatusToCompare
}
// Helper function to remove an etcd member of the abnormal node
func removeEtcdMember(oc *exutil.CLI, pod, abnormalNode string) {
listCmd := fmt.Sprintf("etcdctl member list | awk -F, '$3 ~ /%v/ {print $1}'", abnormalNode)
memberID, err := oc.Run("exec").WithoutNamespace().
Args(pod, "-n", "openshift-etcd", "--", "sh", "-c", listCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
removeCmd := fmt.Sprintf("etcdctl member remove %s", memberID)
_, err = oc.Run("exec").WithoutNamespace().
Args(pod, "-n", "openshift-etcd", "--", "sh", "-c", removeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | c4b4e9f0-2313-4cf6-a8cf-7ba17b61a679 | ClusterSanitycheck | ['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func ClusterSanitycheck(oc *exutil.CLI, projectName string) error {
e2e.Logf("Running cluster sanity")
// Helper function for polling
pollAndLog := func(interval, timeout time.Duration, action func() error, successMsg, errorMsg string) error {
err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(cxt context.Context) (bool, error) {
if errDo := action(); errDo != nil {
return false, nil
}
e2e.Logf(successMsg)
return true, nil
})
if err != nil {
return fmt.Errorf(errorMsg)
}
return nil
}
// Create or switch to the project
err := pollAndLog(10*time.Second, 600*time.Second, func() error {
cmd := fmt.Sprintf(`oc new-project %s --skip-config-write || oc project %s`, projectName, projectName)
_, err := exec.Command("bash", "-c", cmd).Output()
return err
}, fmt.Sprintf("oc new-project %s succeeded", projectName), fmt.Sprintf("oc new-project %s failed", projectName))
if err != nil {
return err
}
err = pollAndLog(10*time.Second, 600*time.Second, func() error {
// Run the oc new-app command
output, err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", projectName).Output()
// Check for specific errors related to existing resources
if err != nil && strings.Contains(output, "already exists") {
// Log the conflict
e2e.Logf("Resource already exists, attempting to clean up...")
// Delete existing deployment and service
errDel := oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment", "hello-openshift", "-n", projectName).Execute()
if errDel != nil {
return fmt.Errorf("failed to delete existing deployment: %v", errDel)
}
errDel = oc.AsAdmin().WithoutNamespace().Run("delete").Args("service", "hello-openshift", "-n", projectName).Execute()
if errDel != nil {
return fmt.Errorf("failed to delete existing service: %v", errDel)
}
// Retry the oc new-app command after cleanup
err = oc.AsAdmin().WithoutNamespace().Run("new-app").
Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", projectName).Execute()
if err != nil {
return fmt.Errorf("failed to create new app after cleanup: %v", err)
}
}
// If there are no errors or the app was created successfully
return err
}, "oc new app succeeded", "oc new app failed")
// Return the error if the process fails
if err != nil {
return err
}
// Check deployment logs
err = pollAndLog(15*time.Second, 900*time.Second, func() error {
return oc.AsAdmin().WithoutNamespace().Run("logs").
Args("deployment/hello-openshift", "-n", projectName).Execute()
}, "oc log deployment succeeded", "oc log deployment failed")
o.Expect(err).NotTo(o.HaveOccurred())
if err != nil {
return err
}
// Get test pod name
gettestpod, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args("pod", "-n", projectName, "--no-headers", "-o", `jsonpath={.items[0].metadata.name}`).Output()
if err != nil {
return err
}
// Execute command in test pod
err = pollAndLog(5*time.Second, 60*time.Second, func() error {
return oc.AsAdmin().WithoutNamespace().Run("exec").
Args("-n", projectName, gettestpod, "--", "/bin/sh", "-c", `echo 'Test'`).Execute()
}, "oc exec succeeded", "oc exec failed")
return err
} | disasterrecovery | ||||
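// Minimal usage sketch (assumptions: called from a test body with an initialized
// *exutil.CLI; the project name is illustrative only).
if err := ClusterSanitycheck(oc, "dr-sanity-check"); err != nil {
	e2e.Failf("Cluster sanity check failed :: %v", err)
}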
function | openshift/openshift-tests-private | 779a9005-c327-492a-9193-d07b330d94c7 | ClusterHealthcheck | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func ClusterHealthcheck(oc *exutil.CLI, dirname string) error {
err := ClusterNodesHealthcheck(oc, 900, dirname)
if err != nil {
return fmt.Errorf("%s: %w", "Failed to cluster health check::Abnormal nodes found ", err)
}
err = ClusterOperatorHealthcheck(oc, 1500, dirname)
if err != nil {
return fmt.Errorf("%s: %w", "Failed to cluster health check::Abnormal cluster operators found", err)
}
// Check the load of cluster nodes
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, false, func(cxt context.Context) (bool, error) {
_, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes.metrics").Output()
if err1 != nil {
e2e.Logf("Nodes metrics are not ready!")
return false, nil
}
e2e.Logf("Nodes metrics are ready!")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Unable to get nodes metrics!")
outTop, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "nodes").Output()
e2e.Logf("#### Output load of cluster nodes ####\n%s", outTop)
err = ClusterPodsHealthcheck(oc, 900, dirname)
if err != nil {
return fmt.Errorf("%s: %w", "Failed to cluster health check::Abnormal pods found", err)
}
return nil
} | disasterrecovery | ||||
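// Minimal usage sketch (the file name handed through to OutputToFile is
// illustrative only); a non-nil error means nodes, operators or pods are abnormal.
o.Expect(ClusterHealthcheck(oc, "ocp-dr-health-check.log")).NotTo(o.HaveOccurred())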
function | openshift/openshift-tests-private | 6f31a3df-2d34-4728-96b7-ac1e5de9bf90 | ClusterOperatorHealthcheck | ['"context"', '"fmt"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func ClusterOperatorHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
e2e.Logf("Check the abnormal operators")
errCo := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
coLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "--no-headers").OutputToFile(dirname)
if err == nil {
cmd := fmt.Sprintf(`cat %v | grep -v '.True.*False.*False' || true`, coLogFile)
coLogs, err := exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(coLogs) > 0 {
return false, nil
}
} else {
return false, nil
}
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("No abnormality found in cluster operators...")
return true, nil
})
if errCo != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errCo
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 131feadd-ff5e-4410-8628-9613458a51a4 | ClusterPodsHealthcheck | ['"context"', '"fmt"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func ClusterPodsHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
e2e.Logf("Check the abnormal pods")
var podLogs []byte
errPod := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
podLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-A").OutputToFile(dirname)
if err == nil {
cmd := fmt.Sprintf(`cat %v | grep -ivE 'Running|Completed|namespace|installer' || true`, podLogFile)
podLogs, err = exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(podLogs) > 0 {
return false, nil
}
} else {
return false, nil
}
e2e.Logf("No abnormality found in pods...")
return true, nil
})
if errPod != nil {
e2e.Logf("%s", podLogs)
}
return errPod
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 0ff8c135-649e-4e7c-b493-1f3e392a97c6 | ClusterNodesHealthcheck | ['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func ClusterNodesHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
errNode := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
if err == nil {
if strings.Contains(output, "NotReady") || strings.Contains(output, "SchedulingDisabled") {
return false, nil
}
} else {
return false, nil
}
e2e.Logf("Nodes are normal...")
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
})
if errNode != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errNode
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | d92f53d3-94c7-4a42-9c33-7e92d6c5f73e | isAzureStackCluster | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func isAzureStackCluster(oc *exutil.CLI) (bool, string) {
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.ToLower(cloudName) == "azurestackcloud" {
e2e.Logf("This is Azure Stack cluster.")
return true, cloudName
}
return false, ""
} | disasterrecovery | ||||
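// Minimal usage sketch (assumption: g is the Ginkgo alias used by the test files
// in this package): skip scenarios that do not apply to Azure Stack Hub.
if isAzureStack, cloudName := isAzureStackCluster(oc); isAzureStack {
	g.Skip("Scenario is not supported on Azure Stack cluster: " + cloudName)
}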
function | openshift/openshift-tests-private | 0d2cc16f-1f82-475a-be28-cb26973ba7cb | getRandomNum | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func getRandomNum(m int32, n int32) int32 {
rand.Seed(time.Now().UnixNano())
return rand.Int31n(n-m+1) + m
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | 63462608-3e9e-4807-8b74-0d359c0d1a64 | doAction | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func doAction(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) {
if asAdmin && withoutNamespace {
return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output()
}
if asAdmin && !withoutNamespace {
return oc.AsAdmin().Run(action).Args(parameters...).Output()
}
if !asAdmin && withoutNamespace {
return oc.WithoutNamespace().Run(action).Args(parameters...).Output()
}
if !asAdmin && !withoutNamespace {
return oc.Run(action).Args(parameters...).Output()
}
return "", nil
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | 37bd5f68-d9a7-4e35-bbd6-73f647289804 | getResource | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func getResource(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) {
return doAction(oc, "get", asAdmin, withoutNamespace, parameters...)
} | disasterrecovery | |||||
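// Minimal usage sketch (the resource and label selector are illustrative): read a
// resource as admin without the CLI's default namespace, reusing the asAdmin and
// withoutNamespace constants defined in this package.
etcdPods, err := getResource(oc, asAdmin, withoutNamespace, "pods", "-n", "openshift-etcd", "-l", "app=etcd", "-o", "name")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("etcd pods: %s", etcdPods)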
function | openshift/openshift-tests-private | 5d6f2d0b-696e-4cf4-b21f-a5466ecc1b2a | getCoStatus | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func getCoStatus(oc *exutil.CLI, coName string, statusToCompare map[string]string) map[string]string {
newStatusToCompare := make(map[string]string)
for key := range statusToCompare {
args := fmt.Sprintf(`-o=jsonpath={.status.conditions[?(.type == '%s')].status}`, key)
status, _ := getResource(oc, asAdmin, withoutNamespace, "co", coName, args)
newStatusToCompare[key] = status
}
return newStatusToCompare
} | disasterrecovery | ||||
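// Minimal usage sketch (operator name and expected conditions are illustrative):
// compare a cluster operator's conditions against the expected steady state.
expectedStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
currentStatus := getCoStatus(oc, "kube-apiserver", expectedStatus)
o.Expect(currentStatus).To(o.Equal(expectedStatus))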
function | openshift/openshift-tests-private | 84ac6258-8a94-4402-8971-e52605b7e1d7 | removeEtcdMember | ['"fmt"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/apiserver_util.go | func removeEtcdMember(oc *exutil.CLI, pod, abnormalNode string) {
listCmd := fmt.Sprintf("etcdctl member list | awk -F, '$3 ~ /%v/ {print $1}'", abnormalNode)
memberID, err := oc.Run("exec").WithoutNamespace().
Args(pod, "-n", "openshift-etcd", "--", "sh", "-c", listCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
removeCmd := fmt.Sprintf("etcdctl member remove %s", memberID)
_, err = oc.Run("exec").WithoutNamespace().
Args(pod, "-n", "openshift-etcd", "--", "sh", "-c", removeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
} | disasterrecovery | ||||
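// Minimal usage sketch (pod and node names are illustrative): remove the etcd
// member that belonged to a control-plane node that could not be recovered.
removeEtcdMember(oc, "etcd-master-0", "master-2")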
file | openshift/openshift-tests-private | 5a2c8454-891d-4780-b6f4-ae3abf8a5026 | compute_osp | import (
"encoding/base64"
"fmt"
"os"
"regexp"
"strings"
"github.com/gophercloud/gophercloud"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | package disasterrecovery
import (
"encoding/base64"
"fmt"
"os"
"regexp"
"strings"
"github.com/gophercloud/gophercloud"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type ospInstance struct {
instance
ospObj exutil.Osp
client *gophercloud.ServiceClient
}
// GetOspNodes gets the nodes with the specified label and loads the OpenStack cloud credentials.
func GetOspNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "compute")
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newOspInstance(oc, client, nodeName))
}
return results, nil
}
// OspCredentials loads the OpenStack platform credentials and exports them as environment variables.
func OspCredentials(oc *exutil.CLI) {
credentials, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/openstack-credentials", "-n", "kube-system", "-o", `jsonpath={.data.clouds\.yaml}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Decode the base64 credentials
credential, err := base64.StdEncoding.DecodeString(credentials)
o.Expect(err).NotTo(o.HaveOccurred())
// Define variables for the credentials
var (
username string
password string
projectID string
authURL string
userDomainName string
regionName string
projectName string
authType string
applicationCredentialId string
applicationCredentialSecret string
)
// Define mappings for credentials to environment variables
credMap := map[string]*string{
"auth_url": &authURL,
"username": &username,
"password": &password,
"project_id": &projectID,
"user_domain_name": &userDomainName,
"region_name": ®ionName,
"project_name": &projectName,
"auth_type": &authType,
"application_credential_id": &applicationCredentialId,
"application_credential_secret": &applicationCredentialSecret,
}
// Extract and set each credential variable using regex
for yamlKey, credVar := range credMap {
r := regexp.MustCompile(yamlKey + `:\s*([^\n]+)`)
match := r.FindStringSubmatch(string(credential))
if len(match) == 2 {
*credVar = strings.TrimSpace(match[1])
}
// Set environment variable
envVarName := fmt.Sprintf("OSP_DR_%s", strings.ToUpper(yamlKey))
os.Setenv(envVarName, *credVar)
}
}
func newOspInstance(oc *exutil.CLI, client *gophercloud.ServiceClient, nodeName string) *ospInstance {
return &ospInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
ospObj: exutil.Osp{},
client: client,
}
}
func (osp *ospInstance) GetInstanceID() (string, error) {
instanceID, err := osp.ospObj.GetOspInstance(osp.client, osp.nodeName)
if err != nil {
e2e.Logf("Get instance id failed with error :: %v.", err)
return "", err
}
return instanceID, nil
}
func (osp *ospInstance) Start() error {
instanceState, err := osp.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = osp.ospObj.GetStartOspInstance(osp.client, osp.nodeName)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", osp.nodeName, instanceState)
}
return nil
}
func (osp *ospInstance) Stop() error {
instanceState, err := osp.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = osp.ospObj.GetStopOspInstance(osp.client, osp.nodeName)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", osp.nodeName, instanceState)
}
return nil
}
func (osp *ospInstance) State() (string, error) {
instanceState, err := osp.ospObj.GetOspInstanceState(osp.client, osp.nodeName)
if err == nil {
e2e.Logf("VM %s is : %s", osp.nodeName, strings.ToLower(instanceState))
return strings.ToLower(instanceState), nil
}
return "", err
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | e5607201-f135-4b2c-b520-bf63276e98ac | GetOspNodes | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | func GetOspNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "compute")
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newOspInstance(oc, client, nodeName))
}
return results, nil
} | disasterrecovery | |||||
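// Minimal usage sketch (label and error handling follow the surrounding suite's
// style): fetch the OpenStack master nodes and power-cycle the first one. A real
// test would poll State() until the instance reports a stopped state before
// starting it again.
masterNodes, cleanup := GetOspNodes(oc, "master")
if cleanup != nil {
	defer cleanup()
}
o.Expect(masterNodes).NotTo(o.BeEmpty())
o.Expect(masterNodes[0].Stop()).NotTo(o.HaveOccurred())
o.Expect(masterNodes[0].Start()).NotTo(o.HaveOccurred())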
function | openshift/openshift-tests-private | 7ffac5f0-9c82-4932-af9c-1b7ca7684e91 | OspCredentials | ['"encoding/base64"', '"fmt"', '"os"', '"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | func OspCredentials(oc *exutil.CLI) {
credentials, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/openstack-credentials", "-n", "kube-system", "-o", `jsonpath={.data.clouds\.yaml}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Decode the base64 credentials
credential, err := base64.StdEncoding.DecodeString(credentials)
o.Expect(err).NotTo(o.HaveOccurred())
// Define variables for the credentials
var (
username string
password string
projectID string
authURL string
userDomainName string
regionName string
projectName string
authType string
applicationCredentialId string
applicationCredentialSecret string
)
// Define mappings for credentials to environment variables
credMap := map[string]*string{
"auth_url": &authURL,
"username": &username,
"password": &password,
"project_id": &projectID,
"user_domain_name": &userDomainName,
"region_name": ®ionName,
"project_name": &projectName,
"auth_type": &authType,
"application_credential_id": &applicationCredentialId,
"application_credential_secret": &applicationCredentialSecret,
}
// Extract and set each credential variable using regex
for yamlKey, credVar := range credMap {
r := regexp.MustCompile(yamlKey + `:\s*([^\n]+)`)
match := r.FindStringSubmatch(string(credential))
if len(match) == 2 {
*credVar = strings.TrimSpace(match[1])
}
// Set environment variable
envVarName := fmt.Sprintf("OSP_DR_%s", strings.ToUpper(yamlKey))
os.Setenv(envVarName, *credVar)
}
} | disasterrecovery | ||||
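// Minimal usage sketch: after OspCredentials runs, every clouds.yaml key is
// exported as an OSP_DR_* environment variable (the variable shown is one example).
OspCredentials(oc)
authURL := os.Getenv("OSP_DR_AUTH_URL")
e2e.Logf("OpenStack auth URL: %s", authURL)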
function | openshift/openshift-tests-private | 9db8e3e2-93c6-4e93-b332-188848ccd71c | newOspInstance | ['"github.com/gophercloud/gophercloud"'] | ['ospInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | func newOspInstance(oc *exutil.CLI, client *gophercloud.ServiceClient, nodeName string) *ospInstance {
return &ospInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
ospObj: exutil.Osp{},
client: client,
}
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 089ecd78-3fdc-4610-8cfe-4586ac1ca6a6 | GetInstanceID | ['ospInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | func (osp *ospInstance) GetInstanceID() (string, error) {
instanceID, err := osp.ospObj.GetOspInstance(osp.client, osp.nodeName)
if err != nil {
e2e.Logf("Get instance id failed with error :: %v.", err)
return "", err
}
return instanceID, nil
} | disasterrecovery | ||||
function | openshift/openshift-tests-private | a4bafa6c-be8e-4560-8d42-c14c0e032535 | Start | ['"fmt"'] | ['ospInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | func (osp *ospInstance) Start() error {
instanceState, err := osp.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = osp.ospObj.GetStartOspInstance(osp.client, osp.nodeName)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", osp.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 474fd91a-fc50-4637-8e17-3ba090dc2f0a | Stop | ['"fmt"'] | ['ospInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | func (osp *ospInstance) Stop() error {
instanceState, err := osp.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = osp.ospObj.GetStopOspInstance(osp.client, osp.nodeName)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", osp.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 6fc9bbc0-8819-471a-ad35-e925ae46ab8c | State | ['"strings"'] | ['ospInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_osp.go | func (osp *ospInstance) State() (string, error) {
instanceState, err := osp.ospObj.GetOspInstanceState(osp.client, osp.nodeName)
if err == nil {
e2e.Logf("VM %s is : %s", osp.nodeName, strings.ToLower(instanceState))
return strings.ToLower(instanceState), nil
}
return "", err
} | disasterrecovery | |||
file | openshift/openshift-tests-private | 121fbfaf-ab6e-480f-bea4-c100e5a954b1 | compute_vsphere | import (
"encoding/base64"
"fmt"
"net/url"
"os"
"strings"
"time"
"github.com/tidwall/gjson"
"github.com/vmware/govmomi"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v3"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | package disasterrecovery
import (
"encoding/base64"
"fmt"
"net/url"
"os"
"strings"
"time"
"github.com/tidwall/gjson"
"github.com/vmware/govmomi"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v3"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type vsphereInstance struct {
instance
vspObj *exutil.Vmware
vspClient *govmomi.Client
vmRelativePath string
}
// GetVsphereNodes gets the nodes with the specified label and loads the vSphere cloud credentials.
func GetVsphereNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
vspObj, vspClient, vmRelativePath := VsphereCloudClient(oc)
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newVsphereInstance(oc, vspObj, vspClient, nodeName, vmRelativePath))
}
return results, nil
}
// VsphereCloudClient builds the govmomi URL from the cluster's vSphere credentials and logs in.
func VsphereCloudClient(oc *exutil.CLI) (*exutil.Vmware, *govmomi.Client, string) {
randomStr := exutil.GetRandomString()
dirname := fmt.Sprintf("/tmp/-dr_vsphere_login_%s/", randomStr)
defer os.RemoveAll(dirname)
os.MkdirAll(dirname, 0755)
credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/vsphere-creds", "-n", "kube-system", "-o", `jsonpath={.data}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
output := gjson.Parse(credential).Value().(map[string]interface{})
var accessKeyIDBase64 string
var secureKeyBase64 string
for key, value := range output {
if strings.Contains(key, "username") {
accessKeyIDBase64 = fmt.Sprint(value)
} else if strings.Contains(key, "password") {
secureKeyBase64 = fmt.Sprint(value)
}
}
accessKeyID, err1 := base64.StdEncoding.DecodeString(accessKeyIDBase64)
o.Expect(err1).NotTo(o.HaveOccurred())
secureKey, err2 := base64.StdEncoding.DecodeString(secureKeyBase64)
o.Expect(err2).NotTo(o.HaveOccurred())
vSphereConfigFile, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-provider-config", "-n", "openshift-config", "-o", `jsonpath={.data.config}`).OutputToFile("dr_vsphere_login_" + randomStr + "/server.ini")
o.Expect(err3).NotTo(o.HaveOccurred())
envURL, vmRelativePath, err4 := getvSphereServerConfig(oc, vSphereConfigFile)
o.Expect(err4).NotTo(o.HaveOccurred())
envUsername := string(accessKeyID)
envPassword := string(secureKey)
encodedPassword := url.QueryEscape(envPassword)
govmomiURL := fmt.Sprintf("https://%s:%s@%s/sdk", envUsername, encodedPassword, envURL)
vmware := exutil.Vmware{GovmomiURL: govmomiURL}
vm, client := vmware.Login()
return vm, client, vmRelativePath
}
func newVsphereInstance(oc *exutil.CLI, vspObj *exutil.Vmware, vspClient *govmomi.Client, nodeName string, vmRelativePath string) *vsphereInstance {
return &vsphereInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
vspObj: vspObj,
vspClient: vspClient,
vmRelativePath: vmRelativePath,
}
}
func (vs *vsphereInstance) GetInstanceID() (string, error) {
var instanceID string
var err error
errVmId := wait.Poll(10*time.Second, 200*time.Second, func() (bool, error) {
instanceID, err = vs.vspObj.GetVspheresInstance(vs.vspClient, vs.vmRelativePath+vs.nodeName)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errVmId, fmt.Sprintf("Failed to get VM instance ID for node: %s, error: %s", vs.nodeName, err))
return instanceID, err
}
func (vs *vsphereInstance) Start() error {
instanceState, err := vs.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = vs.vspObj.StartVsphereInstance(vs.vspClient, vs.vmRelativePath+vs.nodeName)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", vs.nodeName, instanceState)
}
return nil
}
func (vs *vsphereInstance) Stop() error {
instanceState, err := vs.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = vs.vspObj.StopVsphereInstance(vs.vspClient, vs.vmRelativePath+vs.nodeName)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", vs.nodeName, instanceState)
}
return nil
}
func (vs *vsphereInstance) State() (string, error) {
instanceState, statusErr := vs.vspObj.GetVspheresInstanceState(vs.vspClient, vs.vmRelativePath+vs.nodeName)
return strings.ToLower(instanceState), statusErr
}
// vSphereConfig is a struct to represent the YAML structure.
type vSphereConfig struct {
Vcenter map[string]struct {
Server string `yaml:"server"`
Datacenters []string `yaml:"datacenters"`
} `yaml:"vcenter"`
}
// getvSphereServerConfig handles both INI and YAML configurations.
func getvSphereServerConfig(oc *exutil.CLI, vSphereConfigFile string) (string, string, error) {
fileContent, err := os.ReadFile(vSphereConfigFile)
if err != nil {
return "", "", fmt.Errorf("error reading configuration file: %s", err)
}
// Try to parse as INI format
cfg, err := ini.Load(vSphereConfigFile)
if err == nil {
// INI parsing succeeded, extract values
serverURL := cfg.Section("Workspace").Key("server").String()
vmRelativePath := cfg.Section("Workspace").Key("folder").String()
return serverURL, vmRelativePath + "/", nil
}
// If INI parsing fails, try parsing as YAML
var yamlConfig vSphereConfig
err = yaml.Unmarshal(fileContent, &yamlConfig)
if err != nil {
return "", "", fmt.Errorf("error parsing configuration as YAML: %s", err)
}
// Extract values from the YAML structure
for _, vcenter := range yamlConfig.Vcenter {
if vcenter.Server != "" {
serverURL := vcenter.Server
var vmRelativePath string
if len(vcenter.Datacenters) > 0 {
vmRelativePath = vcenter.Datacenters[0]
}
infrastructureName := clusterinfra.GetInfrastructureName(oc)
o.Expect(infrastructureName).ShouldNot(o.BeEmpty(), "The infrastructure name should not be empty")
return serverURL, "/" + vmRelativePath + "/vm/" + infrastructureName + "/", nil
}
}
return "", "", fmt.Errorf("no valid configuration found")
}
| package disasterrecovery | ||||
function | openshift/openshift-tests-private | 1e929712-d535-4ff2-8109-ae3532081d02 | GetVsphereNodes | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func GetVsphereNodes(oc *exutil.CLI, label string) ([]ComputeNode, func()) {
nodeNames, err := exutil.GetClusterNodesBy(oc, label)
o.Expect(err).NotTo(o.HaveOccurred())
vspObj, vspClient, vmRelativePath := VsphereCloudClient(oc)
var results []ComputeNode
for _, nodeName := range nodeNames {
results = append(results, newVsphereInstance(oc, vspObj, vspClient, nodeName, vmRelativePath))
}
return results, nil
} | disasterrecovery | |||||
function | openshift/openshift-tests-private | 0627057a-3884-4c62-b1ee-83d5507bc56a | VsphereCloudClient | ['"encoding/base64"', '"fmt"', '"net/url"', '"os"', '"strings"', '"github.com/tidwall/gjson"', '"github.com/vmware/govmomi"'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func VsphereCloudClient(oc *exutil.CLI) (*exutil.Vmware, *govmomi.Client, string) {
randomStr := exutil.GetRandomString()
dirname := fmt.Sprintf("/tmp/-dr_vsphere_login_%s/", randomStr)
defer os.RemoveAll(dirname)
os.MkdirAll(dirname, 0755)
credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/vsphere-creds", "-n", "kube-system", "-o", `jsonpath={.data}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
output := gjson.Parse(credential).Value().(map[string]interface{})
var accessKeyIDBase64 string
var secureKeyBase64 string
for key, value := range output {
if strings.Contains(key, "username") {
accessKeyIDBase64 = fmt.Sprint(value)
} else if strings.Contains(key, "password") {
secureKeyBase64 = fmt.Sprint(value)
}
}
accessKeyID, err1 := base64.StdEncoding.DecodeString(accessKeyIDBase64)
o.Expect(err1).NotTo(o.HaveOccurred())
secureKey, err2 := base64.StdEncoding.DecodeString(secureKeyBase64)
o.Expect(err2).NotTo(o.HaveOccurred())
vSphereConfigFile, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-provider-config", "-n", "openshift-config", "-o", `jsonpath={.data.config}`).OutputToFile("dr_vsphere_login_" + randomStr + "/server.ini")
o.Expect(err3).NotTo(o.HaveOccurred())
envURL, vmRelativePath, err4 := getvSphereServerConfig(oc, vSphereConfigFile)
o.Expect(err4).NotTo(o.HaveOccurred())
envUsername := string(accessKeyID)
envPassword := string(secureKey)
encodedPassword := url.QueryEscape(envPassword)
govmomiURL := fmt.Sprintf("https://%s:%s@%s/sdk", envUsername, encodedPassword, envURL)
vmware := exutil.Vmware{GovmomiURL: govmomiURL}
vm, client := vmware.Login()
return vm, client, vmRelativePath
} | disasterrecovery | ||||
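// Minimal usage sketch (the node name is illustrative): log in once and reuse the
// govmomi client for each per-node vsphereInstance.
vspObj, vspClient, vmRelativePath := VsphereCloudClient(oc)
node := newVsphereInstance(oc, vspObj, vspClient, "mycluster-abcde-master-0", vmRelativePath)
state, err := node.State()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current power state of the node: %s", state)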
function | openshift/openshift-tests-private | 4c90c58c-8bca-4094-92d8-8ec91bd42808 | newVsphereInstance | ['"github.com/vmware/govmomi"'] | ['vsphereInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func newVsphereInstance(oc *exutil.CLI, vspObj *exutil.Vmware, vspClient *govmomi.Client, nodeName string, vmRelativePath string) *vsphereInstance {
return &vsphereInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
vspObj: vspObj,
vspClient: vspClient,
vmRelativePath: vmRelativePath,
}
} | disasterrecovery | |||
function | openshift/openshift-tests-private | f4b0f1fd-06b9-4e0b-8e92-877f61d57c30 | GetInstanceID | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['vsphereInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func (vs *vsphereInstance) GetInstanceID() (string, error) {
var instanceID string
var err error
errVmId := wait.Poll(10*time.Second, 200*time.Second, func() (bool, error) {
instanceID, err = vs.vspObj.GetVspheresInstance(vs.vspClient, vs.vmRelativePath+vs.nodeName)
if err == nil {
e2e.Logf("VM instance name: %s", instanceID)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errVmId, fmt.Sprintf("Failed to get VM instance ID for node: %s, error: %s", vs.nodeName, err))
return instanceID, err
} | disasterrecovery | |||
function | openshift/openshift-tests-private | 1ae21f8a-3463-462d-8d1b-a86dc09d49b5 | Start | ['"fmt"'] | ['vsphereInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func (vs *vsphereInstance) Start() error {
instanceState, err := vs.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := stopStates[instanceState]; ok {
err = vs.vspObj.StartVsphereInstance(vs.vspClient, vs.vmRelativePath+vs.nodeName)
if err != nil {
return fmt.Errorf("start instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to start instance %s from status %s", vs.nodeName, instanceState)
}
return nil
} | disasterrecovery | |||
function | openshift/openshift-tests-private | bc9cfb89-3321-46dc-b692-90e1e4354da9 | Stop | ['"fmt"'] | ['vsphereInstance'] | github.com/openshift/openshift-tests-private/test/extended/disaster_recovery/compute_vsphere.go | func (vs *vsphereInstance) Stop() error {
instanceState, err := vs.State()
o.Expect(err).NotTo(o.HaveOccurred())
if _, ok := startStates[instanceState]; ok {
err = vs.vspObj.StopVsphereInstance(vs.vspClient, vs.vmRelativePath+vs.nodeName)
if err != nil {
return fmt.Errorf("stop instance failed with error :: %v", err)
}
} else {
return fmt.Errorf("unalbe to stop instance %s from status %s", vs.nodeName, instanceState)
}
return nil
} | disasterrecovery |