element_type (stringclasses, 4 values) | project_name (stringclasses, 1 value) | uuid (stringlengths, 36-36) | name (stringlengths, 0-346) | imports (stringlengths, 0-2.67k) | structs (stringclasses, 761 values) | interfaces (stringclasses, 22 values) | file_location (stringclasses, 545 values) | code (stringlengths, 26-8.07M) | global_vars (stringclasses, 7 values) | package (stringclasses, 124 values) | tags (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|---|
function | openshift/openshift-tests-private | 189d8396-bc51-424a-b55b-978581bc3072 | getTestOCPImage | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getTestOCPImage() string {
testImageVersion := "4.19"
testOCPImage, err := exutil.GetLatestNightlyImage(testImageVersion)
o.Expect(err).NotTo(o.HaveOccurred())
if testOCPImage == "" {
e2e.Failf("Failed to get image for version %v", testImageVersion)
}
return testOCPImage
} | hive | |||||
function | openshift/openshift-tests-private | 9e10de5e-28aa-45e7-83a0-0698c02b4fb6 | getCondition | ['"encoding/json"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getCondition(oc *exutil.CLI, kind, resourceName, namespace, conditionType string) map[string]string {
e2e.Logf("Extracting the %v condition from %v/%v in namespace %v", conditionType, kind, resourceName, namespace)
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, resourceName, "-n", namespace, fmt.Sprintf("-o=jsonpath={.status.conditions[?(@.type==\"%s\")]}", conditionType)).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
var condition map[string]string
// Avoid Unmarshal failure when stdout is empty
if len(stdout) == 0 {
e2e.Logf("Condition %v not found on %v/%v in namespace %v", conditionType, kind, resourceName, namespace)
return condition
}
err = json.Unmarshal([]byte(stdout), &condition)
o.Expect(err).NotTo(o.HaveOccurred())
return condition
} | hive | ||||
function | openshift/openshift-tests-private | 2657dfaf-61ec-496d-8e2c-f6881c50cd25 | checkCondition | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func checkCondition(oc *exutil.CLI, kind, resourceName, namespace, conditionType string, expectKeyValue map[string]string, hint string) func() bool {
e2e.Logf(hint)
return func() bool {
condition := getCondition(oc, kind, resourceName, namespace, conditionType)
for key, expectValue := range expectKeyValue {
if actualValue, ok := condition[key]; !ok || actualValue != expectValue {
e2e.Logf("For condition %s's %s, expected value is %s, actual value is %v, retrying ...", conditionType, key, expectValue, actualValue)
return false
}
}
e2e.Logf("For condition %s, all fields checked are expected, proceeding to the next step ...", conditionType)
return true
}
} | hive | |||||
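Note: checkCondition deliberately returns a func() bool so it can be handed directly to a polling helper. A minimal usage sketch, assumed rather than taken from the repository (resource name, condition fields and timeouts are illustrative):

// Hypothetical usage: poll the ClusterDeployment's "Provisioned" condition until its status is True.
o.Eventually(checkCondition(oc, "ClusterDeployment", cdName, oc.Namespace(), "Provisioned",
map[string]string{"status": "True"},
"Waiting for the ClusterDeployment to become provisioned")).
WithTimeout(60 * time.Minute).
WithPolling(30 * time.Second).
Should(o.BeTrue())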
function | openshift/openshift-tests-private | 444d78e1-09cf-4c78-b7e7-7ceffcbbc382 | getAWSCredentials | ['"context"', '"os"', '"regexp"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getAWSCredentials(oc *exutil.CLI, mountPaths ...string) (AWSAccessKeyID string, AWSSecretAccessKey string) {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n=kube-system").Execute()
switch {
// Try root credentials
case err == nil:
e2e.Logf("Extracting AWS credentials from root credentials")
AWSAccessKeyID, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/aws-creds", "-n=kube-system", "--keys=aws_access_key_id", "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
AWSSecretAccessKey, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/aws-creds", "-n=kube-system", "--keys=aws_secret_access_key", "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
// Try mount paths
case len(mountPaths) > 0:
e2e.Logf("Extracting AWS creds from credential mounts")
e2e.Logf("Is the test running in the CI environment, targeting a non-AWS platform ?")
re, err := regexp.Compile(AWSCredsPattern)
o.Expect(err).NotTo(o.HaveOccurred())
for _, mountPath := range mountPaths {
e2e.Logf("Extracting AWS creds from path %s", mountPath)
fileBs, err := os.ReadFile(mountPath)
if err != nil {
e2e.Logf("Failed to read file: %v", err)
continue
}
matches := re.FindStringSubmatch(string(fileBs))
if len(matches) != 3 {
e2e.Logf("Incorrect credential format")
continue
}
AWSAccessKeyID = matches[1]
AWSSecretAccessKey = matches[2]
break
}
// Fall back to external configurations
default:
e2e.Logf("Extracting AWS creds from external configurations")
e2e.Logf("Is the test running locally, targeting a non-AWS platform ?")
if cfg, err := config.LoadDefaultConfig(context.Background()); err == nil {
creds, retrieveErr := cfg.Credentials.Retrieve(context.Background())
o.Expect(retrieveErr).NotTo(o.HaveOccurred())
AWSAccessKeyID = creds.AccessKeyID
AWSSecretAccessKey = creds.SecretAccessKey
}
}
o.Expect(AWSAccessKeyID).NotTo(o.BeEmpty())
o.Expect(AWSSecretAccessKey).NotTo(o.BeEmpty())
return
} | hive | ||||
function | openshift/openshift-tests-private | 6637486d-d843-46bb-a2e4-4b4a53f599d6 | getVSphereCredentials | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getVSphereCredentials(oc *exutil.CLI, vCenter string) (username string, password string) {
var err error
username, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/vsphere-creds", "-n=kube-system", fmt.Sprintf("--keys=%v.username", vCenter), "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(username).NotTo(o.BeEmpty())
password, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/vsphere-creds", "-n=kube-system", fmt.Sprintf("--keys=%v.password", vCenter), "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
// This assertion fails only when the password is an empty string, so the actual password is never logged.
o.Expect(password).NotTo(o.BeEmpty())
return
} | hive | ||||
function | openshift/openshift-tests-private | 0298a0e0-6933-4599-a45f-bd6911f9d94b | getAWSConfig | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getAWSConfig(oc *exutil.CLI, region string, secretMountPaths ...string) aws.Config {
AWSAccessKeyID, AWSSecretAccessKey := getAWSCredentials(oc, secretMountPaths...)
cfg, err := config.LoadDefaultConfig(
context.Background(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(AWSAccessKeyID, AWSSecretAccessKey, "")),
config.WithRegion(region),
)
o.Expect(err).NotTo(o.HaveOccurred())
return cfg
} | hive | ||||
function | openshift/openshift-tests-private | 2aa03ffd-4956-42d1-83e1-58b82157915c | newLegoDNSProvider | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func newLegoDNSProvider(
maxRetries, TTL int,
propagationTimeout, pollingInterval time.Duration,
accessKeyID, secretAccessKey, region string,
) (*legoroute53.DNSProvider, error) {
legoRoute53Config := &legoroute53.Config{
Region: region,
MaxRetries: maxRetries,
TTL: TTL,
PropagationTimeout: propagationTimeout,
PollingInterval: pollingInterval,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
}
return legoroute53.NewDNSProviderConfig(legoRoute53Config)
} | hive | ||||
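newLegoDNSProvider simply wires static AWS credentials into lego's Route 53 provider. A hedged usage sketch (values are illustrative; accessKeyID/secretAccessKey would come from getAWSCredentials, and Present/CleanUp are the challenge.Provider methods lego providers expose):

provider, err := newLegoDNSProvider(5, 60, 10*time.Minute, 10*time.Second, accessKeyID, secretAccessKey, "us-east-1")
o.Expect(err).NotTo(o.HaveOccurred())
// Publish and later remove a DNS-01 challenge record for a (hypothetical) domain:
o.Expect(provider.Present("hive-test.example.com", "token", "keyAuth")).NotTo(o.HaveOccurred())
defer provider.CleanUp("hive-test.example.com", "token", "keyAuth")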
function | openshift/openshift-tests-private | 9b2804c3-d716-4e27-94d8-8b44a3a0835b | extractHiveutil | ['"fmt"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func extractHiveutil(oc *exutil.CLI, dir string) string {
latestImgTagStr := getLatestHiveVersion()
e2e.Logf("Extracting hiveutil from image %v (latest) ...", latestImgTagStr)
err := oc.
AsAdmin().
WithoutNamespace().
Run("image", "extract").
Args(fmt.Sprintf("quay.io/%s/hive:%s", HiveImgRepoOnQuay, latestImgTagStr), "--path", "/usr/bin/hiveutil:"+dir).
Execute()
o.Expect(err).NotTo(o.HaveOccurred())
hiveutilPath := dir + "/hiveutil"
e2e.Logf("Making hiveutil executable ...")
cmd := exec.Command("chmod", "+x", hiveutilPath)
_, err = cmd.CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Making sure hiveutil is functional ...")
cmd = exec.Command(hiveutilPath)
out, err := cmd.CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(string(out)).To(o.ContainSubstring("Available Commands"))
o.Expect(string(out)).To(o.ContainSubstring("awsprivatelink"))
return hiveutilPath
} | hive | ||||
function | openshift/openshift-tests-private | dc225397-748d-4cc9-9312-722054497495 | getNodeNames | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getNodeNames(oc *exutil.CLI, labels map[string]string) []string {
e2e.Logf("Extracting Node names")
args := []string{"node"}
for k, v := range labels {
args = append(args, fmt.Sprintf("--selector=%s=%s", k, v))
}
args = append(args, "-o=jsonpath={.items[*].metadata.name}")
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames := strings.Split(stdout, " ")
e2e.Logf("Nodes extracted = %v", nodeNames)
return nodeNames
} | hive | ||||
function | openshift/openshift-tests-private | f603de73-b4be-452e-a048-fc61dc339fb0 | getMachinePoolInstancesIds | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getMachinePoolInstancesIds(oc *exutil.CLI, machinePoolName string, kubeconfigPath string) []string {
// The command below does not error out if the selector does not have a match
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args(
"machine",
fmt.Sprintf("--selector=machine.openshift.io/cluster-api-machine-role=%s", machinePoolName),
"-n", "openshift-machine-api", "-o=jsonpath={.items[*].status.providerStatus.instanceId}",
"--kubeconfig", kubeconfigPath,
).
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
// When stdout is an empty string, strings.Split(stdout, " ") yields []string{""}.
// We do not want this, so return an empty slice instead.
if len(stdout) == 0 {
return []string{}
}
return strings.Split(stdout, " ")
} | hive | ||||
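The empty-string guard above works around a standard-library corner case; a standalone illustration (plain Go, nothing beyond the standard library):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Splitting an empty string does not yield an empty slice:
	parts := strings.Split("", " ")
	fmt.Println(len(parts)) // 1, i.e. parts == []string{""}
	// which is why the helper above returns []string{} explicitly.
}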
function | openshift/openshift-tests-private | e7876bce-c040-49d8-9773-e9b4636ca198 | getBasedomain | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getBasedomain(oc *exutil.CLI) string {
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("dns/cluster", "-o=jsonpath={.spec.baseDomain}").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdout).To(o.ContainSubstring("."))
basedomain := stdout[strings.Index(stdout, ".")+1:]
e2e.Logf("Found base domain = %v", basedomain)
return basedomain
} | hive | ||||
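getBasedomain drops the leading (cluster-name) label from the cluster's DNS domain. A standalone illustration of the trimming logic (the domain below is hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// What `oc get dns/cluster -o=jsonpath={.spec.baseDomain}` might print for a cluster named "mycluster":
	specBaseDomain := "mycluster.qe.example.com"
	fmt.Println(specBaseDomain[strings.Index(specBaseDomain, ".")+1:]) // qe.example.com
}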
function | openshift/openshift-tests-private | 731e2f8a-9319-4c9e-b095-6c5ea8ec51da | getRegion | ['"context"', '"strings"', '"github.com/aws/aws-sdk-go-v2/aws"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getRegion(oc *exutil.CLI) string {
infrastructure, err := oc.
AdminConfigClient().
ConfigV1().
Infrastructures().
Get(context.Background(), "cluster", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
var region string
switch platform := strings.ToLower(string(infrastructure.Status.PlatformStatus.Type)); platform {
case "aws":
region = infrastructure.Status.PlatformStatus.AWS.Region
case "azure":
region, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("nodes", "-o=jsonpath={.items[0].metadata.labels['topology\\.kubernetes\\.io/region']}").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
case "gcp":
region = infrastructure.Status.PlatformStatus.GCP.Region
default:
e2e.Failf("Unknown platform: %s", platform)
}
e2e.Logf("Found region = %v", region)
return region
} | hive | ||||
function | openshift/openshift-tests-private | e3c17f95-ac59-413d-bcae-6ddcfe91da3b | createVsphereCertsSecret | ['"context"', '"fmt"', '"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createVsphereCertsSecret(oc *exutil.CLI, ns, vCenter string) {
// Notes:
// 1) As we do not necessarily have access to the vCenter URL, we'd better run commands on the ephemeral cluster.
// 2) For some reason, /certs/download.zip might contain root certificates for a co-hosted (alias) domain.
// Provision will fail when this happens. As a result, we need to get an additional set of certificates
// with openssl, and merge those certificates into the ones obtained with wget.
// TODO: are the certificates obtained through openssl sufficient by themselves (probably yes)?
e2e.Logf("Getting certificates from the ephemeral cluster")
commands := fmt.Sprintf("yum install -y unzip && "+
"wget https://%v/certs/download.zip --no-check-certificate && "+
"unzip download.zip && "+
"cat certs/lin/*.0 && "+
"openssl s_client -host %v -port 443 -showcerts", vCenter, vCenter)
// No need to recover labels set on oc.Namespace()
err := exutil.SetNamespacePrivileged(oc, oc.Namespace())
o.Expect(err).NotTo(o.HaveOccurred())
// --to-namespace is required for the CI environment, otherwise
// the API server will throw a "namespace XXX not found" error.
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("debug").
Args("--to-namespace", oc.Namespace(), "--", "bash", "-c", commands).
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
re, err := regexp.Compile(pemX509CertPattern)
o.Expect(err).NotTo(o.HaveOccurred())
matches := re.FindAllStringSubmatch(stdout, -1)
var certsSlice []string
for _, match := range matches {
certsSlice = append(certsSlice, match[0])
}
certs := strings.Join(certsSlice, "\n")
e2e.Logf("Creating Secret containing root certificates of vCenter %v", vCenter)
certSecret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: VSphereCerts,
Namespace: ns,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
".cacert": certs,
},
}
_, err = oc.AdminKubeClient().CoreV1().Secrets(ns).Create(context.Background(), certSecret, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 4b30ff91-6738-46f1-845c-fe08d166a11f | getIps2ReserveFromAWSHostedZone | ['"context"', '"net"', '"strings"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/route53"', '"github.com/aws/aws-sdk-go-v2/service/route53/types"', '"github.com/3th1nk/cidr"', 'legoroute53 "github.com/go-acme/lego/v4/providers/dns/route53"', '"k8s.io/apimachinery/pkg/util/sets"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getIps2ReserveFromAWSHostedZone(oc *exutil.CLI, hostedZoneName string, cidrBlock *cidr.CIDR, minIp net.IP,
maxIp net.IP, unavailableIps []string, awsCredsFilePath string, domains2Reserve []string) (fReserve func(),
fRelease func(), domain2Ip map[string]string) {
// Route 53 is global so any region will do
var cfg aws.Config
if awsCredsFilePath == "" {
cfg = getAWSConfig(oc, AWSRegion)
} else {
cfg = getAWSConfig(oc, AWSRegion, awsCredsFilePath)
}
route53Client := route53.NewFromConfig(cfg)
// Get hosted zone ID
var hostedZoneId *string
listHostedZonesByNameOutput, err := route53Client.ListHostedZonesByName(context.Background(),
&route53.ListHostedZonesByNameInput{
DNSName: aws.String(hostedZoneName),
},
)
o.Expect(err).NotTo(o.HaveOccurred())
hostedZoneFound := false
for _, hostedZone := range listHostedZonesByNameOutput.HostedZones {
if strings.TrimSuffix(aws.ToString(hostedZone.Name), ".") == hostedZoneName {
hostedZoneFound = true
hostedZoneId = hostedZone.Id
break
}
}
o.Expect(hostedZoneFound).To(o.BeTrue())
e2e.Logf("Found hosted zone id = %v", aws.ToString(hostedZoneId))
// Get reserved IPs in cidr
reservedIps := sets.New[string](unavailableIps...)
listResourceRecordSetsPaginator := route53.NewListResourceRecordSetsPaginator(
route53Client,
&route53.ListResourceRecordSetsInput{
HostedZoneId: hostedZoneId,
},
)
for listResourceRecordSetsPaginator.HasMorePages() {
// Get a page of record sets
listResourceRecordSetsOutput, listResourceRecordSetsErr := listResourceRecordSetsPaginator.NextPage(context.Background())
o.Expect(listResourceRecordSetsErr).NotTo(o.HaveOccurred())
// Iterate records, mark IPs which belong to the cidr block as reservedIps
for _, recordSet := range listResourceRecordSetsOutput.ResourceRecordSets {
for _, resourceRecord := range recordSet.ResourceRecords {
if ip := aws.ToString(resourceRecord.Value); cidrBlock.Contains(ip) {
reservedIps.Insert(ip)
}
}
}
}
e2e.Logf("Found reserved IPs = %v", reservedIps.UnsortedList())
// Get available IPs in cidr which do not exceed the range defined by minIp and maxIp
var ips2Reserve []string
err = cidrBlock.EachFrom(minIp.String(), func(ip string) bool {
// Stop if IP exceeds maxIp or no more IPs to reserve
if cidr.IPCompare(net.ParseIP(ip), maxIp) == 1 || len(ips2Reserve) == len(domains2Reserve) {
return false
}
// Reserve available IP
if !reservedIps.Has(ip) {
ips2Reserve = append(ips2Reserve, ip)
}
return true
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(domains2Reserve)).To(o.Equal(len(ips2Reserve)), "Not enough available IPs to reserve")
e2e.Logf("IPs to reserve = %v", ips2Reserve)
e2e.Logf("Domains to reserve = %v", domains2Reserve)
// Get functions to reserve/release IPs
var recordSetChanges4Reservation, recordSetChanges4Release []types.Change
domain2Ip = make(map[string]string)
for i, domain2Reserve := range domains2Reserve {
ip2Reserve := ips2Reserve[i]
domain2Ip[domain2Reserve] = ip2Reserve
e2e.Logf("Will reserve IP %v for domain %v", ip2Reserve, domain2Reserve)
recordSetChanges4Reservation = append(recordSetChanges4Reservation, types.Change{
Action: types.ChangeActionCreate,
ResourceRecordSet: &types.ResourceRecordSet{
Name: aws.String(domain2Reserve),
Type: types.RRTypeA,
TTL: aws.Int64(60),
ResourceRecords: []types.ResourceRecord{{Value: aws.String(ip2Reserve)}},
},
})
recordSetChanges4Release = append(recordSetChanges4Release, types.Change{
Action: types.ChangeActionDelete,
ResourceRecordSet: &types.ResourceRecordSet{
Name: aws.String(domain2Reserve),
Type: types.RRTypeA,
TTL: aws.Int64(60),
ResourceRecords: []types.ResourceRecord{{Value: aws.String(ip2Reserve)}},
},
})
}
fReserve = func() {
e2e.Logf("Reserving IP addresses with domain to IP injection %v", domain2Ip)
_, reserveErr := route53Client.ChangeResourceRecordSets(
context.Background(),
&route53.ChangeResourceRecordSetsInput{
HostedZoneId: hostedZoneId,
ChangeBatch: &types.ChangeBatch{
Changes: recordSetChanges4Reservation,
},
},
)
o.Expect(reserveErr).NotTo(o.HaveOccurred())
}
fRelease = func() {
e2e.Logf("Releasing IP addresses for domains %v", domains2Reserve)
_, releaseErr := route53Client.ChangeResourceRecordSets(
context.Background(),
&route53.ChangeResourceRecordSetsInput{
HostedZoneId: hostedZoneId,
ChangeBatch: &types.ChangeBatch{
Changes: recordSetChanges4Release,
},
},
)
o.Expect(releaseErr).NotTo(o.HaveOccurred())
}
return
} | hive | ||||
function | openshift/openshift-tests-private | 815ef76a-bc60-4ef3-bc2f-e29d1c0d306b | getVSphereCIDR | ['"net"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/3th1nk/cidr"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"'] | ['minimalInstallConfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getVSphereCIDR(oc *exutil.CLI) (*cidr.CIDR, net.IP, net.IP) {
// Extracting machine network CIDR from install-config works for different network segments,
// including ci-vlan and devqe.
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("cm/cluster-config-v1", "-n", "kube-system", "--keys", "install-config", "--to", "-").
Outputs()
var ic minimalInstallConfig
o.Expect(err).NotTo(o.HaveOccurred())
err = yaml.Unmarshal([]byte(stdout), &ic)
o.Expect(err).NotTo(o.HaveOccurred())
machineNetwork := ic.Networking.MachineNetwork[0].CIDR
e2e.Logf("Found machine network segment = %v", machineNetwork)
cidrObj, err := cidr.Parse(machineNetwork)
o.Expect(err).NotTo(o.HaveOccurred())
// We need another (temporary) CIDR object, since it will be mutated along with begin when begin is incremented below.
cidrObjTemp, err := cidr.Parse(machineNetwork)
o.Expect(err).NotTo(o.HaveOccurred())
begin, end := cidrObjTemp.IPRange()
// The first 2 IPs should not be used
// The next 2 IPs are reserved for the Hive cluster
// We thus skip the first 4 IPs
minIpOffset := 4
for i := 0; i < minIpOffset; i++ {
cidr.IPIncr(begin)
}
e2e.Logf("Min IP = %v, max IP = %v", begin, end)
return cidrObj, begin, end
} | hive | |||
function | openshift/openshift-tests-private | 76c916ff-d7fe-4bc7-bc61-c2029498851c | getVMInternalIPs | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getVMInternalIPs(oc *exutil.CLI) []string {
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("node", "-o=jsonpath={.items[*].status.addresses[?(@.type==\"InternalIP\")].address}").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Fields(stdout)
} | hive | ||||
function | openshift/openshift-tests-private | 8d300354-5cbd-48a9-aa75-88d9ca2ce8cd | getTestEnv | ['"os"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getTestEnv() (tEnv testEnv) {
if val, ok := os.LookupEnv("OPENSHIFT_CI"); ok && val == "true" {
tEnv = testEnvCI
} else if _, ok := os.LookupEnv("JENKINS_HOME"); ok {
tEnv = testEnvJenkins
} else {
tEnv = testEnvLocal
}
return
} | hive | ||||
function | openshift/openshift-tests-private | 8298353f-7bd9-41c8-a4ef-335a26b969c2 | getAWSCredsFilePath4VSphere | ['"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getAWSCredsFilePath4VSphere(tEnv testEnv) (credsFilePath string) {
switch tEnv {
case testEnvCI:
credsFilePath = VSphereAWSCredsFilePathCI
case testEnvJenkins:
e2e.Failf(`
VSphere test cases are meant to be run locally (instead of on Jenkins).
In fact, an additional set of AWS credentials is required for DNS setup,
and those credentials are loaded from external AWS configurations (which
are only available locally) when running in non-CI environments.`)
case testEnvLocal:
// Credentials will be retrieved from external configurations using AWS tool chains when running locally.
credsFilePath = ""
default:
e2e.Failf("Unknown test environment")
}
return credsFilePath
} | hive | ||||
function | openshift/openshift-tests-private | d2231b59-3252-4658-abe0-9f0f233f5c62 | createAssumeRolePolicyDocument | ['"encoding/json"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createAssumeRolePolicyDocument(principalARN, uuid string) (string, error) {
policy := map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
{
"Effect": "Allow",
"Principal": map[string]string{
"AWS": principalARN,
},
"Action": "sts:AssumeRole",
},
},
}
if uuid != "" {
policyStatements := policy["Statement"].([]map[string]interface{})
policyStatements[0]["Condition"] = map[string]interface{}{
"StringEquals": map[string]string{
"sts:ExternalId": uuid,
},
}
}
policyJSON, err := json.MarshalIndent(policy, "", " ")
if err != nil {
return "", fmt.Errorf("failed to marshal policy: %v", err)
}
return string(policyJSON), nil
} | hive | ||||
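For reference, a sketch of what createAssumeRolePolicyDocument produces (the ARN and external ID are placeholders; key order follows Go's alphabetical JSON marshalling of maps):

// Hypothetical invocation:
doc, err := createAssumeRolePolicyDocument("arn:aws:iam::123456789012:user/hive-tester", "00000000-0000-0000-0000-000000000000")
o.Expect(err).NotTo(o.HaveOccurred())
// doc now holds a trust policy equivalent to:
// {
//   "Statement": [
//     {
//       "Action": "sts:AssumeRole",
//       "Condition": {"StringEquals": {"sts:ExternalId": "00000000-0000-0000-0000-000000000000"}},
//       "Effect": "Allow",
//       "Principal": {"AWS": "arn:aws:iam::123456789012:user/hive-tester"}
//     }
//   ],
//   "Version": "2012-10-17"
// }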
function | openshift/openshift-tests-private | d15344cc-2d2c-47c3-8127-fa7987091fc2 | isMCEEnabled | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func isMCEEnabled(oc *exutil.CLI) bool {
e2e.Logf("Checking if MCE is enabled in the cluster")
checkMCEOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("MultiClusterEngine", "multiclusterengine-sample").Output()
if err != nil {
if strings.Contains(checkMCEOutput, "the server doesn't have a resource type \"MultiClusterEngine\"") {
return false
} else {
e2e.Failf("Failed to check if MCE is enabled in the cluster: %v", err)
}
}
return strings.Contains(checkMCEOutput, "multiclusterengine-sample")
} | hive | ||||
function | openshift/openshift-tests-private | 4e3acc38-4bf5-448d-a73f-6cb17c28bd95 | getLatestHiveVersion | ['"fmt"', '"os/exec"', '"strings"', 'corev1 "k8s.io/api/core/v1"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getLatestHiveVersion() string {
e2e.Logf("Getting tag of the latest Hive image")
cmd := exec.Command(
"bash",
"-c",
fmt.Sprintf("curl -sk https://quay.io/api/v1/repository/%s/hive/tag/ "+
"| jq '.tags | sort_by(.start_ts) | reverse | .[0].name'", HiveImgRepoOnQuay),
)
latestImgTag, err := cmd.CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
latestImgTagStr := strings.Trim(strings.TrimSuffix(string(latestImgTag), "\n"), "\"")
e2e.Logf("The latest Hive image version is %v ", latestImgTagStr)
return latestImgTagStr
} | hive | ||||
test | openshift/openshift-tests-private | 4e56ef27-0ff9-4efb-8d44-0bf4d282f128 | hive_vsphere | import (
"context"
"fmt"
"net"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/3th1nk/cidr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_vsphere.go | package hive
import (
"context"
"fmt"
"net"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/3th1nk/cidr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-hive] Cluster_Operator hive should", func() {
defer g.GinkgoRecover()
var (
// Clients
oc = exutil.NewCLI("hive", exutil.KubeConfigPath())
// Test-specific
testDataDir string
testOCPImage string
randStr string
// Platform-specific
datacenter string
datastore string
network string
networkCIDR *cidr.CIDR
minIp net.IP
maxIp net.IP
machineIPs []string
vCenter string
cluster string
basedomain string
awsCredsFilePath string
tEnv testEnv
)
// Under the hood, "extended-platform-tests run" calls "extended-platform-tests run-test" on each test
// case separately. This means that all necessary initializations need to be done before every single
// test case, either globally or in a Ginkgo node like BeforeEach.
g.BeforeEach(func() {
// Skip incompatible platforms
exutil.SkipIfPlatformTypeNot(oc, "vsphere")
architecture.SkipNonAmd64SingleArch(oc)
// Get test-specific info
testDataDir = exutil.FixturePath("testdata", "cluster_operator/hive")
testOCPImage = getTestOCPImage()
randStr = getRandomString()[:ClusterSuffixLen]
// Get platform-specific info
tEnv = getTestEnv()
awsCredsFilePath = getAWSCredsFilePath4VSphere(tEnv)
basedomain = getBasedomain(oc)
networkCIDR, minIp, maxIp = getVSphereCIDR(oc)
machineIPs = getVMInternalIPs(oc)
infrastructure, err := oc.
AdminConfigClient().
ConfigV1().
Infrastructures().
Get(context.Background(), "cluster", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
failureDomains := infrastructure.Spec.PlatformSpec.VSphere.FailureDomains
datacenter = failureDomains[0].Topology.Datacenter
datastore = failureDomains[0].Topology.Datastore
network = failureDomains[0].Topology.Networks[0]
vCenter = failureDomains[0].Server
cluster = failureDomains[0].Topology.ComputeCluster
e2e.Logf(`Found platform-specific info:
- Datacenter: %s
- Datastore: %s
- Network: %s
- Machine IPs: %s,
- vCenter Server: %s
- Cluster: %s
- Base domain: %s
- Test environment: %s
- AWS creds file path: %s`, datacenter, datastore, network, machineIPs, vCenter, cluster, basedomain, tEnv, awsCredsFilePath)
// Install Hive operator if necessary
_, _ = installHiveOperator(oc, &hiveNameSpace{}, &operatorGroup{}, &subscription{}, &hiveconfig{}, testDataDir)
})
// Author: [email protected]
// Timeout: 60min
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:fxie-High-32026-Add hive api for vsphere provisioning [Serial]", func() {
var (
testCaseID = "32026"
cdName = fmt.Sprintf("cd-%s-%s", testCaseID, randStr)
icSecretName = fmt.Sprintf("%s-install-config", cdName)
imageSetName = fmt.Sprintf("%s-imageset", cdName)
apiDomain = fmt.Sprintf("api.%v.%v", cdName, basedomain)
ingressDomain = fmt.Sprintf("*.apps.%v.%v", cdName, basedomain)
domains2Reserve = []string{apiDomain, ingressDomain}
)
exutil.By("Extracting root credentials")
username, password := getVSphereCredentials(oc, vCenter)
exutil.By(fmt.Sprintf("Reserving API/ingress IPs for domains %v", domains2Reserve))
fReserve, fRelease, domain2Ip := getIps2ReserveFromAWSHostedZone(oc, basedomain,
networkCIDR, minIp, maxIp, machineIPs, awsCredsFilePath, domains2Reserve)
defer fRelease()
fReserve()
exutil.By("Creating ClusterDeployment and related resources")
installConfigSecret := vSphereInstallConfig{
secretName: icSecretName,
secretNs: oc.Namespace(),
baseDomain: basedomain,
icName: cdName,
cluster: cluster,
machineNetwork: networkCIDR.CIDR().String(),
apiVip: domain2Ip[apiDomain],
datacenter: datacenter,
datastore: datastore,
ingressVip: domain2Ip[ingressDomain],
network: network,
password: password,
username: username,
vCenter: vCenter,
template: filepath.Join(testDataDir, "vsphere-install-config.yaml"),
}
cd := vSphereClusterDeployment{
fake: false,
name: cdName,
namespace: oc.Namespace(),
baseDomain: basedomain,
manageDns: false,
clusterName: cdName,
certRef: VSphereCerts,
cluster: cluster,
credRef: VSphereCreds,
datacenter: datacenter,
datastore: datastore,
network: network,
vCenter: vCenter,
imageSetRef: imageSetName,
installConfigSecret: icSecretName,
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment-vsphere.yaml"),
}
defer cleanCD(oc, imageSetName, oc.Namespace(), installConfigSecret.secretName, cd.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cd)
exutil.By("Create worker MachinePool ...")
workermachinepoolVSphereTemp := filepath.Join(testDataDir, "machinepool-worker-vsphere.yaml")
workermp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: workermachinepoolVSphereTemp,
}
defer cleanupObjects(oc,
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-worker"},
)
workermp.create(oc)
exutil.By("Waiting for the CD to be installed")
// TODO(fxie): fail early in case of ProvisionStopped
newCheck("expect", "get", asAdmin, requireNS, compare, "true", ok,
ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-o=jsonpath={.spec.installed}"}).check(oc)
// Check the worker MP in good conditions
newCheck("expect", "get", asAdmin, requireNS, contain, "3", ok,
WaitingForClusterOperatorsTimeout, []string{"MachinePool", cdName + "-worker", "-o=jsonpath={.status.replicas}"}).check(oc)
})
})
| package hive | ||||
test case | openshift/openshift-tests-private | 94863789-ca3c-4218-807d-1a3982bc3186 | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:fxie-High-32026-Add hive api for vsphere provisioning [Serial] | ['"fmt"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_vsphere.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:fxie-High-32026-Add hive api for vsphere provisioning [Serial]", func() {
var (
testCaseID = "32026"
cdName = fmt.Sprintf("cd-%s-%s", testCaseID, randStr)
icSecretName = fmt.Sprintf("%s-install-config", cdName)
imageSetName = fmt.Sprintf("%s-imageset", cdName)
apiDomain = fmt.Sprintf("api.%v.%v", cdName, basedomain)
ingressDomain = fmt.Sprintf("*.apps.%v.%v", cdName, basedomain)
domains2Reserve = []string{apiDomain, ingressDomain}
)
exutil.By("Extracting root credentials")
username, password := getVSphereCredentials(oc, vCenter)
exutil.By(fmt.Sprintf("Reserving API/ingress IPs for domains %v", domains2Reserve))
fReserve, fRelease, domain2Ip := getIps2ReserveFromAWSHostedZone(oc, basedomain,
networkCIDR, minIp, maxIp, machineIPs, awsCredsFilePath, domains2Reserve)
defer fRelease()
fReserve()
exutil.By("Creating ClusterDeployment and related resources")
installConfigSecret := vSphereInstallConfig{
secretName: icSecretName,
secretNs: oc.Namespace(),
baseDomain: basedomain,
icName: cdName,
cluster: cluster,
machineNetwork: networkCIDR.CIDR().String(),
apiVip: domain2Ip[apiDomain],
datacenter: datacenter,
datastore: datastore,
ingressVip: domain2Ip[ingressDomain],
network: network,
password: password,
username: username,
vCenter: vCenter,
template: filepath.Join(testDataDir, "vsphere-install-config.yaml"),
}
cd := vSphereClusterDeployment{
fake: false,
name: cdName,
namespace: oc.Namespace(),
baseDomain: basedomain,
manageDns: false,
clusterName: cdName,
certRef: VSphereCerts,
cluster: cluster,
credRef: VSphereCreds,
datacenter: datacenter,
datastore: datastore,
network: network,
vCenter: vCenter,
imageSetRef: imageSetName,
installConfigSecret: icSecretName,
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment-vsphere.yaml"),
}
defer cleanCD(oc, imageSetName, oc.Namespace(), installConfigSecret.secretName, cd.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cd)
exutil.By("Create worker MachinePool ...")
workermachinepoolVSphereTemp := filepath.Join(testDataDir, "machinepool-worker-vsphere.yaml")
workermp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: workermachinepoolVSphereTemp,
}
defer cleanupObjects(oc,
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-worker"},
)
workermp.create(oc)
exutil.By("Waiting for the CD to be installed")
// TODO(fxie): fail early in case of ProvisionStopped
newCheck("expect", "get", asAdmin, requireNS, compare, "true", ok,
ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-o=jsonpath={.spec.installed}"}).check(oc)
// Check that the worker MachinePool is in good condition
newCheck("expect", "get", asAdmin, requireNS, contain, "3", ok,
WaitingForClusterOperatorsTimeout, []string{"MachinePool", cdName + "-worker", "-o=jsonpath={.status.replicas}"}).check(oc)
}) | |||||
test | openshift/openshift-tests-private | 1f24f6fc-b691-41c3-8b9e-6ab34215ea0f | autoscaler | import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | package clusterinfrastructure
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure CAS", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-autoscaler-operator", exutil.KubeConfigPath())
autoscalerBaseDir string
clusterAutoscalerTemplate string
machineAutoscalerTemplate string
workLoadTemplate string
clusterAutoscaler clusterAutoscalerDescription
machineAutoscaler machineAutoscalerDescription
workLoad workLoadDescription
iaasPlatform clusterinfra.PlatformType
infrastructureName string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
infrastructureName = clusterinfra.GetInfrastructureName(oc)
autoscalerBaseDir = exutil.FixturePath("testdata", "clusterinfrastructure", "autoscaler")
clusterAutoscalerTemplate = filepath.Join(autoscalerBaseDir, "clusterautoscaler.yaml")
machineAutoscalerTemplate = filepath.Join(autoscalerBaseDir, "machineautoscaler.yaml")
workLoadTemplate = filepath.Join(autoscalerBaseDir, "workload.yaml")
clusterAutoscaler = clusterAutoscalerDescription{
maxNode: 100,
minCore: 0,
maxCore: 320000,
minMemory: 0,
maxMemory: 6400000,
expander: Random,
template: clusterAutoscalerTemplate,
}
workLoad = workLoadDescription{
name: "workload",
namespace: "openshift-machine-api",
template: workLoadTemplate,
}
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-43174-ClusterAutoscaler CR could be deleted with foreground deletion", func() {
_, err := oc.AdminAPIExtensionsV1Client().CustomResourceDefinitions().Get(context.TODO(),
"clusterautoscalers.autoscaling.openshift.io", metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
g.Skip("The cluster does not have pre-requisite CRDs for the test")
}
if err != nil {
e2e.Failf("Failed to get CRD: %v", err)
}
g.By("Create clusterautoscaler")
clusterAutoscaler.createClusterAutoscaler(oc)
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
g.By("Delete clusterautoscaler with foreground deletion")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterautoscaler", "default", "--cascade=foreground").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterautoscaler").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).NotTo(o.ContainSubstring("default"))
})
//author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Low-45430-MachineSet scaling from 0 should be evaluated correctly for the new or changed instance types [Serial][Slow][Disruptive]", func() {
machinesetName := infrastructureName + "-45430"
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-45430",
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset with instance type other than default in cluster")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
clusterinfra.SkipForAwsOutpostCluster(oc)
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with instanceType")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"instanceType": "m5.4xlarge"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could be created successful")
// Creating a new machine takes roughly 5 minutes, so set the timeout to 7 minutes
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
})
//author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-44816-Cluster version operator could remove unrecognized volume mounts [Disruptive]", func() {
// As the cluster-autoscaler-operator deployment is synced by CVO, we don't need a defer to restore the autoscaler deployment
g.By("Update cluster-autoscaler-operator deployment's volumeMounts")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("deploy/cluster-autoscaler-operator", "-n", machineAPINamespace, "-p", `[{"op": "add", "path": "/spec/template/spec/containers/0/volumeMounts/0","value":{"mountPath":"/etc/cluster-autoscaler-operator-invalid/service-ca","name":"cert","readOnly":true}}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check cluster-autoscaler-operator deployment was synced by cvo soon")
err = wait.Poll(15*time.Second, 5*time.Minute, func() (bool, error) {
caoDeploy, _ := oc.AsAdmin().WithoutNamespace().Run("describe").Args("deploy/cluster-autoscaler-operator", "-n", machineAPINamespace).Output()
if strings.Contains(caoDeploy, "service-ca") {
e2e.Logf("cluster-autoscaler-operator deployment was not synced by cvo")
return false, nil
}
e2e.Logf("cluster-autoscaler-operator deployment was synced by cvo")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "cluster-autoscaler-operator deployment was not synced by cvo in 5m")
g.By("Check cluster-autoscaler-operator pod is running")
err = wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
podsStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", machineAPINamespace, "-l", "k8s-app=cluster-autoscaler-operator", "-o=jsonpath={.items[0].status.phase}").Output()
if err != nil || strings.Compare(podsStatus, "Running") != 0 {
e2e.Logf("the pod status is %v, continue to next round", podsStatus)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "cluster-autoscaler-operator pod is not Running")
})
//author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47656-Cluster autoscaler could scale down based on scale down utilization threshold [Slow][Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-47656"
utilThreshold := "0.08"
utilThresholdNum := 8
clusterAutoscalerTemplate = filepath.Join(autoscalerBaseDir, "clusterautoscalerutil.yaml")
clusterAutoscaler = clusterAutoscalerDescription{
maxNode: 100,
minCore: 0,
maxCore: 320000,
minMemory: 0,
maxMemory: 6400000,
utilizationThreshold: utilThreshold,
template: clusterAutoscalerTemplate,
}
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-47656",
namespace: "openshift-machine-api",
maxReplicas: 3,
minReplicas: 1,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create a new machineset")
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could be created successful")
clusterinfra.WaitForMachinesRunning(oc, 3, machinesetName)
workLoad.deleteWorkLoad(oc)
/*
Refer to autoscaler use case OCP-28108.
The machineset scales down about five minutes after the workload is deleted,
so wait five minutes here, then check whether the machineset scaled down based on utilizationThreshold.
*/
time.Sleep(300 * time.Second)
g.By("Check machineset could scale down based on utilizationThreshold")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
machinesRunning, _ := strconv.Atoi(out)
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[0].status.nodeRef.name}", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeInfoFile, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("node", nodeName, "-n", machineAPINamespace).OutputToFile("OCP-47656-nodeinfo.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
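// The pipeline below extracts the "Allocated resources" block from `oc describe node` output and keeps
// the larger of the CPU and memory request percentages, i.e. the node's maximum utilization in percent.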
getUtilCmd := fmt.Sprintf(`grep -A 10 "Allocated resources:" %s |egrep "cpu|memory"|awk -F"[(%%]" 'BEGIN{util=0} $2>util{util=$2} END{print util}'`, nodeInfoFile)
util, err := exec.Command("bash", "-c", getUtilCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
utilNum, err := strconv.Atoi(strings.TrimSpace(string(util)))
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("utilNum:%s utilThresholdNum:%s", utilNum, utilThresholdNum)
if utilNum < utilThresholdNum {
o.Expect(machinesRunning).Should(o.Equal(1))
} else {
o.Expect(machinesRunning).Should(o.Equal(3))
}
})
//author: miyadav
g.It("Author:miyadav-NonHyperShiftHOST-Critical-53080-Add verbosity option to autoscaler CRD [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterAutoscalerTemplate = filepath.Join(autoscalerBaseDir, "clusterautoscalerverbose.yaml")
clusterAutoscaler = clusterAutoscalerDescription{
logVerbosity: 8,
maxNode: 100,
minCore: 0,
maxCore: 320000,
minMemory: 0,
maxMemory: 6400000,
template: clusterAutoscalerTemplate,
}
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Get clusterautoscaler podname")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
podName, err := oc.AsAdmin().Run("get").Args("pods", "-l", "cluster-autoscaler", "-o=jsonpath={.items[0].metadata.name}", "-n", "openshift-machine-api").Output()
if err != nil {
e2e.Logf("error %v is present but this is temprorary..hence trying again ", err.Error())
return false, nil
}
g.By("Get clusterautoscaler log verbosity value for pod")
args, _ := oc.AsAdmin().Run("get").Args("pods", podName, "-n", machineAPINamespace, "-o=jsonpath={.spec.containers[0].args}").Output()
if !strings.Contains(args, "--v=8") {
e2e.Failf("Even after adding logverbosity log levels not changed")
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "autoscaler pod never for created..")
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-44051-ClusterAutoscalerUnableToScaleCPULimitReached alert should be filed when cpu resource is not enough[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-44051"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create clusterautoscaler")
clusterAutoscaler.minCore = 8
clusterAutoscaler.maxCore = 23
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-44051",
namespace: "openshift-machine-api",
maxReplicas: 10,
minReplicas: 1,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check alert ClusterAutoscalerUnableToScaleCPULimitReached is raised")
checkAlertRaised(oc, "ClusterAutoscalerUnableToScaleCPULimitReached")
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-44211-ClusterAutoscalerUnableToScaleMemoryLimitReached alert should be filed when memory resource is not enough[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-44211"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create clusterautoscaler")
clusterAutoscaler.minMemory = 4
clusterAutoscaler.maxMemory = 50
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-44211",
namespace: "openshift-machine-api",
maxReplicas: 10,
minReplicas: 1,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check alert ClusterAutoscalerUnableToScaleMemoryLimitReached is raised")
checkAlertRaised(oc, "ClusterAutoscalerUnableToScaleMemoryLimitReached")
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37854-Autoscaler will scale down the nodegroup that has Failed machine when maxNodeProvisionTime is reached[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.OpenStack, clusterinfra.VSphere)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-37854"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
var invalidValue string
iaasPlatform = clusterinfra.CheckPlatform(oc)
switch iaasPlatform {
case clusterinfra.AWS:
invalidValue = "\"instanceType\": \"invalid\""
case clusterinfra.Azure:
invalidValue = "\"vmSize\": \"invalid\""
case clusterinfra.GCP:
invalidValue = "\"machineType\": \"invalid\""
case clusterinfra.OpenStack:
invalidValue = "\"flavor\": \"invalid\""
case clusterinfra.VSphere:
invalidValue = "\"template\": \"invalid\""
}
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{`+invalidValue+`}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-37854",
namespace: "openshift-machine-api",
maxReplicas: 2,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check new created machine has 'Failed' phase")
clusterinfra.WaitForMachineFailed(oc, machinesetName)
g.By("Check cluster auto scales down and node group will be marked as backoff")
autoscalePodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "cluster-autoscaler=default", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(30*time.Second, 1200*time.Second, func() (bool, error) {
autoscalerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+autoscalePodName, "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
replicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if replicas == "0" && strings.Contains(autoscalerLog, "Scale-up timed out for node group") && strings.Contains(autoscalerLog, "Marking unregistered node failed-machine-openshift-machine-api_") && strings.Contains(autoscalerLog, "openshift-machine-api/"+machinesetName+" is not ready for scaleup - backoff") {
return true, nil
}
e2e.Logf("cluster didn't autoscale down or node group didn't be marked as backoff")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Check didn't scales down or node group didn't be marked as backoff")
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-28876-Machineset should have relevant annotations to support scale from/to zero[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-28876"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Add a new annotation to machineset")
oc.AsAdmin().WithoutNamespace().Run("annotate").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "--overwrite", "new=new").Output()
g.By("Check machineset with valid instanceType have annotations")
machineSetAnnotations, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", machineSetAnnotations)
o.Expect(strings.Contains(machineSetAnnotations, "machine.openshift.io/memoryMb") && strings.Contains(machineSetAnnotations, "new")).To(o.BeTrue())
g.By("Check machineset with invalid instanceType couldn't set autoscaling from zero annotations")
var invalidValue string
iaasPlatform = clusterinfra.CheckPlatform(oc)
switch iaasPlatform {
case clusterinfra.AWS:
invalidValue = "\"instanceType\": \"invalid\""
case clusterinfra.Azure:
invalidValue = "\"vmSize\": \"invalid\""
case clusterinfra.GCP:
invalidValue = "\"machineType\": \"invalid\""
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{`+invalidValue+`}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
machineControllerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "api=clusterapi,k8s-app=controller", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineControllerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+machineControllerPodName, "-c", "machine-controller", "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(machineControllerLog, "unknown instance type") || strings.Contains(machineControllerLog, "Failed to set autoscaling from zero annotations, instance type unknown")).To(o.BeTrue())
})
//author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-22038-Cluster-autoscaler should support scale machinset from/to 0 [Serial][Slow][Disruptive]", func() {
machinesetName := infrastructureName + "-22038"
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-22038",
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.OpenStack, clusterinfra.GCP, clusterinfra.VSphere)
architecture.SkipArchitectures(oc, architecture.MULTI)
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could be created successful")
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Medium-66157-Cluster Autoscaler Operator should inject unique labels on Nutanix platform", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
exutil.By("Create clusterautoscaler")
clusterAutoscaler.createClusterAutoscaler(oc)
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
exutil.By("adding balancedSimilar nodes option for clusterautoscaler")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterautoscaler", "default", "-n", "openshift-machine-api", "-p", `{"spec":{"balanceSimilarNodeGroups": true}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait 10 seconds after patching because a new pod is restarted
time.Sleep(10 * time.Second)
g.By("Check whether the pod has expected flags/options")
expectedFlags := `--balancing-ignore-label=nutanix.com/prism-element-name
--balancing-ignore-label=nutanix.com/prism-element-uuid
--balancing-ignore-label=nutanix.com/prism-host-name
--balancing-ignore-label=nutanix.com/prism-host-uuid
`
flagsArray := strings.Split(expectedFlags, "\n")
for _, flag := range flagsArray {
trimmedFlag := strings.TrimSpace(flag)
output, describeErr := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", "-n", "openshift-machine-api", "-l", "cluster-autoscaler=default").Output()
o.Expect(describeErr).NotTo(o.HaveOccurred())
if strings.Contains(output, trimmedFlag) {
e2e.Logf("Flag '%s' is present.\n", trimmedFlag)
} else {
e2e.Failf("Flag %s is not exist", trimmedFlag)
}
}
})
//author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-64869-autoscaler can predict the correct machineset to scale up/down to allocate a particular arch [Serial][Slow][Disruptive]", func() {
architecture.SkipNonMultiArchCluster(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure)
clusterinfra.SkipConditionally(oc)
architectures := architecture.GetAvailableArchitecturesSet(oc)
var scaleArch *architecture.Architecture
var machineSetNames []string
var machineSetToScale string
for _, arch := range architectures {
machinesetName := infrastructureName + "-64869-" + arch.String()
machineSetNames = append(machineSetNames, machinesetName)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSetByArch(oc, arch)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"metadata":{"labels":{"zero":"zero"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
if scaleArch == nil || arch != architecture.AMD64 {
// The last non-amd64 arch is chosen to be scaled.
// Moreover, regardless of what arch it is, we ensure scaleArch to be non-nil by setting it at least
// once to a non-nil value.
scaleArch = new(architecture.Architecture) // new memory allocation for a variable to host Architecture values
*scaleArch = arch // assign by value (copy the value of arch into the memory pointed to by scaleArch)
machineSetToScale = machinesetName
}
}
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create workload")
workLoadTemplate = filepath.Join(autoscalerBaseDir, "workload-with-affinity.yaml")
workLoad = workLoadDescription{
name: "workload",
namespace: "openshift-machine-api",
template: workLoadTemplate,
arch: *scaleArch,
cpu: getWorkLoadCPU(oc, machineSetToScale),
}
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could only be scaled on this machineset")
o.Eventually(func() int {
return clusterinfra.GetMachineSetReplicas(oc, machineSetToScale)
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(1), "The "+scaleArch.String()+" machineset replicas should be 1")
var replicas int
o.Consistently(func() int {
replicas = 0
for _, machinesetName := range machineSetNames {
if machinesetName != machineSetToScale {
replicas += clusterinfra.GetMachineSetReplicas(oc, machinesetName)
}
}
return replicas
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(0), "The other machineset(s) replicas should be 0")
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Medium-73113-Update CAO to add upstream scale from zero annotations[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.OpenStack, clusterinfra.Nutanix, clusterinfra.IBMCloud)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-73113"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create machineautoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-73113",
namespace: machineAPINamespace,
maxReplicas: 2,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Check machineset have upstream scale from zero annotations")
machineSetAnnotations, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", machineSetAnnotations)
o.Expect(strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/memory") && strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/cpu")).To(o.BeTrue())
if strings.Contains(machineSetAnnotations, "machine.openshift.io/GPU") {
o.Expect(strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/gpu-count") && strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/gpu-type")).To(o.BeTrue())
}
})
//author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73120-Cluster autoscaler support least-waste expander option to decide which machineset to expand [Serial][Slow][Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure)
exutil.By("Create clusterautoscaler")
clusterAutoscaler.expander = LeastWaste
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
exutil.By("Create machinesets and machineautoscalers")
var machineSetNames []string
if architecture.IsMultiArchCluster(oc) {
architectures := architecture.GetAvailableArchitecturesSet(oc)
for _, arch := range architectures {
machinesetName := infrastructureName + "-73120-" + arch.String()
machineSetNames = append(machineSetNames, machinesetName)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSetByArch(oc, arch)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
} else {
machineSetNames = []string{infrastructureName + "-73120-1", infrastructureName + "-73120-2"}
for _, machinesetName := range machineSetNames {
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
arch := architecture.ClusterArchitecture(oc)
iaasPlatform = clusterinfra.FromString(exutil.CheckPlatform(oc))
instanceTypeKey := clusterinfra.GetInstanceTypeKeyByProvider(iaasPlatform)
instanceTypeValues := clusterinfra.GetInstanceTypeValuesByProviderAndArch(iaasPlatform, arch)
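// Patch the two machinesets with different instance types so the LeastWaste expander has distinct node groups to compare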
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machineSetNames[0], "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"`+instanceTypeKey+`":"`+instanceTypeValues[0]+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machineSetNames[0], "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"`+instanceTypeKey+`":"`+instanceTypeValues[1]+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
exutil.By("Check autoscaler scales up based on LeastWaste")
autoscalePodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "cluster-autoscaler=default", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
autoscalerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+autoscalePodName, "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(autoscalerLog, "Expanding Node Group MachineSet/openshift-machine-api/"+machineSetNames[0]+" would waste") && strings.Contains(autoscalerLog, "Expanding Node Group MachineSet/openshift-machine-api/"+machineSetNames[1]+" would waste") {
return true, nil
}
e2e.Logf("There is no LeastWaste info in autoscaler logs")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "cluster didn't scale up based on LeastWaste")
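// With maxReplicas of 1 on each machineautoscaler, a product of 1 means both machinesets scaled up to exactly one replica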
o.Eventually(func() int {
return clusterinfra.GetMachineSetReplicas(oc, machineSetNames[0]) * clusterinfra.GetMachineSetReplicas(oc, machineSetNames[1])
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(1), "The machinesets should scale up to 1")
})
//author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73446-Cluster autoscaler support priority expander option to decide which machineset to expand [Serial][Slow][Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure)
exutil.By("Create machinesets and machineautoscalers")
var machineSetNames []string
if architecture.IsMultiArchCluster(oc) {
architectures := architecture.GetAvailableArchitecturesSet(oc)
for _, arch := range architectures {
machinesetName := infrastructureName + "-73446-" + arch.String()
machineSetNames = append(machineSetNames, machinesetName)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSetByArch(oc, arch)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
} else {
machineSetNames = []string{infrastructureName + "-73446-1", infrastructureName + "-73446-2"}
for _, machinesetName := range machineSetNames {
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
}
exutil.By("Create clusterautoscaler")
clusterAutoscaler.expander = Priority
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
exutil.By("Create cluster-autoscaler-priority-expander")
priorityExpanderTemplate := filepath.Join(autoscalerBaseDir, "cluster-autoscaler-priority-expander.yaml")
priorityExpander := priorityExpanderDescription{
p10: machineSetNames[0],
p20: machineSetNames[1],
namespace: "openshift-machine-api",
template: priorityExpanderTemplate,
}
defer priorityExpander.deletePriorityExpander(oc)
priorityExpander.createPriorityExpander(oc)
exutil.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
exutil.By("Check autoscaler scales up based on Priority")
o.Eventually(func() int {
return clusterinfra.GetMachineSetReplicas(oc, machineSetNames[0]) * clusterinfra.GetMachineSetReplicas(oc, machineSetNames[1])
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(1), "The machinesets should scale to 1")
o.Expect(exutil.CompareMachineCreationTime(oc, machineSetNames[0], machineSetNames[1])).Should(o.Equal(true))
})
// author: [email protected]
// This case fails because of bug https://issues.redhat.com/browse/OCPBUGS-9841, so it is marked [Flaky]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-68627-Cluster autoscaler can rescale up from 0 after the first scale up and taint nodes directly [Disruptive][Flaky]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.OpenStack, clusterinfra.Nutanix)
machinesetName := infrastructureName + "-68627"
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-68627",
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
exutil.By("Create machineset")
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
exutil.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
exutil.By("Create workload and wait for machine running")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Taint node NoSchedule with a custom taint")
nodeName := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", nodeName, "key1=value1:NoSchedule").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("Delete workload pod and wait for cluster stable")
workLoad.deleteWorkLoad(oc)
clusterinfra.WaitForMachinesRunning(oc, 0, machinesetName)
exutil.By("Once a zero, create another wave of pods to scale up cluster")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 6327e15c-5dbf-4630-a843-f643ace7ba0a | Author:zhsun-NonHyperShiftHOST-Medium-43174-ClusterAutoscaler CR could be deleted with foreground deletion | ['"context"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-43174-ClusterAutoscaler CR could be deleted with foreground deletion", func() {
_, err := oc.AdminAPIExtensionsV1Client().CustomResourceDefinitions().Get(context.TODO(),
"clusterautoscalers.autoscaling.openshift.io", metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
g.Skip("The cluster does not have pre-requisite CRDs for the test")
}
if err != nil {
e2e.Failf("Failed to get CRD: %v", err)
}
g.By("Create clusterautoscaler")
clusterAutoscaler.createClusterAutoscaler(oc)
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
g.By("Delete clusterautoscaler with foreground deletion")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterautoscaler", "default", "--cascade=foreground").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterautoscaler").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).NotTo(o.ContainSubstring("default"))
}) | |||||
test case | openshift/openshift-tests-private | d9486f1a-af57-48a0-bd0f-76af1eb7473c | Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Low-45430-MachineSet scaling from 0 should be evaluated correctly for the new or changed instance types [Serial][Slow][Disruptive] | ['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Low-45430-MachineSet scaling from 0 should be evaluated correctly for the new or changed instance types [Serial][Slow][Disruptive]", func() {
machinesetName := infrastructureName + "-45430"
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-45430",
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset with instance type other than default in cluster")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
clusterinfra.SkipForAwsOutpostCluster(oc)
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with instanceType")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"instanceType": "m5.4xlarge"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could be created successful")
// Creat a new machine taking roughly 5 minutes , set timeout as 7 minutes
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}) | |||||
test case | openshift/openshift-tests-private | 76ab2e3d-43de-4346-8e8b-66abc346c337 | Author:zhsun-NonHyperShiftHOST-Medium-44816-Cluster version operator could remove unrecognized volume mounts [Disruptive] | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-44816-Cluster version operator could remove unrecognized volume mounts [Disruptive]", func() {
// As the cluster-autoscaler-operator deployment will be synced by CVO, we don't need a defer to restore the autoscaler deployment
g.By("Update cluster-autoscaler-operator deployment's volumeMounts")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("deploy/cluster-autoscaler-operator", "-n", machineAPINamespace, "-p", `[{"op": "add", "path": "/spec/template/spec/containers/0/volumeMounts/0","value":{"mountPath":"/etc/cluster-autoscaler-operator-invalid/service-ca","name":"cert","readOnly":true}}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check cluster-autoscaler-operator deployment was synced by cvo soon")
err = wait.Poll(15*time.Second, 5*time.Minute, func() (bool, error) {
caoDeploy, _ := oc.AsAdmin().WithoutNamespace().Run("describe").Args("deploy/cluster-autoscaler-operator", "-n", machineAPINamespace).Output()
if strings.Contains(caoDeploy, "service-ca") {
e2e.Logf("cluster-autoscaler-operator deployment was not synced by cvo")
return false, nil
}
e2e.Logf("cluster-autoscaler-operator deployment was synced by cvo")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "cluster-autoscaler-operator deployment was not synced by cvo in 5m")
g.By("Check cluster-autoscaler-operator pod is running")
err = wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
podsStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", machineAPINamespace, "-l", "k8s-app=cluster-autoscaler-operator", "-o=jsonpath={.items[0].status.phase}").Output()
if err != nil || strings.Compare(podsStatus, "Running") != 0 {
e2e.Logf("the pod status is %v, continue to next round", podsStatus)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "cluster-autoscaler-operator pod is not Running")
}) | |||||
test case | openshift/openshift-tests-private | 8b3e02f7-e1d4-44ec-8e69-2ae60222815e | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47656-Cluster autoscaler could scale down based on scale down utilization threshold [Slow][Disruptive] | ['"fmt"', '"os/exec"', '"path/filepath"', '"strconv"', '"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'exutil "github.com/openshift/openshift-tests-private/test/extended/util"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47656-Cluster autoscaler could scale down based on scale down utilization threshold [Slow][Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-47656"
utilThreshold := "0.08"
utilThresholdNum := 8
clusterAutoscalerTemplate = filepath.Join(autoscalerBaseDir, "clusterautoscalerutil.yaml")
clusterAutoscaler = clusterAutoscalerDescription{
maxNode: 100,
minCore: 0,
maxCore: 320000,
minMemory: 0,
maxMemory: 6400000,
utilizationThreshold: utilThreshold,
template: clusterAutoscalerTemplate,
}
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-47656",
namespace: "openshift-machine-api",
maxReplicas: 3,
minReplicas: 1,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create a new machineset")
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could be created successful")
clusterinfra.WaitForMachinesRunning(oc, 3, machinesetName)
workLoad.deleteWorkLoad(oc)
/*
Refer to autoscaler use case OCP-28108.
Wait five minutes after deleting workload, the machineset will scale down,
so wait five minutes here, then check whether the machineset is scaled down based on utilizationThreshold.
*/
time.Sleep(300 * time.Second)
g.By("Check machineset could scale down based on utilizationThreshold")
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
machinesRunning, _ := strconv.Atoi(out)
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[0].status.nodeRef.name}", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeInfoFile, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("node", nodeName, "-n", machineAPINamespace).OutputToFile("OCP-47656-nodeinfo.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
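// Extract the higher of the CPU and memory request percentages from the node's 'Allocated resources' section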
getUtilCmd := fmt.Sprintf(`grep -A 10 "Allocated resources:" %s |egrep "cpu|memory"|awk -F"[(%%]" 'BEGIN{util=0} $2>util{util=$2} END{print util}'`, nodeInfoFile)
util, err := exec.Command("bash", "-c", getUtilCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
utilNum, err := strconv.Atoi(strings.TrimSpace(string(util)))
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("utilNum:%s utilThresholdNum:%s", utilNum, utilThresholdNum)
if utilNum < utilThresholdNum {
o.Expect(machinesRunning).Should(o.Equal(1))
} else {
o.Expect(machinesRunning).Should(o.Equal(3))
}
}) | |||||
test case | openshift/openshift-tests-private | 86d2eda4-89f7-4b90-ad1c-35615d64186d | Author:miyadav-NonHyperShiftHOST-Critical-53080-Add verbosity option to autoscaler CRD [Disruptive] | ['"path/filepath"', '"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:miyadav-NonHyperShiftHOST-Critical-53080-Add verbosity option to autoscaler CRD [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterAutoscalerTemplate = filepath.Join(autoscalerBaseDir, "clusterautoscalerverbose.yaml")
clusterAutoscaler = clusterAutoscalerDescription{
logVerbosity: 8,
maxNode: 100,
minCore: 0,
maxCore: 320000,
minMemory: 0,
maxMemory: 6400000,
template: clusterAutoscalerTemplate,
}
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Get clusterautoscaler podname")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
podName, err := oc.AsAdmin().Run("get").Args("pods", "-l", "cluster-autoscaler", "-o=jsonpath={.items[0].metadata.name}", "-n", "openshift-machine-api").Output()
if err != nil {
e2e.Logf("error %v is present but this is temprorary..hence trying again ", err.Error())
return false, nil
}
g.By("Get clusterautoscaler log verbosity value for pod")
args, _ := oc.AsAdmin().Run("get").Args("pods", podName, "-n", machineAPINamespace, "-o=jsonpath={.spec.containers[0].args}").Output()
if !strings.Contains(args, "--v=8") {
e2e.Failf("Even after adding logverbosity log levels not changed")
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "autoscaler pod never for created..")
}) | |||||
test case | openshift/openshift-tests-private | 4e9eff9c-1bc2-47e0-9aca-ada4cb5899e1 | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-44051-ClusterAutoscalerUnableToScaleCPULimitReached alert should be filed when cpu resource is not enough[Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-44051-ClusterAutoscalerUnableToScaleCPULimitReached alert should be filed when cpu resource is not enough[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-44051"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create clusterautoscaler")
clusterAutoscaler.minCore = 8
clusterAutoscaler.maxCore = 23
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-44051",
namespace: "openshift-machine-api",
maxReplicas: 10,
minReplicas: 1,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check alert ClusterAutoscalerUnableToScaleCPULimitReached is raised")
checkAlertRaised(oc, "ClusterAutoscalerUnableToScaleCPULimitReached")
}) | |||||
test case | openshift/openshift-tests-private | caf6e164-e2e9-4c35-b5d2-d24d26a165f6 | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-44211-ClusterAutoscalerUnableToScaleMemoryLimitReached alert should be filed when memory resource is not enough[Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-44211-ClusterAutoscalerUnableToScaleMemoryLimitReached alert should be filed when memory resource is not enough[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-44211"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create clusterautoscaler")
clusterAutoscaler.minMemory = 4
clusterAutoscaler.maxMemory = 50
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-44211",
namespace: "openshift-machine-api",
maxReplicas: 10,
minReplicas: 1,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check alert ClusterAutoscalerUnableToScaleMemoryLimitReached is raised")
checkAlertRaised(oc, "ClusterAutoscalerUnableToScaleMemoryLimitReached")
}) | |||||
test case | openshift/openshift-tests-private | aa39ab58-113c-4dca-9a27-8d53d50cf7db | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37854-Autoscaler will scale down the nodegroup that has Failed machine when maxNodeProvisionTime is reached[Disruptive] | ['"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37854-Autoscaler will scale down the nodegroup that has Failed machine when maxNodeProvisionTime is reached[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.OpenStack, clusterinfra.VSphere)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-37854"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
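// Patch the machineset with a provider-specific invalid value so the newly created machine fails to provision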
var invalidValue string
iaasPlatform = clusterinfra.CheckPlatform(oc)
switch iaasPlatform {
case clusterinfra.AWS:
invalidValue = "\"instanceType\": \"invalid\""
case clusterinfra.Azure:
invalidValue = "\"vmSize\": \"invalid\""
case clusterinfra.GCP:
invalidValue = "\"machineType\": \"invalid\""
case clusterinfra.OpenStack:
invalidValue = "\"flavor\": \"invalid\""
case clusterinfra.VSphere:
invalidValue = "\"template\": \"invalid\""
}
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{`+invalidValue+`}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create MachineAutoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-37854",
namespace: "openshift-machine-api",
maxReplicas: 2,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check new created machine has 'Failed' phase")
clusterinfra.WaitForMachineFailed(oc, machinesetName)
g.By("Check cluster auto scales down and node group will be marked as backoff")
autoscalePodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "cluster-autoscaler=default", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(30*time.Second, 1200*time.Second, func() (bool, error) {
autoscalerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+autoscalePodName, "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
replicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if replicas == "0" && strings.Contains(autoscalerLog, "Scale-up timed out for node group") && strings.Contains(autoscalerLog, "Marking unregistered node failed-machine-openshift-machine-api_") && strings.Contains(autoscalerLog, "openshift-machine-api/"+machinesetName+" is not ready for scaleup - backoff") {
return true, nil
}
e2e.Logf("cluster didn't autoscale down or node group didn't be marked as backoff")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Check didn't scales down or node group didn't be marked as backoff")
}) | |||||
test case | openshift/openshift-tests-private | d099a5c1-2fc5-41a8-a130-54a3c5cb298c | Author:zhsun-NonHyperShiftHOST-Medium-28876-Machineset should have relevant annotations to support scale from/to zero[Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-28876-Machineset should have relevant annotations to support scale from/to zero[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-28876"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Add a new annotation to machineset")
oc.AsAdmin().WithoutNamespace().Run("annotate").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "--overwrite", "new=new").Output()
g.By("Check machineset with valid instanceType have annotations")
machineSetAnnotations, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", machineSetAnnotations)
o.Expect(strings.Contains(machineSetAnnotations, "machine.openshift.io/memoryMb") && strings.Contains(machineSetAnnotations, "new")).To(o.BeTrue())
g.By("Check machineset with invalid instanceType couldn't set autoscaling from zero annotations")
var invalidValue string
iaasPlatform = clusterinfra.CheckPlatform(oc)
switch iaasPlatform {
case clusterinfra.AWS:
invalidValue = "\"instanceType\": \"invalid\""
case clusterinfra.Azure:
invalidValue = "\"vmSize\": \"invalid\""
case clusterinfra.GCP:
invalidValue = "\"machineType\": \"invalid\""
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{`+invalidValue+`}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
machineControllerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "api=clusterapi,k8s-app=controller", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineControllerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+machineControllerPodName, "-c", "machine-controller", "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(machineControllerLog, "unknown instance type") || strings.Contains(machineControllerLog, "Failed to set autoscaling from zero annotations, instance type unknown")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | c03dff4a-8112-4cb0-ae16-15b3024bc643 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-22038-Cluster-autoscaler should support scale machinset from/to 0 [Serial][Slow][Disruptive] | ['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-22038-Cluster-autoscaler should support scale machinset from/to 0 [Serial][Slow][Disruptive]", func() {
machinesetName := infrastructureName + "-22038"
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-22038",
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.OpenStack, clusterinfra.GCP, clusterinfra.VSphere)
architecture.SkipArchitectures(oc, architecture.MULTI)
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could be created successful")
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}) | |||||
test case | openshift/openshift-tests-private | 426d1577-ebab-445d-a9b5-9bce1aa3b0e9 | Author:miyadav-NonHyperShiftHOST-Medium-66157-Cluster Autoscaler Operator should inject unique labels on Nutanix platform | ['"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:miyadav-NonHyperShiftHOST-Medium-66157-Cluster Autoscaler Operator should inject unique labels on Nutanix platform", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
exutil.By("Create clusterautoscaler")
clusterAutoscaler.createClusterAutoscaler(oc)
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
exutil.By("adding balancedSimilar nodes option for clusterautoscaler")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterautoscaler", "default", "-n", "openshift-machine-api", "-p", `{"spec":{"balanceSimilarNodeGroups": true}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait 10 seconds after patching because a new pod is restarted
time.Sleep(10 * time.Second)
g.By("Check whether the pod has expected flags/options")
expectedFlags := `--balancing-ignore-label=nutanix.com/prism-element-name
--balancing-ignore-label=nutanix.com/prism-element-uuid
--balancing-ignore-label=nutanix.com/prism-host-name
--balancing-ignore-label=nutanix.com/prism-host-uuid
`
flagsArray := strings.Split(expectedFlags, "\n")
for _, flag := range flagsArray {
trimmedFlag := strings.TrimSpace(flag)
output, describeErr := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", "-n", "openshift-machine-api", "-l", "cluster-autoscaler=default").Output()
o.Expect(describeErr).NotTo(o.HaveOccurred())
if strings.Contains(output, trimmedFlag) {
e2e.Logf("Flag '%s' is present.\n", trimmedFlag)
} else {
e2e.Failf("Flag %s is not exist", trimmedFlag)
}
}
}) | |||||
test case | openshift/openshift-tests-private | a9bae29f-f174-4f08-8080-d28eeb18b3f4 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-64869-autoscaler can predict the correct machineset to scale up/down to allocate a particular arch [Serial][Slow][Disruptive] | ['"path/filepath"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-64869-autoscaler can predict the correct machineset to scale up/down to allocate a particular arch [Serial][Slow][Disruptive]", func() {
architecture.SkipNonMultiArchCluster(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure)
clusterinfra.SkipConditionally(oc)
architectures := architecture.GetAvailableArchitecturesSet(oc)
var scaleArch *architecture.Architecture
var machineSetNames []string
var machineSetToScale string
for _, arch := range architectures {
machinesetName := infrastructureName + "-64869-" + arch.String()
machineSetNames = append(machineSetNames, machinesetName)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSetByArch(oc, arch)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"metadata":{"labels":{"zero":"zero"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
if scaleArch == nil || arch != architecture.AMD64 {
// The last non-amd64 arch is chosen to be scaled.
// Moreover, regardless of what arch it is, we ensure scaleArch to be non-nil by setting it at least
// once to a non-nil value.
scaleArch = new(architecture.Architecture) // new memory allocation for a variable to host Architecture values
*scaleArch = arch // assign by value (copy the value of arch into the memory pointed to by scaleArch)
machineSetToScale = machinesetName
}
}
g.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
g.By("Create workload")
workLoadTemplate = filepath.Join(autoscalerBaseDir, "workload-with-affinity.yaml")
workLoad = workLoadDescription{
name: "workload",
namespace: "openshift-machine-api",
template: workLoadTemplate,
arch: *scaleArch,
cpu: getWorkLoadCPU(oc, machineSetToScale),
}
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
g.By("Check machine could only be scaled on this machineset")
o.Eventually(func() int {
return clusterinfra.GetMachineSetReplicas(oc, machineSetToScale)
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(1), "The "+scaleArch.String()+" machineset replicas should be 1")
var replicas int
o.Consistently(func() int {
replicas = 0
for _, machinesetName := range machineSetNames {
if machinesetName != machineSetToScale {
replicas += clusterinfra.GetMachineSetReplicas(oc, machinesetName)
}
}
return replicas
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(0), "The other machineset(s) replicas should be 0")
}) | |||||
test case | openshift/openshift-tests-private | 3d01b4a2-66ea-4515-96fa-c30c865e6862 | Author:huliu-NonHyperShiftHOST-Medium-73113-Update CAO to add upstream scale from zero annotations[Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:huliu-NonHyperShiftHOST-Medium-73113-Update CAO to add upstream scale from zero annotations[Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.OpenStack, clusterinfra.Nutanix, clusterinfra.IBMCloud)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-73113"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create machineautoscaler")
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-73113",
namespace: machineAPINamespace,
maxReplicas: 2,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
g.By("Check machineset have upstream scale from zero annotations")
machineSetAnnotations, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", machineSetAnnotations)
o.Expect(strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/memory") && strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/cpu")).To(o.BeTrue())
if strings.Contains(machineSetAnnotations, "machine.openshift.io/GPU") {
o.Expect(strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/gpu-count") && strings.Contains(machineSetAnnotations, "capacity.cluster-autoscaler.kubernetes.io/gpu-type")).To(o.BeTrue())
}
}) | |||||
test case | openshift/openshift-tests-private | 234aaebc-e6f5-4669-9c50-85dfeb24cc22 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73120-Cluster autoscaler support least-waste expander option to decide which machineset to expand [Serial][Slow][Disruptive] | ['"strings"', '"time"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73120-Cluster autoscaler support least-waste expander option to decide which machineset to expand [Serial][Slow][Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure)
exutil.By("Create clusterautoscaler")
clusterAutoscaler.expander = LeastWaste
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
exutil.By("Create machinesets and machineautoscalers")
var machineSetNames []string
if architecture.IsMultiArchCluster(oc) {
architectures := architecture.GetAvailableArchitecturesSet(oc)
for _, arch := range architectures {
machinesetName := infrastructureName + "-73120-" + arch.String()
machineSetNames = append(machineSetNames, machinesetName)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSetByArch(oc, arch)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
} else {
machineSetNames = []string{infrastructureName + "-73120-1", infrastructureName + "-73120-2"}
for _, machinesetName := range machineSetNames {
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
arch := architecture.ClusterArchitecture(oc)
iaasPlatform = clusterinfra.FromString(exutil.CheckPlatform(oc))
instanceTypeKey := clusterinfra.GetInstanceTypeKeyByProvider(iaasPlatform)
instanceTypeValues := clusterinfra.GetInstanceTypeValuesByProviderAndArch(iaasPlatform, arch)
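// Patch the two machinesets with different instance types so the LeastWaste expander has distinct node groups to compare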
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machineSetNames[0], "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"`+instanceTypeKey+`":"`+instanceTypeValues[0]+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machineSetNames[0], "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"`+instanceTypeKey+`":"`+instanceTypeValues[1]+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
exutil.By("Check autoscaler scales up based on LeastWaste")
autoscalePodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "cluster-autoscaler=default", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
autoscalerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+autoscalePodName, "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(autoscalerLog, "Expanding Node Group MachineSet/openshift-machine-api/"+machineSetNames[0]+" would waste") && strings.Contains(autoscalerLog, "Expanding Node Group MachineSet/openshift-machine-api/"+machineSetNames[1]+" would waste") {
return true, nil
}
e2e.Logf("There is no LeastWaste info in autoscaler logs")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "cluster didn't scale up based on LeastWaste")
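// With maxReplicas of 1 on each machineautoscaler, a product of 1 means both machinesets scaled up to exactly one replica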
o.Eventually(func() int {
return clusterinfra.GetMachineSetReplicas(oc, machineSetNames[0]) * clusterinfra.GetMachineSetReplicas(oc, machineSetNames[1])
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(1), "The machinesets should scale up to 1")
}) | |||||
test case | openshift/openshift-tests-private | bc12d954-29cb-4b6f-ba44-21b84ca7beef | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73446-Cluster autoscaler support priority expander option to decide which machineset to expand [Serial][Slow][Disruptive] | ['"path/filepath"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73446-Cluster autoscaler support priority expander option to decide which machineset to expand [Serial][Slow][Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure)
exutil.By("Create machinesets and machineautoscalers")
var machineSetNames []string
if architecture.IsMultiArchCluster(oc) {
architectures := architecture.GetAvailableArchitecturesSet(oc)
for _, arch := range architectures {
machinesetName := infrastructureName + "-73446-" + arch.String()
machineSetNames = append(machineSetNames, machinesetName)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
g.By("Create machineset")
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSetByArch(oc, arch)
g.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
} else {
machineSetNames = []string{infrastructureName + "-73446-1", infrastructureName + "-73446-2"}
for _, machinesetName := range machineSetNames {
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineAutoscaler := machineAutoscalerDescription{
name: machinesetName,
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
}
}
exutil.By("Create clusterautoscaler")
clusterAutoscaler.expander = Priority
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
exutil.By("Create cluster-autoscaler-priority-expander")
priorityExpanderTemplate := filepath.Join(autoscalerBaseDir, "cluster-autoscaler-priority-expander.yaml")
priorityExpander := priorityExpanderDescription{
p10: machineSetNames[0],
p20: machineSetNames[1],
namespace: "openshift-machine-api",
template: priorityExpanderTemplate,
}
defer priorityExpander.deletePriorityExpander(oc)
priorityExpander.createPriorityExpander(oc)
exutil.By("Create workload")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
exutil.By("Check autoscaler scales up based on Priority")
o.Eventually(func() int {
return clusterinfra.GetMachineSetReplicas(oc, machineSetNames[0]) * clusterinfra.GetMachineSetReplicas(oc, machineSetNames[1])
}, defaultTimeout, defaultTimeout/10).Should(o.Equal(1), "The machinesets should scale to 1")
o.Expect(exutil.CompareMachineCreationTime(oc, machineSetNames[0], machineSetNames[1])).Should(o.Equal(true))
}) | |||||
test case | openshift/openshift-tests-private | 9b7bbb69-e8a8-4b26-9038-79948d15da71 | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-68627-Cluster autoscaler can rescale up from 0 after the first scale up and taint nodes directly [Disruptive][Flaky] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-68627-Cluster autoscaler can rescale up from 0 after the first scale up and taint nodes directly [Disruptive][Flaky]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.OpenStack, clusterinfra.Nutanix)
machinesetName := infrastructureName + "-68627"
machineAutoscaler = machineAutoscalerDescription{
name: "machineautoscaler-68627",
namespace: "openshift-machine-api",
maxReplicas: 1,
minReplicas: 0,
template: machineAutoscalerTemplate,
machineSetName: machinesetName,
}
exutil.By("Create machineset")
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Create MachineAutoscaler")
defer machineAutoscaler.deleteMachineAutoscaler(oc)
machineAutoscaler.createMachineAutoscaler(oc)
exutil.By("Create clusterautoscaler")
defer clusterAutoscaler.deleteClusterAutoscaler(oc)
clusterAutoscaler.createClusterAutoscaler(oc)
exutil.By("Create workload and wait for machine running")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Taint node NoSchedule with a custom taint")
nodeName := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", nodeName, "key1=value1:NoSchedule").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
exutil.By("Delete workload pod and wait for cluster stable")
workLoad.deleteWorkLoad(oc)
clusterinfra.WaitForMachinesRunning(oc, 0, machinesetName)
exutil.By("Once a zero, create another wave of pods to scale up cluster")
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}) | |||||
file | openshift/openshift-tests-private | 694a119e-2b83-47b8-acf9-7cdf4ad37843 | autoscaler_utils | import (
"strconv"
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | package clusterinfrastructure
import (
"strconv"
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type ExpanderImplementation int
const (
Random ExpanderImplementation = iota
LeastWaste
Priority
)
type clusterAutoscalerDescription struct {
maxNode int
minCore int
maxCore int
minMemory int
maxMemory int
utilizationThreshold string
template string
logVerbosity int
expander ExpanderImplementation
}
type machineAutoscalerDescription struct {
name string
namespace string
maxReplicas int
minReplicas int
template string
machineSetName string
}
type workLoadDescription struct {
name string
namespace string
template string
arch architecture.Architecture
cpu string
label string
}
type priorityExpanderDescription struct {
template string
namespace string
p10 string
p20 string
}
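// createClusterAutoscaler creates the ClusterAutoscaler from the template, picking the parameter set
// based on whether the template name contains "util" (utilization threshold), "verbose" (log verbosity)
// or neither (expander).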
func (clusterAutoscaler *clusterAutoscalerDescription) createClusterAutoscaler(oc *exutil.CLI) {
e2e.Logf("Creating clusterautoscaler ...")
var err error
if strings.Contains(clusterAutoscaler.template, "util") {
err = applyResourceFromTemplate(oc, "-f", clusterAutoscaler.template, "-p", "MAXNODE="+strconv.Itoa(clusterAutoscaler.maxNode), "MINCORE="+strconv.Itoa(clusterAutoscaler.minCore), "MAXCORE="+strconv.Itoa(clusterAutoscaler.maxCore), "MINMEMORY="+strconv.Itoa(clusterAutoscaler.minMemory), "MAXMEMORY="+strconv.Itoa(clusterAutoscaler.maxMemory), "UTILIZATIONTHRESHOLD="+clusterAutoscaler.utilizationThreshold)
} else if strings.Contains(clusterAutoscaler.template, "verbose") {
err = applyResourceFromTemplate(oc, "-f", clusterAutoscaler.template, "-p", "MAXNODE="+strconv.Itoa(clusterAutoscaler.maxNode), "MINCORE="+strconv.Itoa(clusterAutoscaler.minCore), "MAXCORE="+strconv.Itoa(clusterAutoscaler.maxCore), "MINMEMORY="+strconv.Itoa(clusterAutoscaler.minMemory), "MAXMEMORY="+strconv.Itoa(clusterAutoscaler.maxMemory), "LOGVERBOSITY="+strconv.Itoa(clusterAutoscaler.logVerbosity))
} else {
err = applyResourceFromTemplate(oc, "-f", clusterAutoscaler.template, "-p", "MAXNODE="+strconv.Itoa(clusterAutoscaler.maxNode), "MINCORE="+strconv.Itoa(clusterAutoscaler.minCore), "MAXCORE="+strconv.Itoa(clusterAutoscaler.maxCore), "MINMEMORY="+strconv.Itoa(clusterAutoscaler.minMemory), "MAXMEMORY="+strconv.Itoa(clusterAutoscaler.maxMemory), "EXPANDER="+clusterAutoscaler.expander.String())
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (clusterAutoscaler *clusterAutoscalerDescription) deleteClusterAutoscaler(oc *exutil.CLI) error {
e2e.Logf("Deleting clusterautoscaler ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterautoscaler", "default").Execute()
}
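// createMachineAutoscaler creates a MachineAutoscaler targeting the given machineset with the configured min/max replicas.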
func (machineAutoscaler *machineAutoscalerDescription) createMachineAutoscaler(oc *exutil.CLI) {
e2e.Logf("Creating machineautoscaler ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", machineAutoscaler.template, "-p", "NAME="+machineAutoscaler.name, "NAMESPACE="+machineAPINamespace, "MAXREPLICAS="+strconv.Itoa(machineAutoscaler.maxReplicas), "MINREPLICAS="+strconv.Itoa(machineAutoscaler.minReplicas), "MACHINESETNAME="+machineAutoscaler.machineSetName)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (machineAutoscaler *machineAutoscalerDescription) deleteMachineAutoscaler(oc *exutil.CLI) error {
e2e.Logf("Deleting a machineautoscaler ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("machineautoscaler", machineAutoscaler.name, "-n", machineAPINamespace).Execute()
}
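// createWorkLoad creates the workload job from the template; templates whose names contain "affinity" or "label" take additional parameters.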
func (workLoad *workLoadDescription) createWorkLoad(oc *exutil.CLI) {
e2e.Logf("Creating workLoad ...")
var err error
if strings.Contains(workLoad.template, "affinity") {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", workLoad.template, "-p", "NAME="+workLoad.name, "NAMESPACE="+workLoad.namespace, "ARCH="+workLoad.arch.String(), "CPU="+workLoad.cpu)
} else if strings.Contains(workLoad.template, "label") {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", workLoad.template, "-p", "NAME="+workLoad.name, "NAMESPACE="+workLoad.namespace, "LABEL="+workLoad.label)
} else {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", workLoad.template, "-p", "NAME="+workLoad.name, "NAMESPACE="+workLoad.namespace)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (workLoad *workLoadDescription) deleteWorkLoad(oc *exutil.CLI) error {
e2e.Logf("Deleting workload ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("job", workLoad.name, "-n", machineAPINamespace).Execute()
}
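// getWorkLoadCPU reads the machineset's machine.openshift.io/vCPU annotation and returns 60% of it as a millicore CPU request string.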
func getWorkLoadCPU(oc *exutil.CLI, machineSetName string) string {
e2e.Logf("Setting workload CPU ...")
cpuAnnotation, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machineSetName, "-n", machineAPINamespace, "-o=jsonpath={.metadata.annotations.machine\\.openshift\\.io\\/vCPU}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
cpuFloat, err := strconv.ParseFloat(cpuAnnotation, 64)
o.Expect(err).NotTo(o.HaveOccurred())
cpu := cpuFloat * 1000 * 0.6
return strconv.FormatFloat(cpu, 'f', -1, 64) + "m"
}
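// createPriorityExpander creates the cluster-autoscaler-priority-expander ConfigMap from the template, wiring the p10/p20 machineset names into it.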
func (priorityExpander *priorityExpanderDescription) createPriorityExpander(oc *exutil.CLI) {
e2e.Logf("Creating clusterAutoscalerPriorityExpander ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", priorityExpander.template, "NAMESPACE="+priorityExpander.namespace, "-p", "P10="+priorityExpander.p10, "P20="+priorityExpander.p20)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (priorityExpander *priorityExpanderDescription) deletePriorityExpander(oc *exutil.CLI) error {
e2e.Logf("Deleting clusterAutoscalerPriorityExpander ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("cm", "cluster-autoscaler-priority-expander", "-n", machineAPINamespace).Execute()
}
// String returns the string value for the given Expander
func (a ExpanderImplementation) String() string {
switch a {
case Random:
return "Random"
case LeastWaste:
return "LeastWaste"
case Priority:
return "Priority"
default:
e2e.Failf("Unknown expander %d", a)
}
return ""
}
| package clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 0ba2856b-9259-43d8-a719-d276ffdd39e3 | createClusterAutoscaler | ['"strconv"', '"strings"', 'exutil "github.com/openshift/openshift-tests-private/test/extended/util"'] | ['clusterAutoscalerDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (clusterAutoscaler *clusterAutoscalerDescription) createClusterAutoscaler(oc *exutil.CLI) {
e2e.Logf("Creating clusterautoscaler ...")
var err error
if strings.Contains(clusterAutoscaler.template, "util") {
err = applyResourceFromTemplate(oc, "-f", clusterAutoscaler.template, "-p", "MAXNODE="+strconv.Itoa(clusterAutoscaler.maxNode), "MINCORE="+strconv.Itoa(clusterAutoscaler.minCore), "MAXCORE="+strconv.Itoa(clusterAutoscaler.maxCore), "MINMEMORY="+strconv.Itoa(clusterAutoscaler.minMemory), "MAXMEMORY="+strconv.Itoa(clusterAutoscaler.maxMemory), "UTILIZATIONTHRESHOLD="+clusterAutoscaler.utilizationThreshold)
} else if strings.Contains(clusterAutoscaler.template, "verbose") {
err = applyResourceFromTemplate(oc, "-f", clusterAutoscaler.template, "-p", "MAXNODE="+strconv.Itoa(clusterAutoscaler.maxNode), "MINCORE="+strconv.Itoa(clusterAutoscaler.minCore), "MAXCORE="+strconv.Itoa(clusterAutoscaler.maxCore), "MINMEMORY="+strconv.Itoa(clusterAutoscaler.minMemory), "MAXMEMORY="+strconv.Itoa(clusterAutoscaler.maxMemory), "LOGVERBOSITY="+strconv.Itoa(clusterAutoscaler.logVerbosity))
} else {
err = applyResourceFromTemplate(oc, "-f", clusterAutoscaler.template, "-p", "MAXNODE="+strconv.Itoa(clusterAutoscaler.maxNode), "MINCORE="+strconv.Itoa(clusterAutoscaler.minCore), "MAXCORE="+strconv.Itoa(clusterAutoscaler.maxCore), "MINMEMORY="+strconv.Itoa(clusterAutoscaler.minMemory), "MAXMEMORY="+strconv.Itoa(clusterAutoscaler.maxMemory), "EXPANDER="+clusterAutoscaler.expander.String())
}
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | fead1792-c58e-43d2-9011-fb2ea9c7f15b | deleteClusterAutoscaler | ['clusterAutoscalerDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (clusterAutoscaler *clusterAutoscalerDescription) deleteClusterAutoscaler(oc *exutil.CLI) error {
e2e.Logf("Deleting clusterautoscaler ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterautoscaler", "default").Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 2b4a42f5-84ec-4dd9-971b-6c5119fd3f04 | createMachineAutoscaler | ['"strconv"'] | ['machineAutoscalerDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (machineAutoscaler *machineAutoscalerDescription) createMachineAutoscaler(oc *exutil.CLI) {
e2e.Logf("Creating machineautoscaler ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", machineAutoscaler.template, "-p", "NAME="+machineAutoscaler.name, "NAMESPACE="+machineAPINamespace, "MAXREPLICAS="+strconv.Itoa(machineAutoscaler.maxReplicas), "MINREPLICAS="+strconv.Itoa(machineAutoscaler.minReplicas), "MACHINESETNAME="+machineAutoscaler.machineSetName)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | 61251fc3-a878-4c0e-b4c3-dd84bf0dc810 | deleteMachineAutoscaler | ['machineAutoscalerDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (machineAutoscaler *machineAutoscalerDescription) deleteMachineAutoscaler(oc *exutil.CLI) error {
e2e.Logf("Deleting a machineautoscaler ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("machineautoscaler", machineAutoscaler.name, "-n", machineAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | e3ddb117-1ff2-49e0-8e3f-b7ec1182fe70 | createWorkLoad | ['"strings"'] | ['workLoadDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (workLoad *workLoadDescription) createWorkLoad(oc *exutil.CLI) {
e2e.Logf("Creating workLoad ...")
var err error
if strings.Contains(workLoad.template, "affinity") {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", workLoad.template, "-p", "NAME="+workLoad.name, "NAMESPACE="+workLoad.namespace, "ARCH="+workLoad.arch.String(), "CPU="+workLoad.cpu)
} else if strings.Contains(workLoad.template, "label") {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", workLoad.template, "-p", "NAME="+workLoad.name, "NAMESPACE="+workLoad.namespace, "LABEL="+workLoad.label)
} else {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", workLoad.template, "-p", "NAME="+workLoad.name, "NAMESPACE="+workLoad.namespace)
}
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | 62627b5a-5ed1-4ef7-85ac-7a744884be60 | deleteWorkLoad | ['workLoadDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (workLoad *workLoadDescription) deleteWorkLoad(oc *exutil.CLI) error {
e2e.Logf("Deleting workload ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("job", workLoad.name, "-n", machineAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | a94ba725-6184-4f0d-a42d-a1705c09218c | getWorkLoadCPU | ['"strconv"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func getWorkLoadCPU(oc *exutil.CLI, machineSetName string) string {
e2e.Logf("Setting workload CPU ...")
cpuAnnotation, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machineSetName, "-n", machineAPINamespace, "-o=jsonpath={.metadata.annotations.machine\\.openshift\\.io\\/vCPU}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
cpuFloat, err := strconv.ParseFloat(cpuAnnotation, 64)
o.Expect(err).NotTo(o.HaveOccurred())
cpu := cpuFloat * 1000 * 0.6
return strconv.FormatFloat(cpu, 'f', -1, 64) + "m"
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 62bc8482-a367-42d4-8f43-e6a85098dea3 | createPriorityExpander | ['priorityExpanderDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (priorityExpander *priorityExpanderDescription) createPriorityExpander(oc *exutil.CLI) {
e2e.Logf("Creating clusterAutoscalerPriorityExpander ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", priorityExpander.template, "NAMESPACE="+priorityExpander.namespace, "-p", "P10="+priorityExpander.p10, "P20="+priorityExpander.p20)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 48865fa3-21ab-47b2-8983-68bfb0d380cf | deletePriorityExpander | ['priorityExpanderDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (priorityExpander *priorityExpanderDescription) deletePriorityExpander(oc *exutil.CLI) error {
e2e.Logf("Deleting clusterAutoscalerPriorityExpander ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("cm", "cluster-autoscaler-priority-expander", "-n", machineAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 043a9a20-54dd-4cd9-99af-9bde968f47f6 | String | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/autoscaler_utils.go | func (a ExpanderImplementation) String() string {
switch a {
case Random:
return "Random"
case LeastWaste:
return "LeastWaste"
case Priority:
return "Priority"
default:
e2e.Failf("Unknown expander %d", a)
}
return ""
} | clusterinfrastructure | |||||
test | openshift/openshift-tests-private | 8dcdabca-279b-4ccf-8c14-d12f9df37e55 | bmh_related | import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/bmh_related.go | package clusterinfrastructure
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure MAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
iaasPlatform clusterinfra.PlatformType
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = clusterinfra.CheckPlatform(oc)
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Critical-29147-Check that all the baremetalhosts are up and running", func() {
g.By("Check if baremetal cluster")
if !(iaasPlatform == clusterinfra.BareMetal) {
e2e.Logf("Cluster is: %s", iaasPlatform.String())
g.Skip("For Non-baremetal cluster , this is not supported!")
}
g.By("Check if baremetal hosts are up and running")
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "--all-namespaces", "-o=jsonpath={.items[*].status.poweredOn}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "false") {
g.By("Issue with bmh provisioning please review")
e2e.Failf("baremetal hosts not provisioned properly")
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Critical-32198-Verify all master bmh are 'provisioned'", func() {
g.By("Check if baremetal cluster")
if !(iaasPlatform == clusterinfra.BareMetal) {
e2e.Logf("Cluster is: %s", iaasPlatform.String())
g.Skip("For Non-baremetal cluster , this is not supported!")
}
g.By("Verify all master bmh are 'externally provisioned'")
bmhNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, bmhMastersWorkers := range strings.Fields(bmhNames) {
if strings.Contains(bmhMastersWorkers, "master") {
bmhMasters := bmhMastersWorkers
g.By("Check if master bmh is provisioned")
errorCount, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhMasters, "-n", machineAPINamespace, "-o=jsonpath={.status.errorCount}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if errorCount != "0" {
e2e.Failf("baremetal master not provisioned")
}
}
}
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 4a3bde9a-c34f-4457-a0a0-52c6da0996cc | Author:miyadav-NonHyperShiftHOST-Critical-29147-Check that all the baremetalhosts are up and running | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/bmh_related.go | g.It("Author:miyadav-NonHyperShiftHOST-Critical-29147-Check that all the baremetalhosts are up and running", func() {
g.By("Check if baremetal cluster")
if !(iaasPlatform == clusterinfra.BareMetal) {
e2e.Logf("Cluster is: %s", iaasPlatform.String())
g.Skip("For Non-baremetal cluster , this is not supported!")
}
g.By("Check if baremetal hosts are up and running")
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "--all-namespaces", "-o=jsonpath={.items[*].status.poweredOn}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "false") {
g.By("Issue with bmh provisioning please review")
e2e.Failf("baremetal hosts not provisioned properly")
}
}) | |||||
test case | openshift/openshift-tests-private | f4a5fd21-c2ac-43b5-9cd5-8d0730971d58 | Author:miyadav-NonHyperShiftHOST-Critical-32198-Verify all master bmh are 'provisioned' | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/bmh_related.go | g.It("Author:miyadav-NonHyperShiftHOST-Critical-32198-Verify all master bmh are 'provisioned'", func() {
g.By("Check if baremetal cluster")
if !(iaasPlatform == clusterinfra.BareMetal) {
e2e.Logf("Cluster is: %s", iaasPlatform.String())
g.Skip("For Non-baremetal cluster , this is not supported!")
}
g.By("Verify all master bmh are 'externally provisioned'")
bmhNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, bmhMastersWorkers := range strings.Fields(bmhNames) {
if strings.Contains(bmhMastersWorkers, "master") {
bmhMasters := bmhMastersWorkers
g.By("Check if master bmh is provisioned")
errorCount, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhMasters, "-n", machineAPINamespace, "-o=jsonpath={.status.errorCount}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if errorCount != "0" {
e2e.Failf("baremetal master not provisioned")
}
}
}
}) | |||||
test | openshift/openshift-tests-private | aab9686c-6b84-4c8d-a1ec-2554a6b240aa | capi | import (
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | package clusterinfrastructure
import (
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure CAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-api-operator", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-51061-Enable cluster API with feature gate [Disruptive]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP)
publicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-n", "openshift-dns", "-o=jsonpath={.spec.publicZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check if cluster api is deployed, if no, enable it")
capi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(capi) == 0 {
g.By("Enable cluster api with feature gate")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate/cluster", "-p", `{"spec":{"featureSet": "TechPreviewNoUpgrade"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check cluster is still healthy")
waitForClusterHealthy(oc)
}
g.By("Check if cluster api is deployed")
// Private clusters can take extra time even after the cluster becomes healthy; this only happens when publicZone is not present
g.By("if publicZone is {id:qe} or any other value, the cluster is not a private setup, so there is no need to wait")
if publicZone == "" {
	time.Sleep(360 * time.Second)
}
err = wait.Poll(20*time.Second, 6*time.Minute, func() (bool, error) {
capi, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(capi, "cluster-capi-operator") {
return true, nil
}
e2e.Logf("cluster-capi-operator pod hasn't been deployed, continue to next round")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "cluster-capi-operator pod deploy failed")
g.By("Check if machine approver is deployed")
approver, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", machineApproverNamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(approver).To(o.ContainSubstring("machine-approver-capi"))
g.By("Check user data secret is copied from openshift-machine-api namespace to openshift-cluster-api")
secret, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(secret).To(o.ContainSubstring("worker-user-data"))
})
// author: [email protected]
g.It("Author:zhsun-Medium-51141-worker-user-data secret should be synced up [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.VSphere)
skipForCAPINotExist(oc)
g.By("Delete worker-user-data in openshift-cluster-api namespace")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "worker-user-data", "-n", clusterAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check user-data secret is synced up from openshift-machine-api to openshift-cluster-api")
err = wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) {
userData, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", clusterAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(userData, "worker-user-data") {
return true, nil
}
e2e.Logf("Continue to next round")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "user-data secret isn't synced up from openshift-machine-api to openshift-cluster-api")
})
// author: [email protected]
g.It("Author:dtobolik-NonHyperShiftHOST-Medium-61980-Workload annotation missing from deployments", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.VSphere)
skipForCAPINotExist(oc)
deployments, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", clusterAPINamespace, "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
deploymentList := strings.Split(deployments, "\n")
for _, deployment := range deploymentList {
result, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(deployment, "-n", clusterAPINamespace, `-o=jsonpath={.spec.template.metadata.annotations.target\.workload\.openshift\.io/management}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.Equal(`{"effect": "PreferredDuringScheduling"}`))
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Medium-71695-Core CAPI CRDs not deployed on unsupported platforms even when explicitly needed by other operators", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.GCP, clusterinfra.AWS, clusterinfra.AlibabaCloud, clusterinfra.IBMCloud, clusterinfra.Nutanix)
skipForCAPINotExist(oc)
expectedCRDs := `clusterclasses.cluster.x-k8s.io
clusterresourcesetbindings.addons.cluster.x-k8s.io
clusterresourcesets.addons.cluster.x-k8s.io
clusters.cluster.x-k8s.io
extensionconfigs.runtime.cluster.x-k8s.io
machinedeployments.cluster.x-k8s.io
machinehealthchecks.cluster.x-k8s.io
machinepools.cluster.x-k8s.io
machines.cluster.x-k8s.io
machinesets.cluster.x-k8s.io`
expectedCRD := strings.Split(expectedCRDs, "\n")
g.By("Get capi crds in techpreview cluster")
for _, crd := range expectedCRD {
// Execute `oc get crds <CRD name>` for each CRD
crds, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crds", crd, `-o=jsonpath={.metadata.annotations}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(crds).To(o.ContainSubstring("CustomNoUpgrade"))
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Medium-71913-Promote CAPI IPAM CRDs to GA", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.GCP, clusterinfra.AWS, clusterinfra.AlibabaCloud, clusterinfra.IBMCloud, clusterinfra.Nutanix, clusterinfra.PowerVS)
expectedCRDs := `ipaddressclaims.ipam.cluster.x-k8s.io
ipaddresses.ipam.cluster.x-k8s.io`
expectedCRD := strings.Split(expectedCRDs, "\n")
g.By("Get capi crds in cluster")
for _, crd := range expectedCRD {
// Execute `oc get crds <CRD name>` for each CRD
crds, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crds", crd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(crds).NotTo(o.ContainSubstring("not found"))
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Critical-73620-terminationMessagePolicy should be FallbackToLogsOnError", func() {
skipForCAPINotExist(oc)
podNames, err := exutil.GetAllPods(oc, "openshift-cluster-api")
if err != nil {
g.Fail("cluster-api pods seems unstable")
}
g.By("Get pod yaml and check terminationMessagePolicy")
for _, podName := range podNames {
terminationMessagePolicy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", podName, "-n", "openshift-cluster-api", "-o=jsonpath={.spec.containers[0].terminationMessagePolicy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(terminationMessagePolicy).To(o.ContainSubstring("FallbackToLogsOnError"))
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Medium-76078-[capi] Should not be able to remove Infrastructure Cluster resources [Disruptive]", func() {
skipForCAPINotExist(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.VSphere)
g.By("Check Infracluster")
iaasPlatform := clusterinfra.CheckPlatform(oc)
clusterID := clusterinfra.GetInfrastructureName(oc)
infraObj, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(strings.ToUpper(iaasPlatform.String())+"Cluster", "-n", clusterAPINamespace, "-o", "jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if infraObj == clusterID {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(strings.ToUpper(iaasPlatform.String())+"Cluster", infraObj, "-n", clusterAPINamespace).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("denied request: InfraCluster resources with metadata.name corresponding to the cluster infrastructureName cannot be deleted."))
} else {
g.Skip("We are not worried if infrastructure is not same as clusterId...")
}
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 8cc91d87-48d2-4457-9681-4cfc85e3c9df | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-51061-Enable cluster API with feature gate [Disruptive] | ['"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-51061-Enable cluster API with feature gate [Disruptive]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP)
publicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-n", "openshift-dns", "-o=jsonpath={.spec.publicZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check if cluster api is deployed, if no, enable it")
capi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(capi) == 0 {
g.By("Enable cluster api with feature gate")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate/cluster", "-p", `{"spec":{"featureSet": "TechPreviewNoUpgrade"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check cluster is still healthy")
waitForClusterHealthy(oc)
}
g.By("Check if cluster api is deployed")
// Private clusters can take extra time even after the cluster becomes healthy; this only happens when publicZone is not present
g.By("if publicZone is {id:qe} or any other value, the cluster is not a private setup, so there is no need to wait")
if publicZone == "" {
	time.Sleep(360 * time.Second)
}
err = wait.Poll(20*time.Second, 6*time.Minute, func() (bool, error) {
capi, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(capi, "cluster-capi-operator") {
return true, nil
}
e2e.Logf("cluster-capi-operator pod hasn't been deployed, continue to next round")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "cluster-capi-operator pod deploy failed")
g.By("Check if machine approver is deployed")
approver, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", machineApproverNamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(approver).To(o.ContainSubstring("machine-approver-capi"))
g.By("Check user data secret is copied from openshift-machine-api namespace to openshift-cluster-api")
secret, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(secret).To(o.ContainSubstring("worker-user-data"))
}) | |||||
test case | openshift/openshift-tests-private | 1ba47c0b-50c6-49a7-a692-9ba1e0bffd20 | Author:zhsun-Medium-51141-worker-user-data secret should be synced up [Disruptive] | ['"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | g.It("Author:zhsun-Medium-51141-worker-user-data secret should be synced up [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.VSphere)
skipForCAPINotExist(oc)
g.By("Delete worker-user-data in openshift-cluster-api namespace")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "worker-user-data", "-n", clusterAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check user-data secret is synced up from openshift-machine-api to openshift-cluster-api")
err = wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) {
userData, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", clusterAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(userData, "worker-user-data") {
return true, nil
}
e2e.Logf("Continue to next round")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "user-data secret isn't synced up from openshift-machine-api to openshift-cluster-api")
}) | |||||
test case | openshift/openshift-tests-private | 25a22ffa-1621-41fa-95ec-6dad914b5101 | Author:dtobolik-NonHyperShiftHOST-Medium-61980-Workload annotation missing from deployments | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | g.It("Author:dtobolik-NonHyperShiftHOST-Medium-61980-Workload annotation missing from deployments", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.VSphere)
skipForCAPINotExist(oc)
deployments, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", clusterAPINamespace, "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
deploymentList := strings.Split(deployments, "\n")
for _, deployment := range deploymentList {
result, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(deployment, "-n", clusterAPINamespace, `-o=jsonpath={.spec.template.metadata.annotations.target\.workload\.openshift\.io/management}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.Equal(`{"effect": "PreferredDuringScheduling"}`))
}
}) | |||||
test case | openshift/openshift-tests-private | 51b2b072-609d-4dc5-b1c0-99d0bf2829c4 | Author:miyadav-NonHyperShiftHOST-Medium-71695-Core CAPI CRDs not deployed on unsupported platforms even when explicitly needed by other operators | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | g.It("Author:miyadav-NonHyperShiftHOST-Medium-71695-Core CAPI CRDs not deployed on unsupported platforms even when explicitly needed by other operators", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.GCP, clusterinfra.AWS, clusterinfra.AlibabaCloud, clusterinfra.IBMCloud, clusterinfra.Nutanix)
skipForCAPINotExist(oc)
expectedCRDs := `clusterclasses.cluster.x-k8s.io
clusterresourcesetbindings.addons.cluster.x-k8s.io
clusterresourcesets.addons.cluster.x-k8s.io
clusters.cluster.x-k8s.io
extensionconfigs.runtime.cluster.x-k8s.io
machinedeployments.cluster.x-k8s.io
machinehealthchecks.cluster.x-k8s.io
machinepools.cluster.x-k8s.io
machines.cluster.x-k8s.io
machinesets.cluster.x-k8s.io`
expectedCRD := strings.Split(expectedCRDs, "\n")
g.By("Get capi crds in techpreview cluster")
for _, crd := range expectedCRD {
// Execute `oc get crds <CRD name>` for each CRD
crds, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crds", crd, `-o=jsonpath={.metadata.annotations}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(crds).To(o.ContainSubstring("CustomNoUpgrade"))
}
}) | |||||
test case | openshift/openshift-tests-private | fb4dc5c6-9f7e-4787-aa65-70d7925c8ccc | Author:miyadav-NonHyperShiftHOST-Medium-71913-Promote CAPI IPAM CRDs to GA | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | g.It("Author:miyadav-NonHyperShiftHOST-Medium-71913-Promote CAPI IPAM CRDs to GA", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.VSphere, clusterinfra.GCP, clusterinfra.AWS, clusterinfra.AlibabaCloud, clusterinfra.IBMCloud, clusterinfra.Nutanix, clusterinfra.PowerVS)
expectedCRDs := `ipaddressclaims.ipam.cluster.x-k8s.io
ipaddresses.ipam.cluster.x-k8s.io`
expectedCRD := strings.Split(expectedCRDs, "\n")
g.By("Get capi crds in cluster")
for _, crd := range expectedCRD {
// Execute `oc get crds <CRD name>` for each CRD
crds, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crds", crd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(crds).NotTo(o.ContainSubstring("not found"))
}
}) | |||||
test case | openshift/openshift-tests-private | b4a88f2a-e3fb-4975-a183-c784cf74be1d | Author:miyadav-NonHyperShiftHOST-Critical-73620-terminationMessagePolicy should be FallbackToLogsOnError | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | g.It("Author:miyadav-NonHyperShiftHOST-Critical-73620-terminationMessagePolicy should be FallbackToLogsOnError", func() {
skipForCAPINotExist(oc)
podNames, err := exutil.GetAllPods(oc, "openshift-cluster-api")
if err != nil {
g.Fail("cluster-api pods seems unstable")
}
g.By("Get pod yaml and check terminationMessagePolicy")
for _, podName := range podNames {
terminationMessagePolicy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", podName, "-n", "openshift-cluster-api", "-o=jsonpath={.spec.containers[0].terminationMessagePolicy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(terminationMessagePolicy).To(o.ContainSubstring("FallbackToLogsOnError"))
}
}) | ||||||
test case | openshift/openshift-tests-private | f4bdf630-aba1-4926-ba0a-e63cc8bee347 | Author:miyadav-NonHyperShiftHOST-Medium-76078-[capi] Should not be able to remove Infrastructure Cluster resources [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi.go | g.It("Author:miyadav-NonHyperShiftHOST-Medium-76078-[capi] Should not be able to remove Infrastructure Cluster resources [Disruptive]", func() {
skipForCAPINotExist(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.VSphere)
g.By("Check Infracluster")
iaasPlatform := clusterinfra.CheckPlatform(oc)
clusterID := clusterinfra.GetInfrastructureName(oc)
infraObj, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(strings.ToUpper(iaasPlatform.String())+"Cluster", "-n", clusterAPINamespace, "-o", "jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if infraObj == clusterID {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(strings.ToUpper(iaasPlatform.String())+"Cluster", infraObj, "-n", clusterAPINamespace).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("denied request: InfraCluster resources with metadata.name corresponding to the cluster infrastructureName cannot be deleted."))
} else {
g.Skip("We are not worried if infrastructure is not same as clusterId...")
}
}) | |||||
file | openshift/openshift-tests-private | 2b25a265-e04e-458c-b914-c7d014aa01c0 | ccm_util | import (
"encoding/base64"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | package clusterinfrastructure
import (
"encoding/base64"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type ingressControllerDescription struct {
template string
name string
}
type loadBalancerServiceDescription struct {
template string
name string
awssubnet string
awslabel string
gcptype string
azureinternal bool
azuresubnet string
namespace string
}
type podDescription struct {
template string
name string
namespace string
}
func (ingressController *ingressControllerDescription) createIngressController(oc *exutil.CLI) {
e2e.Logf("Creating ingressController ...")
exutil.CreateNsResourceFromTemplate(oc, "openshift-ingress-operator", "--ignore-unknown-parameters=true", "-f", ingressController.template, "-p", "NAME="+ingressController.name)
}
func (ingressController *ingressControllerDescription) deleteIngressController(oc *exutil.CLI) error {
e2e.Logf("Deleting ingressController ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("ingressController", ingressController.name, "-n", "openshift-ingress-operator").Execute()
}
func (loadBalancerService *loadBalancerServiceDescription) createLoadBalancerService(oc *exutil.CLI) {
e2e.Logf("Creating loadBalancerService ...")
var err error
if strings.Contains(loadBalancerService.template, "annotations") {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", loadBalancerService.template, "-p", "NAME="+loadBalancerService.name, "NAMESPACE="+loadBalancerService.namespace, "AWSSUBNET="+loadBalancerService.awssubnet, "AWSLABEL="+loadBalancerService.awslabel, "GCPTYPE="+loadBalancerService.gcptype, "AZUREINTERNAL="+strconv.FormatBool(loadBalancerService.azureinternal), "AZURESUNBET="+loadBalancerService.azuresubnet)
} else {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", loadBalancerService.template, "-p", "NAME="+loadBalancerService.name, "NAMESPACE="+loadBalancerService.namespace)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (loadBalancerService *loadBalancerServiceDescription) deleteLoadBalancerService(oc *exutil.CLI) error {
e2e.Logf("Deleting loadBalancerService ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", loadBalancerService.name, "-n", loadBalancerService.namespace).Execute()
}
func (pod *podDescription) createPod(oc *exutil.CLI) {
e2e.Logf("Creating pod ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pod *podDescription) deletePod(oc *exutil.CLI) error {
e2e.Logf("Deleting pod ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod.name, "-n", pod.namespace).Execute()
}
// waitForClusterHealthy checks that the new machineconfig is applied successfully
func waitForClusterHealthy(oc *exutil.CLI) {
	e2e.Logf("Waiting for the cluster to become healthy ...")
	// sleep for 5 minutes to make sure the related mcp starts to update
	time.Sleep(5 * time.Minute)
	timeToWait := time.Duration(getNodeCount(oc)*5) * time.Minute
	pollErr := wait.Poll(1*time.Minute, timeToWait-5*time.Minute, func() (bool, error) {
master, errMaster := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-o", "jsonpath='{.status.conditions[?(@.type==\"Updated\")].status}'").Output()
worker, errWorker := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-o", "jsonpath='{.status.conditions[?(@.type==\"Updated\")].status}'").Output()
if errMaster != nil || errWorker != nil {
e2e.Logf("the err:%v,%v, and try next round", errMaster, errWorker)
return false, nil
}
if strings.Contains(master, "True") && strings.Contains(worker, "True") {
e2e.Logf("mc operation is completed on mcp")
return true, nil
}
return false, nil
})
if pollErr != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Failf("Expected cluster is not healthy after waiting up to %s minutes ...", timeToWait)
}
e2e.Logf("Cluster is healthy ...")
}
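// getNodeCount returns the total number of nodes listed by `oc get nodes`.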
func getNodeCount(oc *exutil.CLI) int {
nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// "NotReady" contains "Ready" as a substring, so counting "Ready" alone already counts every node
// exactly once; adding a separate "NotReady" count would count NotReady nodes twice.
nodeCount := strings.Count(nodes, "Ready")
return nodeCount
}
// SkipIfCloudControllerManagerNotDeployed check if ccm is deployed
func SkipIfCloudControllerManagerNotDeployed(oc *exutil.CLI) {
var ccm string
var err error
ccm, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.items[*].metadata.name}").Output()
if err == nil {
if len(ccm) == 0 {
g.Skip("Skip for cloud-controller-manager is not deployed!")
}
}
}
// waitForResourceToDisappear waits for the named resource to disappear, e.g. used while a router deployment is rolled out
func waitForResourceToDisappear(oc *exutil.CLI, ns, rsname string) error {
return wait.Poll(20*time.Second, 5*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(rsname, "-n", ns).Output()
e2e.Logf("check resource %v and got: %v", rsname, status)
primary := false
if err != nil {
if strings.Contains(status, "NotFound") {
e2e.Logf("the resource is disappeared!")
primary = true
} else {
e2e.Logf("failed to get the resource: %v, retrying...", err)
}
} else {
e2e.Logf("the resource is still there, retrying...")
}
return primary, nil
})
}
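// waitForPodWithLabelReady waits up to 3 minutes for all pods matching the label in the namespace to report Ready.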
func waitForPodWithLabelReady(oc *exutil.CLI, ns, label string) error {
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output()
e2e.Logf("the Ready status of pod is %v", status)
if err != nil || status == "" {
e2e.Logf("failed to get pod status: %v, retrying...", err)
return false, nil
}
if strings.Contains(status, "False") {
e2e.Logf("the pod Ready status not met; wanted True but got %v, retrying...", status)
return false, nil
}
return true, nil
})
}
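// waitForClusterOperatorsReady waits up to 3 minutes for each given cluster operator to report Available=True, Progressing=False and Degraded=False.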
func waitForClusterOperatorsReady(oc *exutil.CLI, clusterOperators ...string) error {
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
for _, co := range clusterOperators {
coState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/"+co, "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err != nil || coState == "" {
e2e.Logf("failed to get co state: %v, retrying...", err)
return false, nil
}
if !strings.Contains(coState, "TrueFalseFalse") {
e2e.Logf("the co: %v status not met; wanted TrueFalseFalse but got %v, retrying...", co, coState)
return false, nil
}
}
return true, nil
})
}
// getLBSvcIP get Load Balancer service IP/Hostname
func getLBSvcIP(oc *exutil.CLI, loadBalancerService loadBalancerServiceDescription) string {
e2e.Logf("Getting the Load Balancer service IP ...")
iaasPlatform := clusterinfra.CheckPlatform(oc)
var jsonString string
if iaasPlatform == clusterinfra.AWS || iaasPlatform == clusterinfra.IBMCloud {
jsonString = "-o=jsonpath={.status.loadBalancer.ingress[0].hostname}"
} else {
jsonString = "-o=jsonpath={.status.loadBalancer.ingress[0].ip}"
}
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
svcStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", loadBalancerService.name, "-n", loadBalancerService.namespace, jsonString).Output()
if err != nil || svcStatus == "pending" || svcStatus == "" {
e2e.Logf("External-IP is not assigned and waiting up to 20 seconds ...")
return false, nil
}
e2e.Logf("External-IP is assigned: %s" + svcStatus)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "External-IP is not assigned in 5 minite")
svcStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "-n", loadBalancerService.namespace, loadBalancerService.name, jsonString).Output()
e2e.Logf("The %s lb service ip/hostname is %q", loadBalancerService.name, svcStatus)
return svcStatus
}
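// waitForLoadBalancerReady curls the given external IP/hostname until the Hello-OpenShift response is returned.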
func waitForLoadBalancerReady(oc *exutil.CLI, externalIP string) {
e2e.Logf("Getting the Load Balancer service IP ...")
errWait := wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) {
msg, err := exec.Command("bash", "-c", "curl "+externalIP).Output()
if err != nil {
e2e.Logf("failed to execute the curl: %s. Trying again", err)
return false, nil
}
e2e.Logf("msg -->: %s", msg)
if !strings.Contains(string(msg), "Hello-OpenShift") {
e2e.Logf("Load balancer is not ready yet and waiting up to 5 minutes ...")
return false, nil
}
e2e.Logf("Load balancer is ready")
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "Load balancer is not ready after waiting up to 5 minutes ...")
}
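// appendPullSecretAuth writes a copy of the auth file that additionally contains an auth entry for the given registry and returns the new file's path.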
func appendPullSecretAuth(authFile, regRouter, newRegUser, newRegPass string) (string, error) {
fieldValue := ""
if newRegUser == "" {
fieldValue = newRegPass
} else {
fieldValue = newRegUser + ":" + newRegPass
}
regToken := base64.StdEncoding.EncodeToString([]byte(fieldValue))
authDir, _ := filepath.Split(authFile)
newAuthFile := filepath.Join(authDir, fmt.Sprintf("%s.json", getRandomString()))
jqCMD := fmt.Sprintf(`cat %s | jq '.auths += {"%s":{"auth":"%s"}}' > %s`, authFile, regRouter, regToken, newAuthFile)
_, err := exec.Command("bash", "-c", jqCMD).Output()
if err != nil {
e2e.Logf("Fail to extract dockerconfig: %v", err)
return newAuthFile, err
}
return newAuthFile, nil
}
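// extractPullSecret extracts the cluster pull secret into a temporary directory and returns the directory path.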
func extractPullSecret(oc *exutil.CLI) (string, error) {
tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("registry-%s", getRandomString()))
err := os.Mkdir(tempDataDir, 0o755)
if err != nil {
e2e.Logf("Fail to create directory: %v", err)
return tempDataDir, err
}
err = oc.AsAdmin().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--confirm", "--to="+tempDataDir).Execute()
if err != nil {
e2e.Logf("Fail to extract dockerconfig: %v", err)
return tempDataDir, err
}
return tempDataDir, nil
}
| package clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 7d1e288a-eafb-4612-8c45-3c070e1ccb3e | createIngressController | ['ingressControllerDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func (ingressController *ingressControllerDescription) createIngressController(oc *exutil.CLI) {
e2e.Logf("Creating ingressController ...")
exutil.CreateNsResourceFromTemplate(oc, "openshift-ingress-operator", "--ignore-unknown-parameters=true", "-f", ingressController.template, "-p", "NAME="+ingressController.name)
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | cd077f97-2105-4af3-a69e-2446d7b44df6 | deleteIngressController | ['ingressControllerDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func (ingressController *ingressControllerDescription) deleteIngressController(oc *exutil.CLI) error {
e2e.Logf("Deleting ingressController ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("ingressController", ingressController.name, "-n", "openshift-ingress-operator").Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | fec3926e-8c39-4c67-8f67-ebe8f57fe528 | createLoadBalancerService | ['"strconv"', '"strings"'] | ['loadBalancerServiceDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func (loadBalancerService *loadBalancerServiceDescription) createLoadBalancerService(oc *exutil.CLI) {
e2e.Logf("Creating loadBalancerService ...")
var err error
if strings.Contains(loadBalancerService.template, "annotations") {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", loadBalancerService.template, "-p", "NAME="+loadBalancerService.name, "NAMESPACE="+loadBalancerService.namespace, "AWSSUBNET="+loadBalancerService.awssubnet, "AWSLABEL="+loadBalancerService.awslabel, "GCPTYPE="+loadBalancerService.gcptype, "AZUREINTERNAL="+strconv.FormatBool(loadBalancerService.azureinternal), "AZURESUNBET="+loadBalancerService.azuresubnet)
} else {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", loadBalancerService.template, "-p", "NAME="+loadBalancerService.name, "NAMESPACE="+loadBalancerService.namespace)
}
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | 371318d1-dea1-47e4-a833-de66643f3713 | deleteLoadBalancerService | ['loadBalancerServiceDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func (loadBalancerService *loadBalancerServiceDescription) deleteLoadBalancerService(oc *exutil.CLI) error {
e2e.Logf("Deleting loadBalancerService ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", loadBalancerService.name, "-n", loadBalancerService.namespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 88cac8c5-4925-457c-af0c-4b983808ad10 | createPod | ['podDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func (pod *podDescription) createPod(oc *exutil.CLI) {
e2e.Logf("Creating pod ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | b01377ca-5773-400f-bc21-be6b206b4bc4 | deletePod | ['podDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func (pod *podDescription) deletePod(oc *exutil.CLI) error {
e2e.Logf("Deleting pod ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod.name, "-n", pod.namespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | f169c9ea-dc5d-46ee-aa8f-ad4524e06e29 | waitForClusterHealthy | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func waitForClusterHealthy(oc *exutil.CLI) {
e2e.Logf("Waiting for the cluster healthy ...")
// sleep for 5 minites to make sure related mcp start to update
time.Sleep(5 * time.Minute)
timeToWait := time.Duration(getNodeCount(oc)*5) * time.Minute
pollErr := wait.Poll(1*time.Minute, timeToWait-5, func() (bool, error) {
master, errMaster := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-o", "jsonpath='{.status.conditions[?(@.type==\"Updated\")].status}'").Output()
worker, errWorker := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-o", "jsonpath='{.status.conditions[?(@.type==\"Updated\")].status}'").Output()
if errMaster != nil || errWorker != nil {
e2e.Logf("the err:%v,%v, and try next round", errMaster, errWorker)
return false, nil
}
if strings.Contains(master, "True") && strings.Contains(worker, "True") {
e2e.Logf("mc operation is completed on mcp")
return true, nil
}
return false, nil
})
if pollErr != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Failf("Expected cluster is not healthy after waiting up to %s minutes ...", timeToWait)
}
e2e.Logf("Cluster is healthy ...")
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 1a9e99f0-ae02-48e8-bf83-9453b6614af1 | getNodeCount | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func getNodeCount(oc *exutil.CLI) int {
nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeCount := int(strings.Count(nodes, "Ready")) + int(strings.Count(nodes, "NotReady"))
return nodeCount
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | b55dd6fe-a75f-4844-a9e8-f0ea18a86389 | SkipIfCloudControllerManagerNotDeployed | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func SkipIfCloudControllerManagerNotDeployed(oc *exutil.CLI) {
var ccm string
var err error
ccm, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.items[*].metadata.name}").Output()
if err == nil {
if len(ccm) == 0 {
g.Skip("Skip for cloud-controller-manager is not deployed!")
}
}
} | clusterinfrastructure | |||||
function | openshift/openshift-tests-private | 49d0d56f-2a6b-4ec1-93e5-7b5fbe3fe624 | waitForResourceToDisappear | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func waitForResourceToDisappear(oc *exutil.CLI, ns, rsname string) error {
return wait.Poll(20*time.Second, 5*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(rsname, "-n", ns).Output()
e2e.Logf("check resource %v and got: %v", rsname, status)
primary := false
if err != nil {
if strings.Contains(status, "NotFound") {
e2e.Logf("the resource is disappeared!")
primary = true
} else {
e2e.Logf("failed to get the resource: %v, retrying...", err)
}
} else {
e2e.Logf("the resource is still there, retrying...")
}
return primary, nil
})
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 51fc6a81-5637-4ccb-8f0c-a390bf055db2 | waitForPodWithLabelReady | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func waitForPodWithLabelReady(oc *exutil.CLI, ns, label string) error {
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output()
e2e.Logf("the Ready status of pod is %v", status)
if err != nil || status == "" {
e2e.Logf("failed to get pod status: %v, retrying...", err)
return false, nil
}
if strings.Contains(status, "False") {
e2e.Logf("the pod Ready status not met; wanted True but got %v, retrying...", status)
return false, nil
}
return true, nil
})
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | d4d95125-028a-4517-88db-91274a8fadaf | waitForClusterOperatorsReady | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func waitForClusterOperatorsReady(oc *exutil.CLI, clusterOperators ...string) error {
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
for _, co := range clusterOperators {
coState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/"+co, "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err != nil || coState == "" {
e2e.Logf("failed to get co state: %v, retrying...", err)
return false, nil
}
if !strings.Contains(coState, "TrueFalseFalse") {
e2e.Logf("the co: %v status not met; wanted TrueFalseFalse but got %v, retrying...", co, coState)
return false, nil
}
}
return true, nil
})
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 9357a702-c79a-4100-9563-583f07692c2e | getLBSvcIP | ['"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['loadBalancerServiceDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func getLBSvcIP(oc *exutil.CLI, loadBalancerService loadBalancerServiceDescription) string {
e2e.Logf("Getting the Load Balancer service IP ...")
iaasPlatform := clusterinfra.CheckPlatform(oc)
var jsonString string
if iaasPlatform == clusterinfra.AWS || iaasPlatform == clusterinfra.IBMCloud {
jsonString = "-o=jsonpath={.status.loadBalancer.ingress[0].hostname}"
} else {
jsonString = "-o=jsonpath={.status.loadBalancer.ingress[0].ip}"
}
err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) {
svcStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", loadBalancerService.name, "-n", loadBalancerService.namespace, jsonString).Output()
if err != nil || svcStatus == "pending" || svcStatus == "" {
e2e.Logf("External-IP is not assigned and waiting up to 20 seconds ...")
return false, nil
}
e2e.Logf("External-IP is assigned: %s" + svcStatus)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "External-IP is not assigned in 5 minite")
svcStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "-n", loadBalancerService.namespace, loadBalancerService.name, jsonString).Output()
e2e.Logf("The %s lb service ip/hostname is %q", loadBalancerService.name, svcStatus)
return svcStatus
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | da5a8693-d5ed-4be0-bc83-0bd1705ddcdb | waitForLoadBalancerReady | ['"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func waitForLoadBalancerReady(oc *exutil.CLI, externalIP string) {
e2e.Logf("Getting the Load Balancer service IP ...")
errWait := wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) {
msg, err := exec.Command("bash", "-c", "curl "+externalIP).Output()
if err != nil {
e2e.Logf("failed to execute the curl: %s. Trying again", err)
return false, nil
}
e2e.Logf("msg -->: %s", msg)
if !strings.Contains(string(msg), "Hello-OpenShift") {
e2e.Logf("Load balancer is not ready yet and waiting up to 5 minutes ...")
return false, nil
}
e2e.Logf("Load balancer is ready")
return true, nil
})
exutil.AssertWaitPollNoErr(errWait, "Load balancer is not ready after waiting up to 5 minutes ...")
} | clusterinfrastructure | ||||
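Taken together, createLoadBalancerService, getLBSvcIP and waitForLoadBalancerReady cover the usual LoadBalancer flow in these CCM tests. A minimal usage sketch, assuming a hypothetical fixture template and service name (neither is taken from the repository):

func exampleLoadBalancerFlow(oc *exutil.CLI) {
	// Illustrative only: the fixture sub-path and service name are assumptions.
	svc := loadBalancerServiceDescription{
		template:  exutil.FixturePath("testdata", "clusterinfrastructure", "loadbalancer-svc.yaml"),
		name:      "lb-svc-example",
		namespace: oc.Namespace(),
	}
	svc.createLoadBalancerService(oc)
	defer svc.deleteLoadBalancerService(oc)
	externalIP := getLBSvcIP(oc, svc)        // blocks until the cloud assigns an IP/hostname
	waitForLoadBalancerReady(oc, externalIP) // curls the endpoint until it answers
}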
function | openshift/openshift-tests-private | deb6aeec-29ab-4db5-83dc-2bba8685c32a | appendPullSecretAuth | ['"encoding/base64"', '"fmt"', '"os/exec"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func appendPullSecretAuth(authFile, regRouter, newRegUser, newRegPass string) (string, error) {
fieldValue := ""
if newRegUser == "" {
fieldValue = newRegPass
} else {
fieldValue = newRegUser + ":" + newRegPass
}
regToken := base64.StdEncoding.EncodeToString([]byte(fieldValue))
authDir, _ := filepath.Split(authFile)
newAuthFile := filepath.Join(authDir, fmt.Sprintf("%s.json", getRandomString()))
jqCMD := fmt.Sprintf(`cat %s | jq '.auths += {"%s":{"auth":"%s"}}' > %s`, authFile, regRouter, regToken, newAuthFile)
_, err := exec.Command("bash", "-c", jqCMD).Output()
if err != nil {
e2e.Logf("Fail to extract dockerconfig: %v", err)
return newAuthFile, err
}
return newAuthFile, nil
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 75e0378d-e297-493d-be60-01f7f4399d05 | extractPullSecret | ['"fmt"', '"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm_util.go | func extractPullSecret(oc *exutil.CLI) (string, error) {
tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("registry-%s", getRandomString()))
err := os.Mkdir(tempDataDir, 0o755)
if err != nil {
e2e.Logf("Fail to create directory: %v", err)
return tempDataDir, err
}
err = oc.AsAdmin().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--confirm", "--to="+tempDataDir).Execute()
if err != nil {
e2e.Logf("Fail to extract dockerconfig: %v", err)
return tempDataDir, err
}
return tempDataDir, nil
} | clusterinfrastructure | ||||
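extractPullSecret and appendPullSecretAuth are typically chained: the first dumps the cluster pull secret to a temp dir, the second writes a new auth file with one extra registry entry. A hedged sketch in which the registry host and credentials are placeholders:

func examplePullSecretWithExtraRegistry(oc *exutil.CLI) {
	// Illustrative only: registry host, user and password are placeholders.
	tempDataDir, err := extractPullSecret(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer os.RemoveAll(tempDataDir)
	originAuthFile := filepath.Join(tempDataDir, ".dockerconfigjson") // key name inside the pull-secret
	newAuthFile, err := appendPullSecretAuth(originAuthFile, "registry.example.com:5000", "testuser", "testpass")
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Combined auth file: %s", newAuthFile)
	// use newAuthFile here, before the deferred cleanup removes tempDataDir
}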
file | openshift/openshift-tests-private | f7538bdb-5dce-4c91-a8e2-20f8c8f386af | cpms_utils | import (
"io/ioutil"
"math/rand"
"os"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"github.com/tidwall/sjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | package clusterinfrastructure
import (
"io/ioutil"
"math/rand"
"os"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"github.com/tidwall/sjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// waitForCPMSUpdateCompleted waits for the update to complete
func waitForCPMSUpdateCompleted(oc *exutil.CLI, replicas int) {
e2e.Logf("Waiting for the Update completed ...")
timeToWait := time.Duration(replicas*50) * time.Minute
count := 0
err := wait.Poll(1*time.Minute, timeToWait, func() (bool, error) {
count++
if count == 1 {
e2e.Logf("Wait for the update to start and waiting up to 1 minutes ... count %d", count)
return false, nil
}
desiredReplicas, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.replicas}", "-n", machineAPINamespace).Output()
if err1 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
readyReplicas, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
if err2 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
currentReplicas, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.replicas}", "-n", machineAPINamespace).Output()
if err3 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
updatedReplicas, err4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.updatedReplicas}", "-n", machineAPINamespace).Output()
if err4 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
if desiredReplicas == currentReplicas && desiredReplicas == readyReplicas && desiredReplicas == updatedReplicas {
e2e.Logf("The Update is completed! desiredReplicas is %s, count %d", desiredReplicas, count)
return true, nil
}
e2e.Logf("The Update is still ongoing and waiting up to 1 minutes ... count %d, desiredReplicas is %s,currentReplicas is %s,readyReplicas is %s,updatedReplicas is %s", count, desiredReplicas, currentReplicas, readyReplicas, updatedReplicas)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Wait Update failed.")
}
// skipForCPMSNotStable skip the test if the cpms is not stable
func skipForCPMSNotStable(oc *exutil.CLI) {
readyReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
desiredReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
updatedReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.updatedReplicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !(desiredReplicas == currentReplicas && desiredReplicas == readyReplicas && desiredReplicas == updatedReplicas) {
g.Skip("Skip for cpms is not stable!")
}
}
// printNodeInfo print the output of oc get node
func printNodeInfo(oc *exutil.CLI) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
e2e.Logf("%v", output)
}
// getMachineSuffix get the machine suffix
func getMachineSuffix(oc *exutil.CLI, machineName string) string {
start := strings.LastIndex(machineName, "-")
suffix := machineName[start:]
return suffix
}
// checkIfCPMSIsStable check if the Update is completed
func checkIfCPMSIsStable(oc *exutil.CLI) bool {
err := wait.Poll(20*time.Second, 5*time.Minute, func() (bool, error) {
readyReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
desiredReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if desiredReplicas == currentReplicas && desiredReplicas == readyReplicas {
e2e.Logf("cpms is stable!")
return true, nil
}
e2e.Logf("cpms is not stable, desiredReplicas :%s, currentReplicas:%s, readyReplicas:%s", desiredReplicas, currentReplicas, readyReplicas)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "CPMS is not stable!!!.")
return err == nil
}
// getCPMSAvailabilityZones get zones from cpms
func getCPMSAvailabilityZones(oc *exutil.CLI, iaasPlatform clusterinfra.PlatformType) []string {
var getCPMSAvailabilityZonesJSON string
switch iaasPlatform {
case clusterinfra.AWS:
getCPMSAvailabilityZonesJSON = "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[*].placement.availabilityZone}"
case clusterinfra.Azure, clusterinfra.GCP:
getCPMSAvailabilityZonesJSON = "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains." + iaasPlatform.String() + "[*].zone}"
default:
e2e.Logf("The " + iaasPlatform.String() + " Platform is not supported for now.")
}
availabilityZonesStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getCPMSAvailabilityZonesJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
availabilityZones := strings.Split(availabilityZonesStr, " ")
e2e.Logf("availabilityZones:%s", availabilityZones)
return availabilityZones
}
// getZoneAndMachineFromCPMSZones gets a zone that has only one master machine and returns its index, the zone name and the machine name
func getZoneAndMachineFromCPMSZones(oc *exutil.CLI, availabilityZones []string) (int, string, string) {
var key int
var value, machineName string
for key, value = range availabilityZones {
labels := "machine.openshift.io/zone=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
machineNamesStr, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("machines.machine.openshift.io", "-l", labels, "-o=jsonpath={.items[*].metadata.name}", "-n", machineAPINamespace).Output()
if machineNamesStr == "" {
continue
}
machineNames := strings.Split(machineNamesStr, " ")
machineName = machineNames[0]
number := len(machineNames)
if number == 1 {
e2e.Logf("key:%s, failureDomain:%s, master machine name:%s", key, value, machineName)
break
}
}
return key, value, machineName
}
// deleteControlPlaneMachineSet delete the ControlPlaneMachineSet to make it Inactive
func deleteControlPlaneMachineSet(oc *exutil.CLI) {
e2e.Logf("Deleting ControlPlaneMachineSet ...")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("controlplanemachineset", "cluster", "-n", machineAPINamespace, "--wait=false").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// activeControlPlaneMachineSet activates the ControlPlaneMachineSet
func activeControlPlaneMachineSet(oc *exutil.CLI) {
e2e.Logf("Active ControlPlaneMachineSet ...")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
if cpmsState != "Active" {
output, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"state":"Active"}}`, "--type=merge", "-n", machineAPINamespace).Output()
e2e.Logf("controlplanemachineset status is: %s, waiting up to 2 seconds, then patch output: %s", cpmsState, output)
return false, nil
}
e2e.Logf("controlplanemachineset is in Active state")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "controlplanemachineset is not in Active state")
}
// replaceOneMasterMachine create a new master machine and delete the old master machine
func replaceOneMasterMachine(oc *exutil.CLI, oldMachineName, newMachineName string) {
e2e.Logf("Creating a new master machine ...")
machineJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machines.machine.openshift.io", oldMachineName, "-n", machineAPINamespace, "-o=json").OutputToFile("mastermachine.json")
o.Expect(err).NotTo(o.HaveOccurred())
bytes, _ := ioutil.ReadFile(machineJSON)
value1, _ := sjson.Set(string(bytes), "metadata.name", newMachineName)
value2, _ := sjson.Set(value1, "spec.providerID", nil)
err = os.WriteFile(machineJSON, []byte(value2), 0o644)
o.Expect(err).NotTo(o.HaveOccurred())
if err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineJSON).Execute(); err != nil {
clusterinfra.DeleteMachine(oc, newMachineName)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
clusterinfra.DeleteMachine(oc, oldMachineName)
clusterinfra.WaitForMachineRunningByName(oc, newMachineName)
clusterinfra.WaitForMachineDisappearByName(oc, oldMachineName)
}
}
// randomMasterMachineName randomly generate a master machine name
func randomMasterMachineName(oldMachineName string) (string, string) {
start := strings.LastIndex(oldMachineName, "-")
newIndex := strconv.Itoa(rand.Intn(100) + 3)
newMachineName := oldMachineName[0:start+1] + newIndex
return "-" + newIndex, newMachineName
}
// getMasterMachineNameBySuffix get the master machine name by suffix
func getMasterMachineNameBySuffix(oc *exutil.CLI, suffix string) string {
currentMasterMachineNames := clusterinfra.ListMasterMachineNames(oc)
for _, value := range currentMasterMachineNames {
if suffix == getMachineSuffix(oc, value) {
return value
}
}
return ""
}
// waitForClusterStable waits for the cluster to stabilize
func waitForClusterStable(oc *exutil.CLI) {
e2e.Logf("Wait cluster to stabilize ...")
err := wait.Poll(2*time.Minute, 40*time.Minute, func() (bool, error) {
authenticationState, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/authentication", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err1 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
etcdState, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/etcd", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err2 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
kubeapiserverState, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/kube-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err3 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
openshiftapiserverState, err4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/openshift-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err4 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
if strings.Contains(authenticationState, "TrueFalseFalse") && strings.Contains(etcdState, "TrueFalseFalse") && strings.Contains(kubeapiserverState, "TrueFalseFalse") && strings.Contains(openshiftapiserverState, "TrueFalseFalse") {
e2e.Logf("The cluster is stable!")
return true, nil
}
e2e.Logf("The cluster is not stable and waiting up to 2 minutes ...")
return false, nil
})
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Output()
e2e.Logf("%v", output)
exutil.AssertWaitPollNoErr(err, "Wait cluster to stabilize failed.")
}
// getCPMSState gets the CPMS state (Active or Inactive)
func getCPMSState(oc *exutil.CLI) string {
cpmsState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return cpmsState
}
// getArchitectureType gets the node architecture (arm64 or amd64)
func getArchitectureType(oc *exutil.CLI) string {
architecture, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.ListMasterMachineNames(oc)[0]), "-o=jsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return architecture
}
// skipForClusterNotStable skip the test if the cluster is not stable
func skipForClusterNotStable(oc *exutil.CLI) {
authenticationState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/authentication", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
etcdState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/etcd", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
kubeapiserverState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/kube-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
openshiftapiserverState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/openshift-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !(strings.Contains(authenticationState, "TrueFalseFalse") && strings.Contains(etcdState, "TrueFalseFalse") && strings.Contains(kubeapiserverState, "TrueFalseFalse") && strings.Contains(openshiftapiserverState, "TrueFalseFalse")) {
g.Skip("Skip for cluster is not stable!")
}
}
// checkIfCPMSCoIsStable check if some replicas need update, if no replicas need update, return true, else return false
func checkIfCPMSCoIsStable(oc *exutil.CLI) bool {
cpmsState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(cpmsState, "TrueFalseFalse") {
return true
}
message, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set").Output()
e2e.Logf("%v", message)
return false
}
// waitMasterNodeReady waits until all master nodes are ready
func waitMasterNodeReady(oc *exutil.CLI) {
err := wait.Poll(1*time.Minute, 5*time.Minute, func() (bool, error) {
masterMachineList := clusterinfra.ListMasterMachineNames(oc)
for _, masterMachineName := range masterMachineList {
nodeName := clusterinfra.GetNodeNameFromMachine(oc, masterMachineName)
readyStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
if readyStatus != "True" {
e2e.Logf("node %s is not ready, status:%s", nodeName, readyStatus)
return false, nil
}
}
e2e.Logf("All master node are ready!")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "wait master node ready failed!")
}
| package clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 06ecf7e4-4daf-4479-b0f9-a79a07f9e42a | waitForCPMSUpdateCompleted | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func waitForCPMSUpdateCompleted(oc *exutil.CLI, replicas int) {
e2e.Logf("Waiting for the Update completed ...")
timeToWait := time.Duration(replicas*50) * time.Minute
count := 0
err := wait.Poll(1*time.Minute, timeToWait, func() (bool, error) {
count++
if count == 1 {
e2e.Logf("Wait for the update to start and waiting up to 1 minutes ... count %d", count)
return false, nil
}
desiredReplicas, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.replicas}", "-n", machineAPINamespace).Output()
if err1 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
readyReplicas, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
if err2 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
currentReplicas, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.replicas}", "-n", machineAPINamespace).Output()
if err3 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
updatedReplicas, err4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.updatedReplicas}", "-n", machineAPINamespace).Output()
if err4 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ... count %d", count)
return false, nil
}
if desiredReplicas == currentReplicas && desiredReplicas == readyReplicas && desiredReplicas == updatedReplicas {
e2e.Logf("The Update is completed! desiredReplicas is %s, count %d", desiredReplicas, count)
return true, nil
}
e2e.Logf("The Update is still ongoing and waiting up to 1 minutes ... count %d, desiredReplicas is %s,currentReplicas is %s,readyReplicas is %s,updatedReplicas is %s", count, desiredReplicas, currentReplicas, readyReplicas, updatedReplicas)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Wait Update failed.")
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 7e1d329d-b21d-47e7-b3ff-4d0cf47dedf1 | skipForCPMSNotStable | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func skipForCPMSNotStable(oc *exutil.CLI) {
readyReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
desiredReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
updatedReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.updatedReplicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !(desiredReplicas == currentReplicas && desiredReplicas == readyReplicas && desiredReplicas == updatedReplicas) {
g.Skip("Skip for cpms is not stable!")
}
} | clusterinfrastructure | |||||
function | openshift/openshift-tests-private | 9243c356-24c7-430a-bab4-c239b749ec4c | printNodeInfo | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func printNodeInfo(oc *exutil.CLI) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
e2e.Logf("%v", output)
} | clusterinfrastructure | |||||
function | openshift/openshift-tests-private | 3bf3792e-f451-428f-bb05-e6a2b9bf1807 | getMachineSuffix | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func getMachineSuffix(oc *exutil.CLI, machineName string) string {
start := strings.LastIndex(machineName, "-")
suffix := machineName[start:]
return suffix
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | f0f488e9-3cb4-48fc-9de2-370069fa9b77 | checkIfCPMSIsStable | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func checkIfCPMSIsStable(oc *exutil.CLI) bool {
err := wait.Poll(20*time.Second, 5*time.Minute, func() (bool, error) {
readyReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.status.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
desiredReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.replicas}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if desiredReplicas == currentReplicas && desiredReplicas == readyReplicas {
e2e.Logf("cpms is stable!")
return true, nil
}
e2e.Logf("cpms is not stable, desiredReplicas :%s, currentReplicas:%s, readyReplicas:%s", desiredReplicas, currentReplicas, readyReplicas)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "CPMS is not stable!!!.")
return err == nil
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 5542a475-1003-4c47-b6ce-66f16a7b0a57 | getCPMSAvailabilityZones | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func getCPMSAvailabilityZones(oc *exutil.CLI, iaasPlatform clusterinfra.PlatformType) []string {
var getCPMSAvailabilityZonesJSON string
switch iaasPlatform {
case clusterinfra.AWS:
getCPMSAvailabilityZonesJSON = "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[*].placement.availabilityZone}"
case clusterinfra.Azure, clusterinfra.GCP:
getCPMSAvailabilityZonesJSON = "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains." + iaasPlatform.String() + "[*].zone}"
default:
e2e.Logf("The " + iaasPlatform.String() + " Platform is not supported for now.")
}
availabilityZonesStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getCPMSAvailabilityZonesJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
availabilityZones := strings.Split(availabilityZonesStr, " ")
e2e.Logf("availabilityZones:%s", availabilityZones)
return availabilityZones
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 2eaebb24-f109-4ee9-a7aa-94cb8789346a | getZoneAndMachineFromCPMSZones | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func getZoneAndMachineFromCPMSZones(oc *exutil.CLI, availabilityZones []string) (int, string, string) {
var key int
var value, machineName string
for key, value = range availabilityZones {
labels := "machine.openshift.io/zone=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
machineNamesStr, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("machines.machine.openshift.io", "-l", labels, "-o=jsonpath={.items[*].metadata.name}", "-n", machineAPINamespace).Output()
if machineNamesStr == "" {
continue
}
machineNames := strings.Split(machineNamesStr, " ")
machineName = machineNames[0]
number := len(machineNames)
if number == 1 {
e2e.Logf("key:%s, failureDomain:%s, master machine name:%s", key, value, machineName)
break
}
}
return key, value, machineName
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 3254a373-fcc1-4c26-bd6d-2c26e30dd8db | deleteControlPlaneMachineSet | ['"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func deleteControlPlaneMachineSet(oc *exutil.CLI) {
e2e.Logf("Deleting ControlPlaneMachineSet ...")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("controlplanemachineset", "cluster", "-n", machineAPINamespace, "--wait=false").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 050e36a9-4449-4547-8b6c-3b40947ab1dc | activeControlPlaneMachineSet | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func activeControlPlaneMachineSet(oc *exutil.CLI) {
e2e.Logf("Active ControlPlaneMachineSet ...")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
if cpmsState != "Active" {
output, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"state":"Active"}}`, "--type=merge", "-n", machineAPINamespace).Output()
e2e.Logf("controlplanemachineset status is: %s, waiting up to 2 seconds, then patch output: %s", cpmsState, output)
return false, nil
}
e2e.Logf("controlplanemachineset is in Active state")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "controlplanemachineset is not in Active state")
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 3cfd2f28-ff5c-4322-8571-2bc96fb9b673 | replaceOneMasterMachine | ['"io/ioutil"', '"os"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"github.com/tidwall/sjson"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func replaceOneMasterMachine(oc *exutil.CLI, oldMachineName, newMachineName string) {
e2e.Logf("Creating a new master machine ...")
machineJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machines.machine.openshift.io", oldMachineName, "-n", machineAPINamespace, "-o=json").OutputToFile("mastermachine.json")
o.Expect(err).NotTo(o.HaveOccurred())
bytes, _ := ioutil.ReadFile(machineJSON)
value1, _ := sjson.Set(string(bytes), "metadata.name", newMachineName)
value2, _ := sjson.Set(value1, "spec.providerID", nil)
err = os.WriteFile(machineJSON, []byte(value2), 0o644)
o.Expect(err).NotTo(o.HaveOccurred())
if err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineJSON).Execute(); err != nil {
clusterinfra.DeleteMachine(oc, newMachineName)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
clusterinfra.DeleteMachine(oc, oldMachineName)
clusterinfra.WaitForMachineRunningByName(oc, newMachineName)
clusterinfra.WaitForMachineDisappearByName(oc, oldMachineName)
}
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 6487e782-cb76-403c-aa01-fe82ae319bc2 | randomMasterMachineName | ['"math/rand"', '"strconv"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func randomMasterMachineName(oldMachineName string) (string, string) {
start := strings.LastIndex(oldMachineName, "-")
newIndex := strconv.Itoa(rand.Intn(100) + 3)
newMachineName := oldMachineName[0:start+1] + newIndex
return "-" + newIndex, newMachineName
} | clusterinfrastructure | ||||
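A hedged sketch of how randomMasterMachineName and replaceOneMasterMachine might be combined to roll a single control-plane machine and then wait for the cluster to settle (illustrative, not an existing test):

func exampleReplaceFirstMaster(oc *exutil.CLI) {
	// Illustrative only: replaces the first master machine with a randomly suffixed copy.
	oldMachineName := clusterinfra.ListMasterMachineNames(oc)[0]
	suffix, newMachineName := randomMasterMachineName(oldMachineName)
	e2e.Logf("Replacing %s with %s (new suffix %s)", oldMachineName, newMachineName, suffix)
	replaceOneMasterMachine(oc, oldMachineName, newMachineName)
	waitForClusterStable(oc)
}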
function | openshift/openshift-tests-private | 9691b476-0a18-447e-9182-533da765aef3 | getMasterMachineNameBySuffix | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func getMasterMachineNameBySuffix(oc *exutil.CLI, suffix string) string {
currentMasterMachineNames := clusterinfra.ListMasterMachineNames(oc)
for _, value := range currentMasterMachineNames {
if suffix == getMachineSuffix(oc, value) {
return value
}
}
return ""
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 0971b475-a9fb-4dfd-9bb9-2ebaed16f4bf | waitForClusterStable | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func waitForClusterStable(oc *exutil.CLI) {
e2e.Logf("Wait cluster to stabilize ...")
err := wait.Poll(2*time.Minute, 40*time.Minute, func() (bool, error) {
authenticationState, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/authentication", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err1 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
etcdState, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/etcd", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err2 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
kubeapiserverState, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/kube-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err3 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
openshiftapiserverState, err4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/openshift-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
if err4 != nil {
e2e.Logf("The server was unable to return a response and waiting up to 2 minutes ...")
return false, nil
}
if strings.Contains(authenticationState, "TrueFalseFalse") && strings.Contains(etcdState, "TrueFalseFalse") && strings.Contains(kubeapiserverState, "TrueFalseFalse") && strings.Contains(openshiftapiserverState, "TrueFalseFalse") {
e2e.Logf("The cluster is stable!")
return true, nil
}
e2e.Logf("The cluster is not stable and waiting up to 2 minutes ...")
return false, nil
})
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Output()
e2e.Logf("%v", output)
exutil.AssertWaitPollNoErr(err, "Wait cluster to stabilize failed.")
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 7b4f3c94-e41b-4fb8-b2df-6297b37f6d91 | getCPMSState | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func getCPMSState(oc *exutil.CLI) string {
cpmsState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return cpmsState
} | clusterinfrastructure | |||||
function | openshift/openshift-tests-private | 095688b8-4773-4b11-8185-74ffca5e413c | getArchitectureType | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func getArchitectureType(oc *exutil.CLI) string {
architecture, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.ListMasterMachineNames(oc)[0]), "-o=jsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return architecture
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 0f44d1c4-3935-431c-9ff9-a546b3be4f1d | skipForClusterNotStable | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func skipForClusterNotStable(oc *exutil.CLI) {
authenticationState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/authentication", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
etcdState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/etcd", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
kubeapiserverState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/kube-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
openshiftapiserverState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/openshift-apiserver", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !(strings.Contains(authenticationState, "TrueFalseFalse") && strings.Contains(etcdState, "TrueFalseFalse") && strings.Contains(kubeapiserverState, "TrueFalseFalse") && strings.Contains(openshiftapiserverState, "TrueFalseFalse")) {
g.Skip("Skip for cluster is not stable!")
}
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 8f3fd30b-6893-4457-acfe-f7531b379201 | checkIfCPMSCoIsStable | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func checkIfCPMSCoIsStable(oc *exutil.CLI) bool {
cpmsState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(cpmsState, "TrueFalseFalse") {
return true
}
message, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set").Output()
e2e.Logf("%v", message)
return false
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | aa51c0b9-3bfe-49a7-a71f-be3e34442264 | waitMasterNodeReady | ['"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cpms_utils.go | func waitMasterNodeReady(oc *exutil.CLI) {
err := wait.Poll(1*time.Minute, 5*time.Minute, func() (bool, error) {
masterMachineList := clusterinfra.ListMasterMachineNames(oc)
for _, masterMachineName := range masterMachineList {
nodeName := clusterinfra.GetNodeNameFromMachine(oc, masterMachineName)
readyStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
if readyStatus != "True" {
e2e.Logf("node %s is not ready, status:%s", nodeName, readyStatus)
return false, nil
}
}
e2e.Logf("All master node are ready!")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "wait master node ready failed!")
} | clusterinfrastructure | ||||
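A hedged sketch of the guard-and-verify pattern these helpers support around a CPMS-driven rollout; the replica count of 3 is an assumption and the actual CPMS mutation is elided:

func exampleCPMSGuardAndVerify(oc *exutil.CLI) {
	// Illustrative only: skip early if the cluster or CPMS is already unstable,
	// then confirm the rollout finished and the control plane is healthy.
	skipForClusterNotStable(oc)
	skipForCPMSNotStable(oc)
	// ... test-specific CPMS change goes here ...
	waitForCPMSUpdateCompleted(oc, 3) // 3 = assumed control-plane replica count
	waitMasterNodeReady(oc)
	o.Expect(checkIfCPMSCoIsStable(oc)).To(o.BeTrue())
}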
test | openshift/openshift-tests-private | a7cf5bf9-4f12-4607-b9c9-856e381ecdda | machine_healthcheck | import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machine_healthcheck.go | package clusterinfrastructure
import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure MHC MAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("machine-healthcheck", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Low-45343-nodeStartupTimeout in MachineHealthCheck should revert back to default [Flaky]", func() {
g.By("Get the default nodeStartupTimeout")
nodeStartupTimeoutBeforeUpdate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMHC, "machine-api-termination-handler", "-o=jsonpath={.spec.nodeStartupTimeout}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("before update --- nodeStartupTimeout: " + nodeStartupTimeoutBeforeUpdate)
g.By("Update nodeStartupTimeout to 30m")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMHC, "machine-api-termination-handler", "-n", machineAPINamespace, "-p", `{"spec":{"nodeStartupTimeout":"30m"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait nodeStartupTimeout revert back to default itself")
err = wait.Poll(30*time.Second, 360*time.Second, func() (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMHC, "machine-api-termination-handler", "-o=jsonpath={.spec.nodeStartupTimeout}", "-n", machineAPINamespace).Output()
if output == "30m" {
e2e.Logf("nodeStartupTimeout is not changed back and waiting up to 30 seconds ...")
return false, nil
}
e2e.Logf("nodeStartupTimeout is changed back")
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Check mhc failed"))
g.By("Get the nodeStartupTimeout should revert back to default")
nodeStartupTimeoutAfterUpdate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMHC, "machine-api-termination-handler", "-o=jsonpath={.spec.nodeStartupTimeout}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("after update --- nodeStartupTimeout: " + nodeStartupTimeoutAfterUpdate)
o.Expect(nodeStartupTimeoutAfterUpdate == nodeStartupTimeoutBeforeUpdate).To(o.BeTrue())
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | ac9150ce-9b5f-432a-8ffb-6e755822d05c | Author:huliu-NonHyperShiftHOST-Low-45343-nodeStartupTimeout in MachineHealthCheck should revert back to default [Flaky] | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machine_healthcheck.go | g.It("Author:huliu-NonHyperShiftHOST-Low-45343-nodeStartupTimeout in MachineHealthCheck should revert back to default [Flaky]", func() {
g.By("Get the default nodeStartupTimeout")
nodeStartupTimeoutBeforeUpdate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMHC, "machine-api-termination-handler", "-o=jsonpath={.spec.nodeStartupTimeout}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("before update --- nodeStartupTimeout: " + nodeStartupTimeoutBeforeUpdate)
g.By("Update nodeStartupTimeout to 30m")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMHC, "machine-api-termination-handler", "-n", machineAPINamespace, "-p", `{"spec":{"nodeStartupTimeout":"30m"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait nodeStartupTimeout revert back to default itself")
err = wait.Poll(30*time.Second, 360*time.Second, func() (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMHC, "machine-api-termination-handler", "-o=jsonpath={.spec.nodeStartupTimeout}", "-n", machineAPINamespace).Output()
if output == "30m" {
e2e.Logf("nodeStartupTimeout is not changed back and waiting up to 30 seconds ...")
return false, nil
}
e2e.Logf("nodeStartupTimeout is changed back")
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Check mhc failed"))
g.By("Get the nodeStartupTimeout should revert back to default")
nodeStartupTimeoutAfterUpdate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMHC, "machine-api-termination-handler", "-o=jsonpath={.spec.nodeStartupTimeout}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("after update --- nodeStartupTimeout: " + nodeStartupTimeoutAfterUpdate)
o.Expect(nodeStartupTimeoutAfterUpdate == nodeStartupTimeoutBeforeUpdate).To(o.BeTrue())
}) |