element_type (4 classes) | project_name (1 class) | uuid (string, 36 chars) | name (string, 0–346 chars) | imports (string, 0–2.67k chars) | structs (761 classes) | interfaces (22 classes) | file_location (545 classes) | code (string, 26–8.07M chars) | global_vars (7 classes) | package (124 classes) | tags (1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
function | openshift/openshift-tests-private | d508a31c-7d05-4c25-8173-625bbb71f5be | staticWatch | ['"context"', '"k8s.io/client-go/informers"', '"k8s.io/client-go/kubernetes"', '"k8s.io/client-go/tools/cache"'] | ['watchInfo'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client-go-utils.go | func staticWatch(ctx context.Context, clientSet kubernetes.Interface, info watchInfo) {
fac := informers.NewSharedInformerFactoryWithOptions(clientSet, 0, informers.WithNamespace(info.namespace))
var informer cache.SharedIndexInformer
switch info.resourceType {
case Service:
informer = fac.Core().V1().Services().Informer()
default:
e2e.Logf("invalid resource type %s, return", string(info.resourceType))
return
}
_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: info.addFunc,
DeleteFunc: info.deleteFunc,
UpdateFunc: info.updateFunc,
})
if err != nil {
e2e.Logf("AddEventHandler err for resource %s %s in %s, err %s, return", string(info.resourceType), info.name, info.namespace, err.Error())
return
}
e2e.Logf("start informer event watch for %s: %s %s", string(info.resourceType), info.namespace, info.name)
informer.Run(ctx.Done())
e2e.Logf("ctx Done %s, exit watching %s: %s %s", ctx.Err(), string(info.resourceType), info.namespace, info.name)
} | hypershift | |||
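A minimal usage sketch for `staticWatch` (not part of the source: the `watchInfo` field names come from the function body above, while the namespace, service name, handler, and timeout are placeholders):

```go
// Sketch only (same package as staticWatch). Imports assumed: context, time,
// k8s.io/client-go/kubernetes.
func exampleStaticWatch(clientSet kubernetes.Interface) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	go staticWatch(ctx, clientSet, watchInfo{
		namespace:    "clusters-demo", // placeholder namespace
		resourceType: Service,         // the only type staticWatch currently handles
		name:         "router",        // placeholder, used only in log lines
		addFunc:      func(obj interface{}) { e2e.Logf("service added: %v", obj) },
	})
	<-ctx.Done() // staticWatch runs until the context is cancelled
}
```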
function | openshift/openshift-tests-private | 10199af4-9444-4475-bb34-7f37873d8aca | startWatchOperator | ['"context"', '"k8s.io/client-go/dynamic"', '"k8s.io/client-go/tools/clientcmd"'] | ['operatorWatchInfo'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client-go-utils.go | func startWatchOperator(ctx context.Context, kubeconfigPath string, info operatorWatchInfo) error {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return err
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return err
}
go watchOperator(ctx, dynamicClient, info)
return nil
} | hypershift | |||
function | openshift/openshift-tests-private | 2d162b2a-0b2c-4c1b-a2a6-294f762dc092 | watchOperator | ['"context"', '"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"', '"k8s.io/apimachinery/pkg/runtime/schema"', '"k8s.io/client-go/dynamic"', '"k8s.io/client-go/dynamic/dynamicinformer"', '"k8s.io/client-go/tools/cache"'] | ['operatorWatchInfo'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client-go-utils.go | func watchOperator(ctx context.Context, client dynamic.Interface, info operatorWatchInfo) {
fac := dynamicinformer.NewFilteredDynamicSharedInformerFactory(client, 0, info.namespace, nil)
informer := fac.ForResource(schema.GroupVersionResource{
Group: info.group,
Version: info.version,
Resource: info.resources,
}).Informer()
eventHandler := cache.ResourceEventHandlerFuncs{}
if info.addFunc != nil {
eventHandler.AddFunc = func(obj interface{}) {
typedObj := obj.(*unstructured.Unstructured)
bytes, _ := typedObj.MarshalJSON()
info.addFunc(bytes)
}
}
if info.deleteFunc != nil {
eventHandler.DeleteFunc = func(obj interface{}) {
typedObj := obj.(*unstructured.Unstructured)
bytes, _ := typedObj.MarshalJSON()
info.deleteFunc(bytes)
}
}
if info.updateFunc != nil {
eventHandler.UpdateFunc = func(oldObj interface{}, newObj interface{}) {
typedObj := oldObj.(*unstructured.Unstructured)
oldObjBytes, err := typedObj.MarshalJSON()
if err != nil {
return
}
typedObj = newObj.(*unstructured.Unstructured)
newObjBytes, err := typedObj.MarshalJSON()
if err != nil {
return
}
info.updateFunc(oldObjBytes, newObjBytes)
}
}
_, err := informer.AddEventHandler(eventHandler)
if err != nil {
e2e.Logf("AddEventHandler err for %s %s in %s, err %s, return", info.resources, info.name, info.namespace, err.Error())
return
}
e2e.Logf("start informer event watch for %s.%s %s %s", info.resources, info.group, info.namespace, info.name)
informer.Run(ctx.Done())
e2e.Logf("ctx Done %s, exit watching %s.%s %s %s", ctx.Err(), info.resources, info.group, info.namespace, info.name)
} | hypershift | |||
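A matching sketch for `startWatchOperator` (the `operatorWatchInfo` field names are taken from `watchOperator` above; the GVR values and handler are illustrative):

```go
// Sketch only (same package). The handlers receive the object marshaled to
// JSON bytes, as watchOperator shows above.
func exampleWatchHostedClusters(ctx context.Context, kubeconfigPath string) error {
	return startWatchOperator(ctx, kubeconfigPath, operatorWatchInfo{
		group:     "hypershift.openshift.io", // illustrative GVR
		version:   "v1beta1",
		resources: "hostedclusters",
		namespace: "clusters", // placeholder
		name:      "demo",     // placeholder, used only in log lines
		addFunc: func(obj []byte) {
			e2e.Logf("hostedcluster added: %s", string(obj))
		},
	})
}
```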
file | openshift/openshift-tests-private | 25810480-0f63-4d5b-897b-9282710d6117 | client | import (
"bytes"
"fmt"
"io"
"os/exec"
"runtime/debug"
"strings"
g "github.com/onsi/ginkgo/v2"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | package hypershift
import (
"bytes"
"fmt"
"io"
"os/exec"
"runtime/debug"
"strings"
g "github.com/onsi/ginkgo/v2"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type CLI struct {
verb string
username string
bashparm []string
stdin *bytes.Buffer
stdout io.Writer
stderr io.Writer
verbose bool
showInfo bool
skipTLS bool
}
func NewCmdClient() *CLI {
client := &CLI{}
client.username = "admin"
client.showInfo = false
return client
}
func (c *CLI) WithShowInfo(showInfo bool) *CLI {
c.showInfo = showInfo
return c
}
func (c *CLI) WithOutput(out io.Writer) *CLI {
c.stdout = out
return c
}
func (c *CLI) WithStdErr(err io.Writer) *CLI {
c.stderr = err
return c
}
// Run stages the given Hypershift command arguments; the command itself is executed by Output or Execute
func (c *CLI) Run(args ...string) *CLI {
c.stdin = &bytes.Buffer{}
if c.stdout == nil {
c.stdout = &bytes.Buffer{}
}
if c.stderr == nil {
c.stderr = &bytes.Buffer{}
}
c.bashparm = args
if c.skipTLS {
c.bashparm = append(c.bashparm, "--skip-tls=true")
}
return c
}
// ExitError carries the failed command line, its trimmed combined output, and the underlying exec.ExitError
type ExitError struct {
Cmd string
StdErr string
*exec.ExitError
}
// FatalErr exits the test in case a fatal error has occurred.
func FatalErr(msg interface{}) {
// the path that leads to this being called isn't always clear...
fmt.Fprintln(g.GinkgoWriter, string(debug.Stack()))
e2e.Failf("%v", msg)
}
// Output executes the command and returns stdout/stderr combined into one string
func (c *CLI) Output() (string, error) {
parms := strings.Join(c.bashparm, " ")
cmd := exec.Command("bash", "-c", parms)
cmd.Stdin = c.stdin
if c.showInfo {
e2e.Logf("Running '%s'", parms)
}
out, err := cmd.CombinedOutput()
trimmed := strings.TrimSpace(string(out))
switch err.(type) {
case nil:
c.stdout = bytes.NewBuffer(out)
return trimmed, nil
case *exec.ExitError:
e2e.Logf("Error running %v:\n%s", cmd, trimmed)
return trimmed, &ExitError{ExitError: err.(*exec.ExitError), Cmd: parms, StdErr: trimmed}
default:
FatalErr(fmt.Errorf("unable to execute %s: %v", parms, err))
// unreachable code
return "", nil
}
}
func (c *CLI) Execute() error {
out, err := c.Output()
e2e.Logf("Command executed successfully with output:\n%s", out)
return err
}
| package hypershift | ||||
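A short usage sketch for the `CLI` wrapper above (the `hypershift version` command line is illustrative):

```go
// Sketch only (same package). Output() joins the args, runs them via
// `bash -c`, and returns trimmed combined stdout/stderr; a non-zero exit
// comes back as an *ExitError.
func exampleHypershiftVersion() {
	out, err := NewCmdClient().WithShowInfo(true).Run("hypershift", "version").Output()
	if exitErr, ok := err.(*ExitError); ok {
		e2e.Logf("command %q failed with output: %s", exitErr.Cmd, exitErr.StdErr)
		return
	}
	e2e.Logf("hypershift version: %s", out)
}
```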
function | openshift/openshift-tests-private | 95dfd3b0-6c3b-4f8a-b652-f682c8833fa7 | NewCmdClient | ['CLI'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func NewCmdClient() *CLI {
client := &CLI{}
client.username = "admin"
client.showInfo = false
return client
} | hypershift | ||||
function | openshift/openshift-tests-private | 7f78858a-292a-4b91-82a6-b678ad8d4abe | WithShowInfo | ['CLI'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func (c *CLI) WithShowInfo(showInfo bool) *CLI {
c.showInfo = showInfo
return c
} | hypershift | ||||
function | openshift/openshift-tests-private | 0e40f5f5-27f7-4229-bf5a-d6866b412a20 | WithOutput | ['"io"'] | ['CLI'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func (c *CLI) WithOutput(out io.Writer) *CLI {
c.stdout = out
return c
} | hypershift | |||
function | openshift/openshift-tests-private | d5f448d2-e82c-49fb-b1a6-b100d314bb4c | WithStdErr | ['"io"'] | ['CLI'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func (c *CLI) WithStdErr(err io.Writer) *CLI {
c.stderr = err
return c
} | hypershift | |||
function | openshift/openshift-tests-private | 97fa300b-75d7-4f9e-9c04-ae4ecb4f0a7b | Run | ['"bytes"'] | ['CLI'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func (c *CLI) Run(args ...string) *CLI {
c.stdin = &bytes.Buffer{}
if c.stdout == nil {
c.stdout = &bytes.Buffer{}
}
if c.stderr == nil {
c.stderr = &bytes.Buffer{}
}
c.bashparm = args
if c.skipTLS {
c.bashparm = append(c.bashparm, "--skip-tls=true")
}
return c
} | hypershift | |||
function | openshift/openshift-tests-private | 02c553de-e052-45bf-85e9-9391f610a5cf | FatalErr | ['"fmt"', '"runtime/debug"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func FatalErr(msg interface{}) {
// the path that leads to this being called isn't always clear...
fmt.Fprintln(g.GinkgoWriter, string(debug.Stack()))
e2e.Failf("%v", msg)
} | hypershift | ||||
function | openshift/openshift-tests-private | 1cd6ef35-0bb7-4666-a496-a1cafa527bc7 | Output | ['"bytes"', '"fmt"', '"os/exec"', '"strings"'] | ['CLI', 'ExitError'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func (c *CLI) Output() (string, error) {
parms := strings.Join(c.bashparm, " ")
cmd := exec.Command("bash", "-c", parms)
cmd.Stdin = c.stdin
if c.showInfo {
e2e.Logf("Running '%s'", parms)
}
out, err := cmd.CombinedOutput()
trimmed := strings.TrimSpace(string(out))
switch err.(type) {
case nil:
c.stdout = bytes.NewBuffer(out)
return trimmed, nil
case *exec.ExitError:
e2e.Logf("Error running %v:\n%s", cmd, trimmed)
return trimmed, &ExitError{ExitError: err.(*exec.ExitError), Cmd: parms, StdErr: trimmed}
default:
FatalErr(fmt.Errorf("unable to execute %s: %v", parms, err))
// unreachable code
return "", nil
}
} | hypershift | |||
function | openshift/openshift-tests-private | 8ae51280-c224-4265-a53f-5d3a5df8664b | Execute | ['CLI'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/client.go | func (c *CLI) Execute() error {
out, err := c.Output()
e2e.Logf("Command executed successfully with output:\n%s", out)
return err
} | hypershift | ||||
file | openshift/openshift-tests-private | 554d0ecf-db05-4ce3-a84a-420ca4788f7b | configmap_machineconfig | import (
"os"
"path/filepath"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/configmap_machineconfig.go | package hypershift
import (
"os"
"path/filepath"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
type configmapMachineConf struct {
Name string `json:"NAME"`
Namespace string `json:"NAMESPACE"`
SSHAuthorizedKeys string `json:"SSH_AUTHORIZED_KEYS"`
Template string
}
func (cm *configmapMachineConf) create(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
vars, err := parseTemplateVarParams(cm)
o.Expect(err).NotTo(o.HaveOccurred())
params := append([]string{"--ignore-unknown-parameters=true", "-f", cm.Template, "-p"}, vars...)
err = cm.applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, params...)
if err != nil {
e2e.Logf("failed to create configmap for machineconfig %s", err.Error())
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cm *configmapMachineConf) delete(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
args := []string{"configmap", cm.Name, "-n", cm.Namespace, "--ignore-not-found"}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cm *configmapMachineConf) applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
return applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, parameters...)
}
| package hypershift | ||||
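A hedged lifecycle sketch for `configmapMachineConf` (the field values are placeholders; `oc`, `kubeconfig`, and the template file come from the surrounding test and are not defined here):

```go
// Sketch only (same package).
func exampleConfigmapMachineConf(oc *exutil.CLI, kubeconfig string) {
	cm := &configmapMachineConf{
		Name:              "custom-ssh-key",                        // placeholder
		Namespace:         "clusters",                              // placeholder
		SSHAuthorizedKeys: "ssh-ed25519 AAAA... user@host",         // placeholder key
		Template:          "/path/to/configmap-machineconfig.yaml", // placeholder template path
	}
	parsed := "cm-machineconfig-rendered.yaml" // name of the rendered manifest
	defer cm.delete(oc, kubeconfig, parsed)    // also removes the rendered file
	cm.create(oc, kubeconfig, parsed)
}
```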
function | openshift/openshift-tests-private | 67ef5e1c-eada-4d8a-be67-f8e899598035 | create | ['configmapMachineConf'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/configmap_machineconfig.go | func (cm *configmapMachineConf) create(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
vars, err := parseTemplateVarParams(cm)
o.Expect(err).NotTo(o.HaveOccurred())
params := append([]string{"--ignore-unknown-parameters=true", "-f", cm.Template, "-p"}, vars...)
err = cm.applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, params...)
if err != nil {
e2e.Logf("failed to create configmap for machineconfig %s", err.Error())
}
o.Expect(err).NotTo(o.HaveOccurred())
} | hypershift | ||||
function | openshift/openshift-tests-private | e46272fa-fa7e-432a-9e22-1e6ef55821fa | delete | ['"os"', '"path/filepath"'] | ['configmapMachineConf'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/configmap_machineconfig.go | func (cm *configmapMachineConf) delete(oc *exutil.CLI, kubeconfig, parsedTemplate string) {
defer func() {
path := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+parsedTemplate)
os.Remove(path)
}()
args := []string{"configmap", cm.Name, "-n", cm.Namespace, "--ignore-not-found"}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | c4b59212-4ff0-4f79-a787-26ad97c476e5 | applyResourceFromTemplate | ['configmapMachineConf'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/configmap_machineconfig.go | func (cm *configmapMachineConf) applyResourceFromTemplate(oc *exutil.CLI, kubeconfig, parsedTemplate string, parameters ...string) error {
return applyResourceFromTemplate(oc, kubeconfig, parsedTemplate, parameters...)
} | hypershift | ||||
file | openshift/openshift-tests-private | de10c330-ae0c-4b32-a2e4-7c144c8849e1 | const | import "time" | github.com/openshift/openshift-tests-private/test/extended/hypershift/const.go | package hypershift
import "time"
// OcpClientVerb is the oc verb operation of OCP
type OcpClientVerb = string
/*
oc <OcpClientVerb> resources
*/
const (
OcpGet OcpClientVerb = "get"
OcpPatch OcpClientVerb = "patch"
OcpWhoami OcpClientVerb = "whoami"
OcpDelete OcpClientVerb = "delete"
OcpAnnotate OcpClientVerb = "annotate"
OcpDebug OcpClientVerb = "debug"
OcpExec OcpClientVerb = "exec"
OcpScale OcpClientVerb = "scale"
OcpAdm OcpClientVerb = "adm"
OcpApply OcpClientVerb = "apply"
OcpCreate OcpClientVerb = "create"
OcpLabel OcpClientVerb = "label"
OcpTaint OcpClientVerb = "taint"
OcpExtract OcpClientVerb = "extract"
// nodepoolNameSpace is the namespace where the nodepool CR is always created
nodepoolNameSpace = "clusters"
hypershiftOperatorNamespace = "hypershift"
hypershiftSharedingressNamespace = "hypershift-sharedingress"
ClusterInstallTimeout = 3600 * time.Second
ClusterInstallTimeoutAzure = 2700 * time.Second
DoubleLongTimeout = 1800 * time.Second
LongTimeout = 900 * time.Second
DefaultTimeout = 300 * time.Second
ShortTimeout = 50 * time.Second
)
const (
HyperShiftResourceTagKeyPrefix = "kubernetes.io/cluster/"
HyperShiftResourceTagKeyValue = "owned"
hypershiftNodePoolLabelKey = "hypershift.openshift.io/nodePool"
SupportedPreviousMinorVersions = 2
)
type PlatformType = string
const (
// AWSPlatform represents Amazon Web Services infrastructure.
AWSPlatform PlatformType = "AWS"
// NonePlatform represents user supplied (e.g. bare metal) infrastructure.
NonePlatform PlatformType = "None"
// IBMCloudPlatform represents IBM Cloud infrastructure.
IBMCloudPlatform PlatformType = "IBMCloud"
// AgentPlatform represents user supplied infrastructure booted with agents.
AgentPlatform PlatformType = "Agent"
// KubevirtPlatform represents Kubevirt infrastructure.
KubevirtPlatform PlatformType = "KubeVirt"
// AzurePlatform represents Azure infrastructure.
AzurePlatform PlatformType = "Azure"
// PowerVSPlatform represents PowerVS infrastructure.
PowerVSPlatform PlatformType = "PowerVS"
)
type AvailabilityPolicy = string
const (
// HighlyAvailable means components should be resilient to problems across
// fault boundaries as defined by the component to which the policy is
// attached. This usually means running critical workloads with 3 replicas and
// with little or no toleration of disruption of the component.
HighlyAvailable AvailabilityPolicy = "HighlyAvailable"
// SingleReplica means components are not expected to be resilient to problems
// across most fault boundaries associated with high availability. This
// usually means running critical workloads with just 1 replica and with
// toleration of full disruption of the component.
SingleReplica AvailabilityPolicy = "SingleReplica"
)
// AWSEndpointAccessType specifies the publishing scope of cluster endpoints.
type AWSEndpointAccessType = string
const (
// Public endpoint access allows public API server access and public node
// communication with the control plane.
Public AWSEndpointAccessType = "Public"
// PublicAndPrivate endpoint access allows public API server access and
// private node communication with the control plane.
PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate"
// Private endpoint access allows only private API server access and private
// node communication with the control plane.
Private AWSEndpointAccessType = "Private"
)
type IdentityProviderType = string
const (
// IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
// IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
// IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
// IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
IdentityProviderTypeGoogle IdentityProviderType = "Google"
// IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
// IdentityProviderTypeKeystone provides identities for users authenticating using Keystone password credentials
IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
// IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
)
const (
// DefaultAWSHyperShiftPrivateSecretFile is the location where AWS private credentials are mounted in Prow CI
DefaultAWSHyperShiftPrivateSecretFile = "/etc/hypershift-pool-aws-credentials/awsprivatecred"
// AWSHyperShiftPrivateSecretFile is the environment variable for the AWS private credentials file path
AWSHyperShiftPrivateSecretFile = "AWS_HYPERSHIFT_PRIVATE_SECRET_FILE"
)
// DNS
const (
hypershiftExternalDNSBaseDomainAWS = "hypershift-ci.qe.devcluster.openshift.com"
hypershiftExternalDNSDomainAWS = "hypershift-ext.qe.devcluster.openshift.com"
hypershiftBaseDomainAzure = "qe.azure.devcluster.openshift.com"
hypershiftExternalDNSDomainAzure = "qe1.azure.devcluster.openshift.com"
)
// cluster infrastructure
const (
machineAPINamespace = "openshift-machine-api"
mapiMachineset = "machinesets.machine.openshift.io"
mapiMachine = "machines.machine.openshift.io"
mapiMHC = "machinehealthchecks.machine.openshift.io"
machineApproverNamespace = "openshift-cluster-machine-approver"
clusterAPINamespace = "openshift-cluster-api"
capiMachineset = "machinesets.cluster.x-k8s.io"
capiMachine = "machines.cluster.x-k8s.io"
capiInfraGroup = "infrastructure.cluster.x-k8s.io"
capiAwsMachineTemplateKind = "AWSMachineTemplate"
npInfraMachineTemplateAnnotationKey = "hypershift.openshift.io/nodePoolPlatformMachineTemplate"
nodeInstanceTypeLabelKey = "node.kubernetes.io/instance-type"
)
// cluster lifecycle
const (
cleanupCloudResAnnotationKey = "hypershift.openshift.io/cleanup-cloud-resources"
destroyGracePeriodAnnotationKey = "hypershift.openshift.io/destroy-grace-period"
)
// Expected to be read-only
var platform2InfraMachineTemplateKind = map[string]string{
AWSPlatform: capiAwsMachineTemplateKind,
}
// node isolation
const (
servingComponentNodesTaintKey = "hypershift.openshift.io/request-serving-component"
servingComponentNodesLabelKey = "hypershift.openshift.io/request-serving-component"
servingComponentPodLabelKey = "hypershift.openshift.io/request-serving-component"
nonServingComponentLabelKey = "hypershift.openshift.io/control-plane"
nonServingComponentTaintKey = nonServingComponentLabelKey
servingComponentNodesTaint = servingComponentNodesTaintKey + "=true:NoSchedule"
servingComponentNodesLabel = servingComponentNodesLabelKey + "=true"
nonServingComponentLabel = nonServingComponentLabelKey + "=true"
nonServingComponentTaint = nonServingComponentTaintKey + "=true:NoSchedule"
osdfmPairedNodeLabelKey = "osd-fleet-manager.openshift.io/paired-nodes"
hypershiftClusterLabelKey = "hypershift.openshift.io/cluster"
hcTopologyAnnotationKey = "hypershift.openshift.io/topology"
hcRequestServingTopologyAnnotation = hcTopologyAnnotationKey + "=dedicated-request-serving-components"
)
// etcd
const (
etcdCmdPrefixForHostedCluster = "ETCDCTL_API=3 etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key"
etcdDiscoverySvcNameForHostedCluster = "etcd-discovery"
etcdClientReqPort = "2379"
etcdLocalClientReqEndpoint = "localhost:" + etcdClientReqPort
)
// kas
const (
kasEncryptionConfigSecretName = "kas-secret-encryption-config"
)
// olm
const (
olmCatalogPlacementManagement = "management"
olmCatalogPlacementGuest = "guest"
)
// auth
const (
podSecurityAdmissionOverrideLabelKey = "hypershift.openshift.io/pod-security-admission-label-override"
)
type podSecurityLevel string
const (
podSecurityRestricted podSecurityLevel = "restricted"
podSecurityBaseline podSecurityLevel = "baseline"
podSecurityPrivileged podSecurityLevel = "privileged"
)
// Enum for hosted cluster service
type hcService string
// Hosted cluster services
const (
hcServiceAPIServer hcService = "APIServer"
hcServiceOAuthServer hcService = "OAuthServer"
hcServiceKonnectivity hcService = "Konnectivity"
hcServiceIgnition hcService = "Ignition"
hcServiceOVNSbDb hcService = "OVNSbDb"
)
// Enum for hosted cluster service types
type hcServiceType string
// Hosted cluster services type
const (
hcServiceTypeLoadBalancer hcServiceType = "LoadBalancer"
hcServiceTypeNodePort hcServiceType = "NodePort"
hcServiceTypeRoute hcServiceType = "Route"
hcServiceTypeNone hcServiceType = "None"
hcServiceTypeS3 hcServiceType = "S3"
)
type K8SResource string
const (
Service K8SResource = "services"
)
type ctxKey string
const (
ctxKeyId ctxKey = "id"
)
// Managed service types
const (
managedServiceKey = "MANAGED_SERVICE"
managedServiceAROHCP = "ARO-HCP"
)
// Dump
const (
dumpArchiveName = "hypershift-dump.tar.gz"
)
| package hypershift | ||||
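The composed node-isolation constants are meant to be passed straight to `oc label`/`oc taint`. A sketch (the `doOcpReq` helper's signature is inferred from its call sites later in this section; `nodeName` is a placeholder):

```go
// Sketch only (same package): dedicate a management node to request-serving
// components using the composed label/taint constants above.
func exampleMarkServingNode(oc *exutil.CLI, nodeName string) {
	doOcpReq(oc, OcpLabel, false, "node", nodeName, servingComponentNodesLabel)
	doOcpReq(oc, OcpTaint, false, "node", nodeName, servingComponentNodesTaint)
}
```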
file | openshift/openshift-tests-private | 8475532a-d348-4f9b-a85b-ece791319491 | etcd | import (
"fmt"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/etcd.go | package hypershift
import (
"fmt"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
func iamRoleTrustPolicyForEtcdBackup(accountId, saIssuer, hcpNs string) string {
trustPolicy := fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::%s:oidc-provider/%s"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"%s:sub": "system:serviceaccount:%s:etcd-backup-sa"
}
}
}
]
}`, accountId, saIssuer, saIssuer, hcpNs)
e2e.Logf("Role trust policy for etcd backup:\n%s", trustPolicy)
return trustPolicy
}
| package hypershift | ||||
function | openshift/openshift-tests-private | 8bf3a229-d28c-40e7-9bb8-a76a3ed2b24d | iamRoleTrustPolicyForEtcdBackup | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/etcd.go | func iamRoleTrustPolicyForEtcdBackup(accountId, saIssuer, hcpNs string) string {
trustPolicy := fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::%s:oidc-provider/%s"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"%s:sub": "system:serviceaccount:%s:etcd-backup-sa"
}
}
}
]
}`, accountId, saIssuer, saIssuer, hcpNs)
e2e.Logf("Role trust policy for etcd backup:\n%s", trustPolicy)
return trustPolicy
} | hypershift | ||||
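A call sketch for the trust policy builder (all three arguments are placeholders; note the issuer is interpolated both into the `Federated` ARN and as the condition key prefix, so it is passed without a scheme):

```go
// Sketch only (same package).
func exampleEtcdBackupTrustPolicy() string {
	return iamRoleTrustPolicyForEtcdBackup(
		"123456789012",              // placeholder AWS account ID
		"oidc.example.com/abcd1234", // placeholder SA issuer, no scheme
		"clusters-demo-hcp",         // placeholder hosted control plane namespace
	)
}
```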
file | openshift/openshift-tests-private | 08689aed-f937-4217-bc1c-5e22d2ba115e | hostedclusters | import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
"github.com/tidwall/gjson"
clientv3 "go.etcd.io/etcd/client/v3"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/util/retry"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/ptr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | package hypershift
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
"github.com/tidwall/gjson"
clientv3 "go.etcd.io/etcd/client/v3"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/util/retry"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/ptr"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
type hostedCluster struct {
oc *exutil.CLI
namespace string
name string
hostedClustersKubeconfigFile string
}
func newHostedCluster(oc *exutil.CLI, namespace string, name string) *hostedCluster {
return &hostedCluster{oc: oc, namespace: namespace, name: name}
}
func (h *hostedCluster) getHostedClusterKubeconfigFile() string {
return h.hostedClustersKubeconfigFile
}
func (h *hostedCluster) setHostedClusterKubeconfigFile(kubeconfig string) {
h.hostedClustersKubeconfigFile = kubeconfig
}
// getHostedClusterReadyNodeCount gets the ready node count
// npName: nodepool name; if empty, all ready nodes are counted
func (h *hostedCluster) getHostedClusterReadyNodeCount(npName string) (int, error) {
cond := []string{"--kubeconfig=" + h.hostedClustersKubeconfigFile, "node", "--ignore-not-found", `-ojsonpath='{.items[*].status.conditions[?(@.type=="Ready")].status}'`}
if len(npName) > 0 {
cond = append(cond, "-l", "hypershift.openshift.io/nodePool="+npName)
}
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args(cond...).Output()
if er != nil {
e2e.Logf(" get node status ready error: %v", er)
return 0, er
}
return strings.Count(value, "True"), nil
}
func (h *hostedCluster) pollGetHostedClusterReadyNodeCount(npName string) func() int {
return func() int {
value, err := h.getHostedClusterReadyNodeCount(npName)
o.Expect(err).ShouldNot(o.HaveOccurred())
return value
}
}
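// exampleWaitForReadyNodes is a usage sketch, not part of the original file:
// the poll* helpers here are shaped for Gomega's Eventually; the expected
// count and timeout choices below are illustrative.
func exampleWaitForReadyNodes(h *hostedCluster, npName string, count int) {
o.Eventually(h.pollGetHostedClusterReadyNodeCount(npName), LongTimeout, LongTimeout/10).Should(o.Equal(count))
}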
func (h *hostedCluster) getHostedClusterInfrastructureTopology() (string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+h.hostedClustersKubeconfigFile, "infrastructure", "cluster", `-o=jsonpath={.status.infrastructureTopology}`).Output()
if er != nil {
e2e.Logf(" get infrastructure/cluster status error: %v", er)
return "", er
}
return value, nil
}
func (h *hostedCluster) pollGetHostedClusterInfrastructureTopology() func() string {
return func() string {
value, _ := h.getHostedClusterInfrastructureTopology()
return value
}
}
func (h *hostedCluster) getInfraID() (string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", h.namespace, h.name, `-ojsonpath={.spec.infraID}`).Output()
if er != nil {
e2e.Logf("get InfraID, error occurred: %v", er)
return "", er
}
return value, nil
}
func (h *hostedCluster) getResourceGroupName() (string, error) {
infraId, err := h.getInfraID()
if err != nil {
return "", err
}
return h.name + "-" + infraId, nil
}
func (h *hostedCluster) getClustersDeletionTimestamp() (string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("clusters", "-n", h.namespace+"-"+h.name, "--ignore-not-found", `-ojsonpath={.items[].metadata.deletionTimestamp}`).Output()
if er != nil {
e2e.Logf("get ClusterDeletionTimestamp, error occurred: %v", er)
return "", er
}
return value, nil
}
func (h *hostedCluster) getHostedComponentNamespace() string {
return fmt.Sprintf("%s-%s", h.namespace, h.name)
}
// Warning: the returned default SG ID could be empty
func (h *hostedCluster) getDefaultSgId() string {
return doOcpReq(h.oc, OcpGet, false, "hc", h.name, "-n", h.namespace, "-o=jsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}")
}
func (h *hostedCluster) getSvcPublishingStrategyType(svc hcService) hcServiceType {
jsonPath := fmt.Sprintf(`-o=jsonpath={.spec.services[?(@.service=="%s")].servicePublishingStrategy.type}`, svc)
return hcServiceType(doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace, jsonPath))
}
func (h *hostedCluster) getControlPlaneEndpointPort() string {
return doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace, `-o=jsonpath={.status.controlPlaneEndpoint.port}`)
}
func (h *hostedCluster) hostedClustersReady() (bool, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", h.namespace, "--ignore-not-found", h.name, `-ojsonpath='{.status.conditions[?(@.type=="Available")].status}'`).Output()
if er != nil {
e2e.Logf("error occurred to get Available: %v, try next round", er)
return false, er
}
if !strings.Contains(value, "True") {
return false, fmt.Errorf("Available != True")
}
value, er = h.oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", h.namespace, "--ignore-not-found", h.name, `-ojsonpath={.status.version.history[?(@.state!="")].state}`).Output()
if er != nil {
e2e.Logf("error occurred to get PROGRESS: %v, try next round", er)
return false, er
}
if !strings.Contains(value, "Completed") {
return false, fmt.Errorf("PROGRESS != Completed")
}
return true, nil
}
func (h *hostedCluster) pollHostedClustersReady() func() bool {
return func() bool {
value, _ := h.hostedClustersReady()
return value
}
}
func (h *hostedCluster) getHostedClustersHACPWorkloadNames(workloadType string) ([]string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args(workloadType, "-n", h.namespace+"-"+h.name, `-ojsonpath={.items[?(@.spec.replicas>1)].metadata.name}`).Output()
if er != nil {
e2e.Logf("get HA HostedClusters Workload Names, error occurred: %v", er)
return nil, er
}
return strings.Split(value, " "), nil
}
func (h *hostedCluster) isCPPodOnlyRunningOnOneNode(nodeName string) (bool, error) {
value, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", h.namespace+"-"+h.name, `-ojsonpath={.items[?(@.spec.nodeName!="`+nodeName+`")].metadata.name}`).Output()
if err != nil {
e2e.Logf("check HostedClusters CP PodOnly One Node, error occurred: %v", err)
return false, err
}
if len(value) == 0 {
return true, nil
}
e2e.Logf("not on %s node pod name:%s", nodeName, value)
if len(strings.Split(value, " ")) == 1 && strings.Contains(value, "ovnkube") {
return true, nil
}
return false, nil
}
func (h *hostedCluster) pollIsCPPodOnlyRunningOnOneNode(nodeName string) func() bool {
return func() bool {
value, _ := h.isCPPodOnlyRunningOnOneNode(nodeName)
return value
}
}
func (h *hostedCluster) getAzureDiskSizeGBByNodePool(nodePool string) string {
return doOcpReq(h.oc, OcpGet, false, "nodepools", "-n", h.namespace, nodePool, `-ojsonpath={.spec.platform.azure.diskSizeGB}`)
}
func (h *hostedCluster) getAzureSubnetId() string {
return doOcpReq(h.oc, OcpGet, false, "hc", h.name, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.subnetID}")
}
func (h *hostedCluster) pollGetNodePoolReplicas() func() string {
return func() string {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepools", "-n", h.namespace, `-ojsonpath={.items[*].status.replicas}`).Output()
if er != nil {
return ""
}
return value
}
}
func getHostedClusters(oc *exutil.CLI, namespace string) (string, error) {
value, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
if er != nil {
e2e.Logf("get HostedClusters, error occurred: %v", er)
return "", er
}
return value, nil
}
func pollGetHostedClusters(oc *exutil.CLI, namespace string) func() string {
return func() string {
value, _ := getHostedClusters(oc, namespace)
return value
}
}
// checkHCConditions checks the hosted cluster conditions and returns whether they all carry the expected status.
func (h *hostedCluster) checkHCConditions() bool {
res, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("hostedcluster", h.name, "-n", h.namespace,
`-ojsonpath={range .status.conditions[*]}{@.type}{" "}{@.status}{" "}{end}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
// the expected conditions are currently identical on all platforms
return checkSubstringWithNoExit(res,
[]string{"ValidHostedControlPlaneConfiguration True", "ClusterVersionSucceeding True",
"Degraded False", "EtcdAvailable True", "KubeAPIServerAvailable True", "InfrastructureReady True",
"Available True", "ValidConfiguration True", "SupportedHostedCluster True",
"IgnitionEndpointAvailable True", "ReconciliationActive True",
"ValidReleaseImage True", "ReconciliationSucceeded True"})
}
func (h *hostedCluster) checkNodepoolAllConditions(npName string) func() bool {
return func() bool {
res := doOcpReq(h.oc, OcpGet, true, "nodepools", "-n", h.namespace, npName, `-ojsonpath={range .status.conditions[*]}{@.type}{" "}{@.status}{" "}{end}`)
return checkSubstringWithNoExit(res, []string{"AutoscalingEnabled False", "UpdateManagementEnabled True", "ValidReleaseImage True", "ValidPlatformImage True", "AWSSecurityGroupAvailable True", "ValidMachineConfig True", "ValidGeneratedPayload True", "ReachedIgnitionEndpoint True", "ValidTuningConfig True", "ReconciliationActive True", "AllMachinesReady True", "AllNodesHealthy True", "Ready True"})
}
}
// getHostedclusterConsoleInfo returns the console URL and password
// the first return value is the console URL
// the second return value is the kubeadmin password
func (h *hostedCluster) getHostedclusterConsoleInfo() (string, string) {
url, cerr := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpWhoami).Args("--show-console").Output()
o.Expect(cerr).ShouldNot(o.HaveOccurred())
pwdbase64, pswerr := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "secret",
"kubeadmin-password", "-ojsonpath={.data.password}").Output()
o.Expect(pswerr).ShouldNot(o.HaveOccurred())
pwd, err := base64.StdEncoding.DecodeString(pwdbase64)
o.Expect(err).ShouldNot(o.HaveOccurred())
return url, string(pwd)
}
func (h *hostedCluster) createAwsNodePool(name string, nodeCount int) {
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create nodepool aws --name %s --namespace %s --cluster-name %s --node-count %d",
name, h.namespace, h.name, nodeCount)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (h *hostedCluster) createAwsInPlaceNodePool(name string, nodeCount int, dir string) {
npFile := dir + "/np-inplace.yaml"
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create nodepool aws --name %s --namespace %s --cluster-name %s --node-count %d --render > %s", name, h.namespace, h.name, nodeCount, npFile)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
cmdSed := fmt.Sprintf("sed -i 's/Replace/InPlace/g' %s", npFile)
_, err = bashClient.Run(cmdSed).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
err = h.oc.AsAdmin().WithoutNamespace().Run(OcpCreate).Args("-f", npFile).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (h *hostedCluster) getAzureNodePoolImageType(npName string) azureNodepoolImageType {
return azureNodepoolImageType(doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureImageType}"))
}
func (h *hostedCluster) getAzureDefaultNodePoolImageType() azureNodepoolImageType {
return h.getAzureNodePoolImageType(h.name)
}
func (h *hostedCluster) getAzureNodePoolImageId(npName string) string {
return doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.imageID}")
}
func (h *hostedCluster) getAzureDefaultNodePoolImageId() string {
return h.getAzureNodePoolImageId(h.name)
}
func (h *hostedCluster) getAzureNodePoolMarketplaceImage(npName string) *azureMarketplaceImage {
return &azureMarketplaceImage{
Offer: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.offer}"),
Publisher: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.publisher}"),
SKU: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.sku}"),
Version: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.version}"),
}
}
func (h *hostedCluster) getAzureDefaultNodePoolMarketplaceImage() *azureMarketplaceImage {
return h.getAzureNodePoolMarketplaceImage(h.name)
}
func (h *hostedCluster) createAdditionalAzureNodePool(name string, nodeCount int) {
np := NewAzureNodePool(name, h.name, h.namespace).WithNodeCount(ptr.To(nodeCount)).WithSubnetId(h.getAzureSubnetId())
switch imageType := h.getAzureDefaultNodePoolImageType(); imageType {
case azureNodepoolImageTypeId:
np.WithImageId(h.getAzureDefaultNodePoolImageId())
case azureNodepoolImageTypeMarketplace:
np.WithMarketplaceImage(h.getAzureDefaultNodePoolMarketplaceImage())
default:
e2e.Failf("Unknown Azure Nodepool image type: %s", imageType)
}
np.CreateAzureNodePool()
}
func (h *hostedCluster) deleteNodePool(name string) {
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpDelete).Args("--ignore-not-found", "nodepool", "-n", h.namespace, name).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (h *hostedCluster) checkNodePoolReady(name string) bool {
//check condition {Ready:True}
readyCond := `-ojsonpath={.status.conditions[?(@.type=="Ready")].status}`
isReady, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("np", "-n", h.namespace, name, readyCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(isReady, "True") {
e2e.Logf("nodePool ready condition: %s", isReady)
return false
}
//check condition {AutoscalingEnabled:True/False}
autoScalCond := `-ojsonpath={.status.conditions[?(@.type=="AutoscalingEnabled")].status}`
autoscaleEnabled, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("np", "-n", h.namespace, name, autoScalCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
// if autoscaling is not enabled, check that replicas match the expected count
if autoscaleEnabled != "True" {
desiredNodes, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("np", "-n", h.namespace, name, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
currentNodes, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("np", "-n", h.namespace, name, "-o=jsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return desiredNodes == currentNodes
}
return true
}
func (h *hostedCluster) pollCheckHostedClustersNodePoolReady(name string) func() bool {
return func() bool {
return h.checkNodePoolReady(name)
}
}
func (h *hostedCluster) setNodepoolAutoScale(name, max, min string) {
removeNpConfig := `[{"op": "remove", "path": "/spec/replicas"}]`
autoscaleConfig := fmt.Sprintf(`--patch={"spec": {"autoScaling": {"max": %s, "min":%s}}}`, max, min)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, "--type=json", "-p", removeNpConfig).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
_, err = h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, autoscaleConfig, "--type=merge").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
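// exampleEnableAutoscaling is a usage sketch, not part of the original file:
// drop the static replica count, set min/max, then wait for the nodepool to
// settle via the ready check above. The min/max values are illustrative.
func exampleEnableAutoscaling(h *hostedCluster, npName string) {
h.setNodepoolAutoScale(npName, "4", "2")
o.Eventually(h.pollCheckHostedClustersNodePoolReady(npName), DoubleLongTimeout, DefaultTimeout).Should(o.BeTrue())
}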
// getHostedClusterNodeNameByLabelFilter gets hosted cluster node names by label
// labelFilter: ${key1}=${value1},${key2}=${value2} e.g. hypershift.openshift.io/nodePool=hypershift-ci-22374-us-east-2a
func (h *hostedCluster) getHostedClusterNodeNameByLabelFilter(labelFilter string) string {
nameCond := `-ojsonpath={.items[*].metadata.name}`
nodesName, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args("node", "--ignore-not-found", "-l", labelFilter, nameCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return nodesName
}
func (h *hostedCluster) getHostedClusterNodeReadyStatus(nodeName string) string {
labelFilter := "kubernetes.io/hostname=" + nodeName
readyCond := `-ojsonpath={.items[].status.conditions[?(@.type=="Ready")].status}`
status, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args("node", "--ignore-not-found", "-l", labelFilter, readyCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return status
}
// setNodepoolAutoRepair sets the spec.management.autoRepair value
// enabled: "true" or "false"
func (h *hostedCluster) setNodepoolAutoRepair(name, enabled string) {
autoRepairConfig := fmt.Sprintf(`--patch={"spec": {"management": {"autoRepair": %s}}}`, enabled)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, autoRepairConfig, "--type=merge").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (h *hostedCluster) pollCheckNodepoolAutoRepairDisabled(name string) func() bool {
return func() bool {
return h.checkNodepoolAutoRepairDisabled(name)
}
}
func (h *hostedCluster) checkNodepoolAutoRepairDisabled(name string) bool {
// check nodepool status
autoRepairCond := `-ojsonpath={.status.conditions[?(@.type=="AutorepairEnabled")].status}`
rc, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, autoRepairCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if strings.Contains(rc, "True") {
return false
}
// the MachineHealthCheck should not exist when autoRepair is disabled
mchCAPI := "machinehealthchecks.cluster.x-k8s.io"
rc, err = h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mchCAPI, name, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(rc) > 0 {
return false
}
return true
}
func (h *hostedCluster) pollCheckNodepoolAutoRepairEnabled(name string) func() bool {
return func() bool {
return h.checkNodepoolAutoRepairEnabled(name)
}
}
func (h *hostedCluster) checkNodepoolAutoRepairEnabled(name string) bool {
// check nodepool status
autoRepairCond := `-ojsonpath={.status.conditions[?(@.type=="AutorepairEnabled")].status}`
rc, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, autoRepairCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(rc, "True") {
return false
}
// get nodepool replicas
npReplica, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, "-ojsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
// check MHC currentHealthy; the MHC name is the same as the nodepool name
mchCAPI := "machinehealthchecks.cluster.x-k8s.io"
currentHealthyNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mchCAPI, name, "-ojsonpath={.status.currentHealthy}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return npReplica == currentHealthyNum
}
func (h *hostedCluster) pollCheckNodeHealthByMHC(mhcName string) func() bool {
return func() bool {
return h.checkNodeHealthByMHC(mhcName)
}
}
// checkNodeHealthByMHC checks whether "Expected Machines" matches "Current Healthy" in the MHC
func (h *hostedCluster) checkNodeHealthByMHC(mhcName string) bool {
mchCAPI := "machinehealthchecks.cluster.x-k8s.io"
expectedMachineCond := `-ojsonpath={.status.expectedMachines}`
expectedMachineNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mchCAPI, mhcName, expectedMachineCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
currentHealthyCond := `-ojsonpath={.status.currentHealthy}`
currentHealthyNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mchCAPI, mhcName, currentHealthyCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return expectedMachineNum == currentHealthyNum
}
func (h *hostedCluster) pollCheckDeletedNodePool(npName string) func() bool {
return func() bool {
return h.checkDeletedNodePool(npName)
}
}
func (h *hostedCluster) checkDeletedNodePool(npName string) bool {
rc, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "np", npName, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(strings.TrimSpace(rc)) > 0 {
return false
}
params := []string{"no", "--ignore-not-found", "-l", "hypershift.openshift.io/nodePool=" + npName}
rc, err = h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(strings.TrimSpace(rc)) > 0 {
return false
}
return true
}
func (h *hostedCluster) pollCheckNodepoolCurrentNodes(name, expected string) func() bool {
return func() bool {
return h.checkNodepoolCurrentNodes(name, expected)
}
}
func (h *hostedCluster) checkNodepoolCurrentNodes(name, expected string) bool {
currentNodes, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("--ignore-not-found", "np", "-n", h.namespace, name, "-o=jsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return currentNodes == expected
}
func (h *hostedCluster) isNodepoolAutosaclingEnabled(name string) bool {
autoScalCond := `-ojsonpath={.status.conditions[?(@.type=="AutoscalingEnabled")].status}`
autoscaleEnabled, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("--ignore-not-found", "np", "-n", h.namespace, name, autoScalCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return strings.Contains(autoscaleEnabled, "True")
}
func (h *hostedCluster) pollCheckAllNodepoolReady() func() bool {
return func() bool {
return h.checkAllNodepoolReady()
}
}
func (h *hostedCluster) checkAllNodepoolReady() bool {
nodeReadyCond := fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].status.conditions[?(@.type=="Ready")].status}`, h.name)
nodesStatus, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("--ignore-not-found", "np", nodeReadyCond, "--namespace", h.namespace).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(nodesStatus) <= 0 {
return true
}
if strings.Contains(nodesStatus, "False") {
return false
}
return true
}
type nodePoolCondition struct {
conditionsType string
conditionsTypeReq string
expectConditionsResult string
}
func (h *hostedCluster) pollCheckNodePoolConditions(npName string, conditions []nodePoolCondition) func() bool {
return func() bool {
return h.checkNodePoolConditions(npName, conditions)
}
}
func (h *hostedCluster) checkNodePoolConditions(npName string, conditions []nodePoolCondition) bool {
o.Expect(doOcpReq(h.oc, OcpGet, true, "nodepools", "-n", h.namespace, "-ojsonpath={.items[*].metadata.name}")).Should(o.ContainSubstring(npName))
for _, condition := range conditions {
res := doOcpReq(h.oc, OcpGet, false, "nodepools", npName, "-n", h.namespace, fmt.Sprintf(`-ojsonpath={.status.conditions[?(@.type=="%s")].%s}`, condition.conditionsType, condition.conditionsTypeReq))
e2e.Logf("checkNodePoolStatus: %s, %s, expected: %s, res: %s", condition.conditionsType, condition.conditionsTypeReq, condition.expectConditionsResult, res)
if !strings.Contains(res, condition.expectConditionsResult) {
return false
}
}
return true
}
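// exampleWaitForNodePoolConditions is a usage sketch, not part of the
// original file; the condition type and expected result are illustrative.
func exampleWaitForNodePoolConditions(h *hostedCluster, npName string) {
conds := []nodePoolCondition{
{conditionsType: "Ready", conditionsTypeReq: "status", expectConditionsResult: "True"},
}
o.Eventually(h.pollCheckNodePoolConditions(npName, conds), LongTimeout, LongTimeout/10).Should(o.BeTrue())
}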
func (h *hostedCluster) getNodepoolPayload(name string) string {
return doOcpReq(h.oc, OcpGet, true, "nodepools", name, "-n", h.namespace, `-ojsonpath={.spec.release.image}`)
}
func (h *hostedCluster) getNodepoolStatusPayloadVersion(name string) string {
payloadVersionCond := `-ojsonpath={.status.version}`
version, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("--ignore-not-found", "np", name, "-n", h.namespace, payloadVersionCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return version
}
func (h *hostedCluster) upgradeNodepoolPayloadInPlace(name, payload string) {
doOcpReq(h.oc, OcpPatch, true, "nodepools", name, "-n", h.namespace, "--type=json", fmt.Sprintf(`-p=[{"op": "replace", "path": "/spec/release/image","value": "%s"}]`, payload))
}
func (h *hostedCluster) pollCheckUpgradeNodepoolPayload(name, expectPayload, version string) func() bool {
return func() bool {
curPayload := h.getNodepoolPayload(name)
if strings.Contains(curPayload, expectPayload) {
v := h.getNodepoolStatusPayloadVersion(name)
if strings.Contains(v, version) {
return true
}
}
return false
}
}
// getCPReleaseImage returns the .spec.release.image of the hostedcluster
// it is set by the user and can be treated as the expected release
func (h *hostedCluster) getCPReleaseImage() string {
payload, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name,
`-ojsonpath={.spec.release.image}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return payload
}
// getCPPayloadTag returns the hosted cluster's current actual payload version
func (h *hostedCluster) getCPPayloadTag() string {
payload, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name,
`-ojsonpath={.status.version.history[?(@.state=="Completed")].version}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
// for multiple payloads, just use the first one
return strings.TrimSpace(strings.Split(payload, " ")[0])
}
// getCPDesiredPayload returns the desired payload in status
func (h *hostedCluster) getCPDesiredPayload() string {
payload, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name,
`-ojsonpath={.status.version.desired.image}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return payload
}
func (h *hostedCluster) upgradeCPPayload(payload string) {
patchOption := fmt.Sprintf(`-p=[{"op": "replace", "path": "/spec/release/image","value": "%s"}]`, payload)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "hostedcluster", h.name,
"--type=json", patchOption).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (h *hostedCluster) pollCheckUpgradeCPPayload(payload string) func() bool {
return func() bool {
curPayload := h.getCPPayloadTag()
if strings.Contains(payload, curPayload) {
return true
}
return false
}
}
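// exampleUpgradeControlPlane is a usage sketch, not part of the original
// file: patch the hosted cluster release image, then poll until the
// completed payload matches. The timeout choices are illustrative.
func exampleUpgradeControlPlane(h *hostedCluster, targetPayload string) {
h.upgradeCPPayload(targetPayload)
o.Eventually(h.pollCheckUpgradeCPPayload(targetPayload), ClusterInstallTimeout, LongTimeout).Should(o.BeTrue())
}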
func (h *hostedCluster) isFIPEnabled() bool {
res, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name, "-ojsonpath={.spec.fips}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
enable, err := strconv.ParseBool(res)
o.Expect(err).ShouldNot(o.HaveOccurred())
return enable
}
// checkFIPInHostedCluster checks FIPS settings on the hosted cluster nodes
func (h *hostedCluster) checkFIPInHostedCluster() bool {
nodes, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpGet).Args("no", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
for _, nodename := range strings.Split(nodes, " ") {
res, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args("node/"+nodename, "-q", "--", "chroot", "/host", "fips-mode-setup", "--check").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(res, "FIPS mode is enabled") {
e2e.Logf("Warning: node %s fips-mode-setup check FIP false", nodename)
return false
}
res, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args("node/"+nodename, "-q", "--", "cat", "/proc/sys/crypto/fips_enabled").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(res, "1") {
e2e.Logf("Warning: node %s /proc/sys/crypto/fips_enabled != 1", nodename)
return false
}
res, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args("node/"+nodename, "-q", "--", "sysctl", "crypto.fips_enabled").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(res, "crypto.fips_enabled = 1") {
e2e.Logf("Warning: node %s crypto.fips_enabled != 1", nodename)
return false
}
}
return true
}
func (h *hostedCluster) isCPHighlyAvailable() bool {
res, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name, "-ojsonpath={.spec.controllerAvailabilityPolicy}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return strings.Contains(res, HighlyAvailable)
}
// checkAWSRootVolumes checks the aws root-volume configuration,
// checkItem: iops, size or type
func (h *hostedCluster) checkAWSRootVolumes(name string, checkItem string, expected interface{}) bool {
awsmachineVolumeJSONPathPtn := `-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.rootVolume.%s}`
awsmachineVolumeFilter := fmt.Sprintf(awsmachineVolumeJSONPathPtn, h.namespace, name, checkItem)
nodepoolVolumeFilter := fmt.Sprintf("-ojsonpath={.spec.platform.aws.rootVolume.%s}", checkItem)
var expectedVal string
switch v := expected.(type) {
case string:
expectedVal = v
case int64:
expectedVal = strconv.FormatInt(v, 10)
case *int64:
expectedVal = strconv.FormatInt(*v, 10)
default:
e2e.Logf("Error: not supported expected value while checking aws nodepool root-volume config")
return false
}
//check nodepool
rootVolumeConfig, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("np", name, "-n", h.namespace, nodepoolVolumeFilter).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if strings.TrimSpace(rootVolumeConfig) != expectedVal {
e2e.Logf("Error: nodepool %s rootVolume item %s not matched: return %s and expect %s, original expected %v", name, checkItem, rootVolumeConfig, expectedVal, expected)
return false
}
//check awsmachine
awsRootVolumeConfig, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("awsmachines", "-n", h.namespace+"-"+h.name, awsmachineVolumeFilter).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if strings.TrimSpace(awsRootVolumeConfig) != expectedVal {
e2e.Logf("Error: awsmachine for nodepool %s rootVolume item %s not matched: return %s and expect %s, original expected %v", name, checkItem, awsRootVolumeConfig, expectedVal, expected)
return false
}
return true
}
func (h *hostedCluster) checkAWSNodepoolRootVolumeSize(name string, expectedSize int64) bool {
return h.checkAWSRootVolumes(name, "size", expectedSize)
}
func (h *hostedCluster) checkAWSNodepoolRootVolumeIOPS(name string, expectedIOPS int64) bool {
return h.checkAWSRootVolumes(name, "iops", expectedIOPS)
}
func (h *hostedCluster) checkAWSNodepoolRootVolumeType(name string, expectedType string) bool {
return h.checkAWSRootVolumes(name, "type", expectedType)
}
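// exampleVerifyRootVolume is a usage sketch, not part of the original file:
// assert a nodepool's root volume on both the nodepool spec and the CAPI
// awsmachines. The size/iops/type values are illustrative.
func exampleVerifyRootVolume(h *hostedCluster, npName string) {
o.Expect(h.checkAWSNodepoolRootVolumeSize(npName, 64)).To(o.BeTrue())
o.Expect(h.checkAWSNodepoolRootVolumeIOPS(npName, 3000)).To(o.BeTrue())
o.Expect(h.checkAWSNodepoolRootVolumeType(npName, "gp3")).To(o.BeTrue())
}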
func (h *hostedCluster) setAWSNodepoolInstanceType(name, instanceType string) {
cond := fmt.Sprintf(`--patch={"spec": {"platform": {"aws": {"instanceType":"%s"}}}}`, instanceType)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, cond, "--type=merge").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
}
func (h *hostedCluster) getAWSNodepoolInstanceType(name string) string {
cond := `-ojsonpath={.spec.platform.aws.instanceType}`
instanceType, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).ShouldNot(o.BeEmpty())
return instanceType
}
func (h *hostedCluster) getNodepoolUpgradeType(name string) string {
cond := `-ojsonpath={.spec.management.upgradeType}`
instanceType, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).ShouldNot(o.BeEmpty())
return instanceType
}
func (h *hostedCluster) pollCheckAWSNodepoolInstanceType(name, expected string) func() bool {
return func() bool {
return h.checkAWSNodepoolInstanceType(name, expected)
}
}
func (h *hostedCluster) checkAWSNodepoolInstanceType(name, expected string) bool {
// check nodepool instanceType
instanceType := h.getAWSNodepoolInstanceType(name)
if instanceType != expected {
e2e.Logf("instanceType not matched, expected: %s, got: %s", expected, instanceType)
return false
}
// check awsmachinetemplates instanceType
cond := `-ojsonpath={.spec.template.spec.instanceType}`
templateInstanceType, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "awsmachinetemplates", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(templateInstanceType).ShouldNot(o.BeEmpty())
return templateInstanceType == expected
}
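// Illustrative sketch (assumed nodepool name and instance type): update the instance
// type, then wait until both the nodepool spec and the awsmachinetemplate reflect it.
//
//	h.setAWSNodepoolInstanceType("np-demo", "m5.xlarge")
//	o.Eventually(h.pollCheckAWSNodepoolInstanceType("np-demo", "m5.xlarge"),
//		LongTimeout, LongTimeout/10).Should(o.BeTrue(), "instanceType update not propagated")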
func (h *hostedCluster) pollCheckNodepoolRollingUpgradeIntermediateStatus(name string) func() bool {
return func() bool {
return h.checkNodepoolRollingUpgradeIntermediateStatus(name)
}
}
func (h *hostedCluster) checkNodepoolRollingUpgradeIntermediateStatus(name string) bool {
// during a rolling upgrade the machinedeployment's UNAVAILABLE replica count should be non-zero
infraID, err := h.getInfraID()
o.Expect(err).ShouldNot(o.HaveOccurred())
cond := `-ojsonpath={.status.unavailableReplicas}`
unavailableNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "machinedeployment", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(unavailableNum).ShouldNot(o.BeEmpty())
num, err := strconv.Atoi(unavailableNum)
o.Expect(err).ShouldNot(o.HaveOccurred())
if num <= 0 {
return false
}
// get machinesets.cluster.x-k8s.io according to nodepool
machinesetCAPI := "machinesets.cluster.x-k8s.io"
labelFilter := "cluster.x-k8s.io/cluster-name=" + infraID
format := `-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].metadata.name}`
cond = fmt.Sprintf(format, h.namespace, name)
machinesets, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, machinesetCAPI, "-l", labelFilter, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(machinesets).ShouldNot(o.BeEmpty())
// a new machineset should be created during the rollout, so the nodepool should own at least 2 machinesets
if len(strings.Split(machinesets, " ")) <= 1 {
return false
}
return true
}
func (h *hostedCluster) pollCheckNodepoolRollingUpgradeComplete(name string) func() bool {
return func() bool {
return h.checkNodepoolRollingUpgradeComplete(name)
}
}
func (h *hostedCluster) checkNodepoolRollingUpgradeComplete(name string) bool {
if !h.checkNodepoolRollingUpgradeCompleteByMachineDeployment(name) {
e2e.Logf("checkNodepoolRollingUpgradeCompleteByMachineDeployment false")
return false
}
if !h.checkNodePoolReady(name) {
e2e.Logf("checkNodePoolReady false")
return false
}
if !h.checkNodepoolHostedClusterNodeReady(name) {
e2e.Logf("checkNodepoolHostedClusterNodeReady false")
return false
}
return true
}
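// Illustrative sketch of the rolling-upgrade watch built from the two poll helpers
// above (the nodepool name is assumed): first assert the intermediate state
// (unavailable replicas > 0 and a second machineset created), then wait for completion.
//
//	o.Eventually(h.pollCheckNodepoolRollingUpgradeIntermediateStatus("np-demo"),
//		LongTimeout, LongTimeout/10).Should(o.BeTrue())
//	o.Eventually(h.pollCheckNodepoolRollingUpgradeComplete("np-demo"),
//		LongTimeout, LongTimeout/10).Should(o.BeTrue())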
func (h *hostedCluster) getNodepoolReadyReplicas(name string) int {
// get the nodepool ready replica count from .status.replicas
replicas, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, "-ojsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
replicasNum, err := strconv.Atoi(replicas)
o.Expect(err).ShouldNot(o.HaveOccurred())
return replicasNum
}
func (h *hostedCluster) getNodepoolHostedClusterReadyNodesNumber(name string) int {
params := []string{"node", "--ignore-not-found", "-l", "hypershift.openshift.io/nodePool=" + name, `-ojsonpath={.items[*].status.conditions[?(@.type=="Ready")].status}`}
status, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("get").Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
readyNodeNum := strings.Count(status, "True")
return readyNodeNum
}
// getNodepoolHostedClusterNodes gets hosted cluster node names filtered by the nodepool label
// name: nodepool name
func (h *hostedCluster) getNodepoolHostedClusterNodes(name string) []string {
params := []string{"node", "--ignore-not-found", "-l", "hypershift.openshift.io/nodePool=" + name, `-ojsonpath={.items[*].metadata.name}`}
nameList, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(strings.TrimSpace(nameList)) <= 0 {
return []string{}
}
return strings.Split(nameList, " ")
}
func (h *hostedCluster) getHostedClusterNodeInstanceType(nodeName string) string {
params := []string{"node", nodeName, "--ignore-not-found", `-ojsonpath={.metadata.labels.beta\.kubernetes\.io/instance-type}`}
instanceType, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).ShouldNot(o.BeEmpty())
return instanceType
}
func (h *hostedCluster) checkNodepoolHostedClusterNodeReady(name string) bool {
replicasNum := h.getNodepoolReadyReplicas(name)
readyNodeNum := h.getNodepoolHostedClusterReadyNodesNumber(name)
return replicasNum == readyNodeNum
}
func (h *hostedCluster) checkNodepoolRollingUpgradeCompleteByMachineDeployment(name string) bool {
// check machinedeployment status
cond := `-ojsonpath={.status}`
statusStr, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "machinedeployment", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(statusStr).ShouldNot(o.BeEmpty())
status := gjson.Parse(statusStr).Value().(map[string]interface{})
var unavailable, replicas, ready, updated interface{}
var ok bool
//check that unavailableReplicas is zero
unavailable, ok = status["unavailableReplicas"]
o.Expect(ok).Should(o.BeTrue())
unavailableNum, err := strconv.Atoi(fmt.Sprint(unavailable))
o.Expect(err).ShouldNot(o.HaveOccurred())
if unavailableNum != 0 {
return false
}
//check replicas == ready == updated
replicas, ok = status["replicas"]
o.Expect(ok).Should(o.BeTrue())
replicaNum, err := strconv.Atoi(fmt.Sprint(replicas))
o.Expect(err).ShouldNot(o.HaveOccurred())
ready, ok = status["readyReplicas"]
o.Expect(ok).Should(o.BeTrue())
readyNum, err := strconv.Atoi(fmt.Sprint(ready))
o.Expect(err).ShouldNot(o.HaveOccurred())
updated, ok = status["updatedReplicas"]
o.Expect(ok).Should(o.BeTrue())
updatedNum, err := strconv.Atoi(fmt.Sprint(updated))
o.Expect(err).ShouldNot(o.HaveOccurred())
if replicaNum != readyNum || replicaNum != updatedNum {
return false
}
return true
}
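// For reference, the machinedeployment status parsed above looks roughly like this
// (illustrative values); the check passes once unavailableReplicas is 0 and
// replicas == readyReplicas == updatedReplicas:
//
//	{"replicas": 3, "readyReplicas": 3, "updatedReplicas": 3, "unavailableReplicas": 0, ...}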
func (h *hostedCluster) checkNodepoolHostedClusterNodeInstanceType(npName string) bool {
expected := h.getAWSNodepoolInstanceType(npName)
replicas := h.getNodepoolReadyReplicas(npName)
nodes := h.getNodepoolHostedClusterNodes(npName)
o.Expect(len(nodes)).Should(o.Equal(replicas))
for _, name := range nodes {
instanceType := h.getHostedClusterNodeInstanceType(name)
if instanceType != expected {
e2e.Logf("hosted cluster node %s instanceType: %s is not expected %s", name, instanceType, expected)
return false
}
}
return true
}
// getCPEtcdLeaderAndFollowers returns the etcd leader pod name and the follower pod name list
func (h *hostedCluster) getCPEtcdLeaderAndFollowers() (string, []string, error) {
var leader string
var followers []string
etcdEndpointStatusCmd := "ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 endpoint status"
replicas := doOcpReq(h.oc, OcpGet, true, "-n", h.namespace+"-"+h.name, "sts", "etcd", `-ojsonpath={.spec.replicas}`)
totalNum, err := strconv.Atoi(replicas)
o.Expect(err).ShouldNot(o.HaveOccurred())
for i := 0; i < totalNum; i++ {
podName := "etcd-" + strconv.Itoa(i)
res, err := exutil.RemoteShPodWithBashSpecifyContainer(h.oc, h.namespace+"-"+h.name, podName, "etcd", etcdEndpointStatusCmd)
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("endpoint status %s", res)
arr := strings.Split(res, ",")
o.Expect(len(arr) > 5).Should(o.BeTrue())
if strings.TrimSpace(arr[4]) == "true" {
if leader != "" {
return "", []string{}, fmt.Errorf("multiple leaders found error")
}
leader = podName
} else {
followers = append(followers, podName)
}
}
if leader == "" {
return "", []string{}, fmt.Errorf("no leader found error")
}
return leader, followers, nil
}
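// For reference, `etcdctl endpoint status` (simple output) prints one comma-separated
// line per endpoint, e.g. (illustrative values):
//
//	localhost:2379, 8e9e05c52164694d, 3.5.6, 25 kB, true, false, 2, 8, 8,
//
// i.e. endpoint, member ID, version, db size, IS LEADER, is learner, raft term,
// raft index, raft applied index; this is why arr[4] above carries the leader flag.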
func (h *hostedCluster) getEtcdNodeMapping() map[string]string {
replicas := doOcpReq(h.oc, OcpGet, true, "-n", h.namespace+"-"+h.name, "sts", "etcd", `-ojsonpath={.spec.replicas}`)
totalNum, err := strconv.Atoi(replicas)
o.Expect(err).ShouldNot(o.HaveOccurred())
etcdNodeMap := make(map[string]string, 1)
for i := 0; i < totalNum; i++ {
etcdPod := "etcd-" + strconv.Itoa(i)
node := doOcpReq(h.oc, OcpGet, true, "-n", h.namespace+"-"+h.name, "pod", etcdPod, `-ojsonpath={.spec.nodeName}`)
etcdNodeMap[etcdPod] = node
}
return etcdNodeMap
}
func (h *hostedCluster) isCPEtcdPodHealthy(podName string) bool {
etcdEndpointHealthCmd := "ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/etcd-ca/ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 endpoint health"
res, err := exutil.RemoteShPodWithBashSpecifyContainer(h.oc, h.namespace+"-"+h.name, podName, "etcd", etcdEndpointHealthCmd)
if err != nil {
e2e.Logf("CP ETCD %s is unhealthy with error : %s , \n res: %s", podName, err.Error(), res)
return false
}
if strings.Contains(res, "unhealthy") {
return false
}
return true
}
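// Illustrative sketch combining the two etcd helpers above: find the leader, then
// assert that every member of the control plane etcd cluster reports healthy.
//
//	leader, followers, err := h.getCPEtcdLeaderAndFollowers()
//	o.Expect(err).ShouldNot(o.HaveOccurred())
//	for _, pod := range append(followers, leader) {
//		o.Expect(h.isCPEtcdPodHealthy(pod)).Should(o.BeTrue())
//	}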
func (h *hostedCluster) getNodeNameByNodepool(npName string) []string {
labelFilter := "hypershift.openshift.io/nodePool=" + npName
nodes := h.getHostedClusterNodeNameByLabelFilter(labelFilter)
return strings.Split(strings.TrimSpace(nodes), " ")
}
func (h *hostedCluster) getUnstructuredNodePoolByName(ctx context.Context, npName string) (*unstructured.Unstructured, error) {
// Dynamically obtain the gvr to avoid version change in the future
npRESTMapping, err := h.oc.RESTMapper().RESTMapping(schema.GroupKind{
Group: "hypershift.openshift.io",
Kind: "NodePool",
})
if err != nil {
return nil, fmt.Errorf("error getting RESTMapping for hypershift.openshift.io/NodePool: %w", err)
}
npUnstructured, err := h.oc.AdminDynamicClient().Resource(npRESTMapping.Resource).Namespace(h.namespace).Get(ctx, npName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error getting NodePool/%s: %w", npName, err)
}
hcName, found, err := unstructured.NestedString(npUnstructured.Object, "spec", "clusterName")
if err != nil || !found {
return nil, fmt.Errorf("error extracting NodePool.spec.clusterName: %w", err)
}
if hcName != h.name {
return nil, fmt.Errorf("expect NodePool.spec.clusterName to be %s but found to be %s", h.name, hcName)
}
return npUnstructured, nil
}
func (h *hostedCluster) getCurrentInfraMachineTemplatesByNodepool(ctx context.Context, npName string) (*unstructured.Unstructured, error) {
npUnstructured, err := h.getUnstructuredNodePoolByName(ctx, npName)
if err != nil {
return nil, fmt.Errorf("error getting unstructured NodePool %s: %w", npName, err)
}
platform, found, err := unstructured.NestedString(npUnstructured.Object, "spec", "platform", "type")
if err != nil || !found {
return nil, fmt.Errorf("error extracting NodePool.spec.platform.type: %w", err)
}
e2e.Logf("Found NodePool/%s platform = %s", npName, platform)
infraMachineTemplateKind, ok := platform2InfraMachineTemplateKind[platform]
if !ok {
return nil, fmt.Errorf("no infra machine template kind for platform %s. Available options: %v", platform, platform2InfraMachineTemplateKind)
}
e2e.Logf("Found infra machine template kind = %s", infraMachineTemplateKind)
infraMachineTemplateRESTMapping, err := h.oc.RESTMapper().RESTMapping(schema.GroupKind{
Group: capiInfraGroup,
Kind: infraMachineTemplateKind,
})
if err != nil {
return nil, fmt.Errorf("error getting RESTMapping for kind %s in group %s: %w", infraMachineTemplateKind, capiInfraGroup, err)
}
hcpNs := h.getHostedComponentNamespace()
if len(hcpNs) == 0 {
return nil, errors.New("empty hosted component namespace obtained from the hostedCluster object")
}
infraMachineTempName, ok := npUnstructured.GetAnnotations()[npInfraMachineTemplateAnnotationKey]
if !ok {
return nil, fmt.Errorf("annotation %s not found on NodePool %s", npInfraMachineTemplateAnnotationKey, npName)
}
e2e.Logf("Found infra machine template name = %s", infraMachineTempName)
infraMachineTempUnstructured, err := h.oc.AdminDynamicClient().Resource(infraMachineTemplateRESTMapping.Resource).Namespace(hcpNs).Get(ctx, infraMachineTempName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error getting infra machine templates %s: %w", infraMachineTempName, err)
}
e2e.Logf("Found infra machine template %s", infraMachineTempUnstructured.GetName())
return infraMachineTempUnstructured, nil
}
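// Illustrative usage sketch (assumed nodepool name): resolve the infra machine
// template currently backing a nodepool, e.g. to detect template rotation after a
// platform-spec change.
//
//	tmpl, err := h.getCurrentInfraMachineTemplatesByNodepool(context.Background(), "np-demo")
//	o.Expect(err).ShouldNot(o.HaveOccurred())
//	oldName := tmpl.GetName() // compare against the template name after the change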
func (h *hostedCluster) DebugHostedClusterNodeWithChroot(caseID string, nodeName string, cmd ...string) (string, error) {
newNamespace := names.SimpleNameGenerator.GenerateName(fmt.Sprintf("hypershift-%s-", caseID))
defer func() {
err := h.oc.AsAdmin().AsGuestKubeconf().Run("delete").Args("namespace", newNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpCreate).Args("namespace", newNamespace).Output()
if err != nil {
return "", err
}
res, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpGet).Args("ns/"+newNamespace, `-o=jsonpath={.metadata.labels.pod-security\.kubernetes\.io/enforce}`).Output()
if err != nil {
return "", err
}
if !strings.Contains(res, "privileged") {
_, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run("label").Args("ns/"+newNamespace, `security.openshift.io/scc.podSecurityLabelSync=false`, `pod-security.kubernetes.io/enforce=privileged`, `pod-security.kubernetes.io/audit=privileged`, `pod-security.kubernetes.io/warn=privileged`, "--overwrite").Output()
if err != nil {
return "", err
}
}
res, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args(append([]string{"node/" + nodeName, "--to-namespace=" + newNamespace, "-q", "--", "chroot", "/host"}, cmd...)...).Output()
return res, err
}
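// Illustrative usage sketch (assumed case ID and node name): run a host-level command
// on a hosted cluster node through the temporary privileged debug namespace.
//
//	out, err := h.DebugHostedClusterNodeWithChroot("99999", "ip-10-0-1-23.ec2.internal",
//		"cat", "/etc/os-release")
//	o.Expect(err).ShouldNot(o.HaveOccurred())
//	o.Expect(out).Should(o.ContainSubstring("CoreOS"))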
func (h *hostedCluster) updateHostedClusterAndCheck(oc *exutil.CLI, updateFunc func() error, deployment string) {
oldVersion := doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.metadata.annotations.deployment\.kubernetes\.io/revision}`)
err := updateFunc()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Eventually(func() string {
return doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.metadata.annotations.deployment\.kubernetes\.io/revision}`)
}, DefaultTimeout, DefaultTimeout/10).ShouldNot(o.Equal(oldVersion), deployment+" not restart")
o.Eventually(func() int {
return strings.Compare(doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.status.replicas}`), doOcpReq(oc, OcpGet, true, "deployment", deployment, "-n", h.namespace+"-"+h.name, `-ojsonpath={.status.readyReplicas}`))
}, LongTimeout, LongTimeout/10).Should(o.Equal(0), deployment+" is not ready")
}
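// Illustrative usage sketch: updateFunc applies an arbitrary hostedcluster change and
// the helper then waits for the named control plane deployment to restart and become
// ready. The merge patch below is an assumed example.
//
//	h.updateHostedClusterAndCheck(oc, func() error {
//		return oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace,
//			"hostedcluster", h.name, "--type=merge", `--patch={"spec":{"configuration":{}}}`).Execute()
//	}, "kube-apiserver")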
// idpType: HTPasswd, GitLab, GitHub ...
func (h *hostedCluster) checkIDPConfigReady(idpType IdentityProviderType, idpName string, secretName string) bool {
//check idpType by idpName
if idpType != doOcpReq(h.oc, OcpGet, false, "hostedcluster", h.name, "-n", h.namespace, "--ignore-not-found", fmt.Sprintf(`-ojsonpath={.spec.configuration.oauth.identityProviders[?(@.name=="%s")].type}`, idpName)) {
return false
}
//check configmap oauth-openshift
configYaml := doOcpReq(h.oc, OcpGet, false, "configmap", "oauth-openshift", "-n", h.namespace+"-"+h.name, "--ignore-not-found", `-ojsonpath={.data.config\.yaml}`)
if !strings.Contains(configYaml, fmt.Sprintf("name: %s", idpName)) {
return false
}
if !strings.Contains(configYaml, fmt.Sprintf("kind: %sIdentityProvider", idpType)) {
return false
}
//check secret name if secretName is not empty
if secretName != "" {
volumeName := doOcpReq(h.oc, OcpGet, false, "deploy", "oauth-openshift", "-n", h.namespace+"-"+h.name, "--ignore-not-found", fmt.Sprintf(`-ojsonpath={.spec.template.spec.volumes[?(@.secret.secretName=="%s")].name}`, secretName))
if !strings.Contains(volumeName, "idp-secret") {
return false
}
}
return true
}
// idpType: HTPasswd, GitLab, GitHub ...
func (h *hostedCluster) pollCheckIDPConfigReady(idpType IdentityProviderType, idpName string, secretName string) func() bool {
return func() bool {
return h.checkIDPConfigReady(idpType, idpName, secretName)
}
}
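// Illustrative usage sketch (assumed IDP name and secret): wait until the oauth
// deployment picks up a newly configured HTPasswd identity provider.
//
//	o.Eventually(h.pollCheckIDPConfigReady(IdentityProviderType("HTPasswd"), "my-htpasswd", "htpasswd-secret"),
//		DefaultTimeout, DefaultTimeout/10).Should(o.BeTrue())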
type etcdEndpointStatusResult []struct {
Endpoint string `json:"Endpoint"`
Status *clientv3.StatusResponse `json:"Status"`
}
// getEtcdEndpointStatus gets status of the passed-in endpoints of the hosted cluster's ETCD.
// Omit the endpoints parameter to get status of all endpoints.
func (h *hostedCluster) getEtcdEndpointStatus(endpoints ...string) (etcdEndpointStatusResult, error) {
var etcdEndpointStatusCmd string
if len(endpoints) == 0 {
etcdEndpointStatusCmd = etcdCmdPrefixForHostedCluster + " --endpoints " + etcdLocalClientReqEndpoint + " endpoint status --cluster -w json"
} else {
etcdEndpointStatusCmd = etcdCmdPrefixForHostedCluster + " --endpoints " + strings.Join(endpoints, ",") + " endpoint status -w json"
}
endpointStatus := doOcpReq(h.oc, OcpExec, true, "-n", h.getHostedComponentNamespace(), "etcd-0", "-c", "etcd", "--", "bash", "-c", etcdEndpointStatusCmd)
e2e.Logf("Etcd endpoint status response = %s", endpointStatus)
var res etcdEndpointStatusResult
if err := json.Unmarshal([]byte(endpointStatus), &res); err != nil {
return nil, err
}
return res, nil
}
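// For reference, `etcdctl endpoint status -w json` returns an array shaped like the
// following (illustrative values); each Status is unmarshalled into a
// clientv3.StatusResponse, which is how dbSize and dbSizeInUse become available:
//
//	[{"Endpoint":"etcd-0...:2379","Status":{"header":{...},"version":"3.5.6",
//	  "dbSize":25804800,"dbSizeInUse":10240000,"leader":2,"raftIndex":8,"raftTerm":4}}]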
// getEtcdEndpointDbStatsByIdx gets DB status of an ETCD endpoint
func (h *hostedCluster) getEtcdEndpointDbStatsByIdx(idx int) (dbSize, dbSizeInUse int64, dbFragRatio float64, err error) {
var localEtcdEndpointStatus etcdEndpointStatusResult
etcdEndpoint := h.getEtcdDiscoveryEndpointForClientReqByIdx(idx)
if localEtcdEndpointStatus, err = h.getEtcdEndpointStatus(etcdEndpoint); err != nil {
return -1, -1, 0, fmt.Errorf("error querying local ETCD endpoint status: %w", err)
}
dbSize, dbSizeInUse = localEtcdEndpointStatus[0].Status.DbSize, localEtcdEndpointStatus[0].Status.DbSizeInUse
if dbSize == 0 {
return -1, -1, 0, errors.New("zero dbSize obtained from ETCD server's response")
}
if dbSizeInUse == 0 {
return -1, -1, 0, errors.New("zero dbSizeInUse obtained from ETCD server's response")
}
fragRatio := float64(dbSize-dbSizeInUse) / float64(dbSize)
e2e.Logf("Found ETCD endpoint %s: dbSize = %d, dbSizeInUse = %d, fragmentation ratio = %.2f", etcdEndpoint, dbSize, dbSizeInUse, fragRatio)
return dbSize, dbSizeInUse, fragRatio, nil
}
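// Worked example of the ratio above: with dbSize = 100 MiB and dbSizeInUse = 40 MiB,
// fragRatio = (100 - 40) / 100 = 0.60, i.e. 60% of the backend file would be
// reclaimable by defragmentation.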
func (h *hostedCluster) getEtcdDiscoveryEndpointForClientReqByIdx(idx int) string {
hcpNs := h.getHostedComponentNamespace()
return fmt.Sprintf("etcd-%d.%s.%s.svc:%s", idx, etcdDiscoverySvcNameForHostedCluster, hcpNs, etcdClientReqPort)
}
func (h *hostedCluster) checkHCSpecForAzureEtcdEncryption(expected azureKMSKey, isBackupKey bool) {
keyPath := "activeKey"
if isBackupKey {
keyPath = "backupKey"
}
keyName := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
fmt.Sprintf("-o=jsonpath={.spec.secretEncryption.kms.azure.%s.keyName}", keyPath))
o.Expect(keyName).To(o.Equal(expected.keyName))
keyVaultName := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
fmt.Sprintf("-o=jsonpath={.spec.secretEncryption.kms.azure.%s.keyVaultName}", keyPath))
o.Expect(keyVaultName).To(o.Equal(expected.keyVaultName))
keyVersion := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
fmt.Sprintf("-o=jsonpath={.spec.secretEncryption.kms.azure.%s.keyVersion}", keyPath))
o.Expect(keyVersion).To(o.Equal(expected.keyVersion))
}
func (h *hostedCluster) checkKASEncryptionConfiguration() {
kasSecretEncryptionConfigSecret := doOcpReq(h.oc, OcpExtract, true,
fmt.Sprintf("secret/%s", kasEncryptionConfigSecretName), "-n", h.getHostedComponentNamespace(), "--to", "-")
o.Expect(kasSecretEncryptionConfigSecret).To(o.And(
o.ContainSubstring("secrets"),
o.ContainSubstring("configmaps"),
o.ContainSubstring("routes"),
o.ContainSubstring("oauthaccesstokens"),
o.ContainSubstring("oauthauthorizetokens"),
))
}
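// For reference, the extracted secret holds an apiserver EncryptionConfiguration that
// roughly follows the upstream schema (illustrative snippet, not the exact payload):
//
//	kind: EncryptionConfiguration
//	resources:
//	- resources: [secrets, configmaps, routes, oauthaccesstokens, oauthauthorizetokens]
//	  providers:
//	  - kms: {...}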
func (h *hostedCluster) checkSecretEncryptionDecryption(isEtcdEncrypted bool) {
var (
secretName = fmt.Sprintf("etcd-encryption-%s", strings.ToLower(exutil.RandStrDefault()))
secretNs = "default"
secretKey = "foo"
secretValue = "bar"
)
e2e.Logf("Creating secret/%s within ns/%s of the hosted cluster", secretName, secretNs)
doOcpReq(h.oc.AsGuestKubeconf(), OcpCreate, true, "secret", "generic", secretName,
"-n", secretNs, fmt.Sprintf("--from-literal=%s=%s", secretKey, secretValue))
e2e.Logf("Checking secret decryption")
decryptedSecretContent := doOcpReq(h.oc.AsGuestKubeconf(), OcpExtract, true,
fmt.Sprintf("secret/%s", secretName), "-n", secretNs, "--to", "-")
o.Expect(decryptedSecretContent).To(o.And(
o.ContainSubstring(secretKey),
o.ContainSubstring(secretValue),
))
// Unencrypted secrets look like the following:
// /kubernetes.io/secrets/default/test-secret.<secret-content>
// Encrypted secrets look like the following:
// /kubernetes.io/secrets/default/test-secret.k8s:enc:kms:v1:<EncryptionConfiguration-provider-name>:.<encrypted-content>
if !isEtcdEncrypted {
return
}
e2e.Logf("Checking ETCD encryption")
etcdCmd := fmt.Sprintf("%s --endpoints %s get /kubernetes.io/secrets/%s/%s | hexdump -C | awk -F '|' '{print $2}' OFS= ORS=",
etcdCmdPrefixForHostedCluster, etcdLocalClientReqEndpoint, secretNs, secretName)
encryptedSecretContent, err := exutil.RemoteShPodWithBashSpecifyContainer(h.oc, h.getHostedComponentNamespace(),
"etcd-0", "etcd", etcdCmd)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get encrypted secret content within ETCD")
o.Expect(encryptedSecretContent).NotTo(o.BeEmpty(), "obtained empty encrypted secret content")
o.Expect(encryptedSecretContent).NotTo(o.ContainSubstring(secretValue))
e2e.Logf("Deleting secret")
_ = h.oc.AsGuestKubeconf().Run(OcpDelete).Args("secret", secretName, "-n", secretNs).Execute()
}
// checkAzureEtcdEncryption health-checks an HC on Azure with etcd encryption enabled
func (h *hostedCluster) checkAzureEtcdEncryption(activeKey azureKMSKey, backupKey *azureKMSKey) {
e2e.Logf("Checking hc.spec.secretEncryption.kms.azure.activeKey")
h.checkHCSpecForAzureEtcdEncryption(activeKey, false)
if backupKey != nil {
e2e.Logf("Checking hc.spec.secretEncryption.kms.azure.backupKey")
h.checkHCSpecForAzureEtcdEncryption(*backupKey, true)
}
e2e.Logf("Checking the ValidAzureKMSConfig condition of the hc")
validAzureKMSConfigStatus := doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace,
`-o=jsonpath={.status.conditions[?(@.type == "ValidAzureKMSConfig")].status}`)
o.Expect(validAzureKMSConfigStatus).To(o.Equal("True"))
e2e.Logf("Checking KAS EncryptionConfiguration")
h.checkKASEncryptionConfiguration()
e2e.Logf("Checking secret encryption/decryption within the hosted cluster")
h.checkSecretEncryptionDecryption(true)
}
func (h *hostedCluster) waitForKASDeployUpdate(ctx context.Context, oldResourceVersion string) {
kasDeployKindAndName := "deploy/kube-apiserver"
err := exutil.WaitForResourceUpdate(ctx, h.oc, DefaultTimeout/20, DefaultTimeout,
kasDeployKindAndName, h.getHostedComponentNamespace(), oldResourceVersion)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to wait for KAS deployment to be updated")
}
func (h *hostedCluster) waitForKASDeployReady(ctx context.Context) {
kasDeployName := "kube-apiserver"
exutil.WaitForDeploymentsReady(ctx, func(ctx context.Context) (*appsv1.DeploymentList, error) {
return h.oc.AdminKubeClient().AppsV1().Deployments(h.getHostedComponentNamespace()).List(ctx, metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("metadata.name", kasDeployName).String(),
})
}, exutil.IsDeploymentReady, LongTimeout, LongTimeout/20, true)
}
func (h *hostedCluster) patchAzureKMS(activeKey, backupKey *azureKMSKey) {
patch, err := getHCPatchForAzureKMS(activeKey, backupKey)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get HC patch Azure KMS")
doOcpReq(h.oc, OcpPatch, true, "hc", "-n", h.namespace, h.name, "--type=merge", "-p", patch)
}
func (h *hostedCluster) removeAzureKMSBackupKey() {
doOcpReq(h.oc, OcpPatch, true, "hc", h.name, "-n", h.namespace, "--type=json",
"-p", `[{"op": "remove", "path": "/spec/secretEncryption/kms/azure/backupKey"}]`)
}
// Re-encode all secrets within a hosted cluster namespace
func (h *hostedCluster) encodeSecretsNs(ctx context.Context, ns string) {
guestKubeClient := h.oc.GuestKubeClient()
secrets, err := guestKubeClient.CoreV1().Secrets(ns).List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list secrets")
backoff := wait.Backoff{Steps: 10, Duration: 1 * time.Second}
for _, secret := range secrets.Items {
err = retry.RetryOnConflict(backoff, func() error {
// Fetch the latest version of the secret
latestSecret, getErr := guestKubeClient.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{})
if getErr != nil {
return getErr
}
// Update the secret with the modified version
_, updateErr := guestKubeClient.CoreV1().Secrets(ns).Update(ctx, latestSecret, metav1.UpdateOptions{})
return updateErr
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to update secret with retry")
}
}
// Re-encode all secrets within the hosted cluster
func (h *hostedCluster) encodeSecrets(ctx context.Context) {
namespaces, err := h.oc.GuestKubeClient().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list namespaces")
for _, ns := range namespaces.Items {
h.encodeSecretsNs(ctx, ns.Name)
}
}
// Re-encode all configmaps within a hosted cluster namespace
func (h *hostedCluster) encodeConfigmapsNs(ctx context.Context, ns string) {
guestKubeClient := h.oc.GuestKubeClient()
configmaps, err := guestKubeClient.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list configmaps")
backoff := wait.Backoff{Steps: 10, Duration: 1 * time.Second}
for _, configmap := range configmaps.Items {
err = retry.RetryOnConflict(backoff, func() error {
// Fetch the latest version of the configmap
latestConfigmap, getErr := guestKubeClient.CoreV1().ConfigMaps(ns).Get(ctx, configmap.Name, metav1.GetOptions{})
if getErr != nil {
return getErr
}
// Update the configmap with the modified version
_, updateErr := guestKubeClient.CoreV1().ConfigMaps(ns).Update(ctx, latestConfigmap, metav1.UpdateOptions{})
return updateErr
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to update configmap with retry")
}
}
// Re-encode all configmaps within the hosted cluster
func (h *hostedCluster) encodeConfigmaps(ctx context.Context) {
namespaces, err := h.oc.GuestKubeClient().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list namespaces")
for _, ns := range namespaces.Items {
h.encodeConfigmapsNs(ctx, ns.Name)
}
}
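// Illustrative sketch of the Azure KMS key-rotation flow these helpers support (key
// values and ctx are assumed): promote a new active key while keeping the old one as
// backup, wait for KAS to roll out, re-encode existing data, then drop the backup key.
//
//	rv := h.getKASResourceVersion()
//	h.patchAzureKMS(&newKey, &oldKey)
//	h.waitForKASDeployUpdate(ctx, rv)
//	h.waitForKASDeployReady(ctx)
//	h.encodeSecrets(ctx)
//	h.encodeConfigmaps(ctx)
//	h.removeAzureKMSBackupKey()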
func (h *hostedCluster) pollUntilReady() {
o.Eventually(h.pollHostedClustersReady(), ClusterInstallTimeout, ClusterInstallTimeout/20).Should(o.BeTrue())
}
func (h *hostedCluster) getKASResourceVersion() string {
return doOcpReq(h.oc, OcpGet, true, "deploy/kube-apiserver", "-n", h.getHostedComponentNamespace(), "-o=jsonpath={.metadata.resourceVersion}")
}
func (h *hostedCluster) getOLMCatalogPlacement() string {
return doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace, "-o=jsonpath={.spec.olmCatalogPlacement}")
}
| package hypershift | ||||
function | openshift/openshift-tests-private | dd182abd-9487-4c17-b7b0-5d300e20bb68 | newHostedCluster | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func newHostedCluster(oc *exutil.CLI, namespace string, name string) *hostedCluster {
return &hostedCluster{oc: oc, namespace: namespace, name: name}
} | hypershift | ||||
function | openshift/openshift-tests-private | fed67597-d6fa-4a57-b7cf-fe149179ed0e | getHostedClusterKubeconfigFile | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedClusterKubeconfigFile() string {
return h.hostedClustersKubeconfigFile
} | hypershift | ||||
function | openshift/openshift-tests-private | 63a0ab58-38e8-4a91-9491-5199e205e50d | setHostedClusterKubeconfigFile | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) setHostedClusterKubeconfigFile(kubeconfig string) {
h.hostedClustersKubeconfigFile = kubeconfig
} | hypershift | ||||
function | openshift/openshift-tests-private | 4d1ecf37-da4d-47ea-b41d-a5e792bc8fa0 | getHostedClusterReadyNodeCount | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedClusterReadyNodeCount(npName string) (int, error) {
cond := []string{"--kubeconfig=" + h.hostedClustersKubeconfigFile, "node", "--ignore-not-found", `-ojsonpath='{.items[*].status.conditions[?(@.type=="Ready")].status}'`}
if len(npName) > 0 {
cond = append(cond, "-l", "hypershift.openshift.io/nodePool="+npName)
}
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args(cond...).Output()
if er != nil {
e2e.Logf(" get node status ready error: %v", er)
return 0, er
}
return strings.Count(value, "True"), nil
} | hypershift | |||
function | openshift/openshift-tests-private | c7291072-5428-462a-8c7d-64279f978c0d | pollGetHostedClusterReadyNodeCount | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollGetHostedClusterReadyNodeCount(npName string) func() int {
return func() int {
value, err := h.getHostedClusterReadyNodeCount(npName)
o.Expect(err).ShouldNot(o.HaveOccurred())
return value
}
} | hypershift | ||||
function | openshift/openshift-tests-private | cbd43612-25c6-41b0-8ea5-8ed0df40cf26 | getHostedClusterInfrastructureTopology | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedClusterInfrastructureTopology() (string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+h.hostedClustersKubeconfigFile, "infrastructure", "cluster", `-o=jsonpath={.status.infrastructureTopology}`).Output()
if er != nil {
e2e.Logf(" get infrastructure/cluster status error: %v", er)
return "", er
}
return value, nil
} | hypershift | ||||
function | openshift/openshift-tests-private | cd91fa9c-deb6-4d8a-8e7a-02c46e7ee0b6 | pollGetHostedClusterInfrastructureTopology | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollGetHostedClusterInfrastructureTopology() func() string {
return func() string {
value, _ := h.getHostedClusterInfrastructureTopology()
return value
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 26e92c06-4978-457f-9366-4bd1cd20f6c8 | getInfraID | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getInfraID() (string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", h.namespace, h.name, `-ojsonpath={.spec.infraID}`).Output()
if er != nil {
e2e.Logf("get InfraID, error occurred: %v", er)
return "", er
}
return value, nil
} | hypershift | ||||
function | openshift/openshift-tests-private | 2014957c-a87f-43a5-9546-cc0d860221f3 | getResourceGroupName | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getResourceGroupName() (string, error) {
infraId, err := h.getInfraID()
if err != nil {
return "", err
}
return h.name + "-" + infraId, nil
} | hypershift | ||||
function | openshift/openshift-tests-private | 4efd4b47-405b-4e8b-bac3-b2b1e9943756 | getClustersDeletionTimestamp | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getClustersDeletionTimestamp() (string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("clusters", "-n", h.namespace+"-"+h.name, "--ignore-not-found", `-ojsonpath={.items[].metadata.deletionTimestamp}`).Output()
if er != nil {
e2e.Logf("get ClusterDeletionTimestamp, error occurred: %v", er)
return "", er
}
return value, nil
} | hypershift | ||||
function | openshift/openshift-tests-private | 1fbd39c7-f8e4-4129-82f9-26e64d2497c1 | getHostedComponentNamespace | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedComponentNamespace() string {
return fmt.Sprintf("%s-%s", h.namespace, h.name)
} | hypershift | |||
function | openshift/openshift-tests-private | 083650d3-8c25-49cf-b09f-e9e63a80d705 | getDefaultSgId | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getDefaultSgId() string {
return doOcpReq(h.oc, OcpGet, false, "hc", h.name, "-n", h.namespace, "-o=jsonpath={.status.platform.aws.defaultWorkerSecurityGroupID}")
} | hypershift | ||||
function | openshift/openshift-tests-private | b1caa28c-9714-4a9e-b777-ffe8d821c3b2 | getSvcPublishingStrategyType | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getSvcPublishingStrategyType(svc hcService) hcServiceType {
jsonPath := fmt.Sprintf(`-o=jsonpath={.spec.services[?(@.service=="%s")].servicePublishingStrategy.type}`, svc)
return hcServiceType(doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace, jsonPath))
} | hypershift | |||
function | openshift/openshift-tests-private | 12e74308-5291-413a-8349-572b530a8e91 | getControlPlaneEndpointPort | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getControlPlaneEndpointPort() string {
return doOcpReq(h.oc, OcpGet, true, "hc", h.name, "-n", h.namespace, `-o=jsonpath={.status.controlPlaneEndpoint.port}`)
} | hypershift | ||||
function | openshift/openshift-tests-private | bb39c3ae-56f6-4754-89e0-784f2c2a36a9 | hostedClustersReady | ['"fmt"', '"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) hostedClustersReady() (bool, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", h.namespace, "--ignore-not-found", h.name, `-ojsonpath='{.status.conditions[?(@.type=="Available")].status}'`).Output()
if er != nil {
e2e.Logf("error occurred to get Available: %v, try next round", er)
return false, er
}
if !strings.Contains(value, "True") {
return false, fmt.Errorf("Available != True")
}
value, er = h.oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", h.namespace, "--ignore-not-found", h.name, `-ojsonpath={.status.version.history[?(@.state!="")].state}`).Output()
if er != nil {
e2e.Logf("error occurred to get PROGRESS: %v, try next round", er)
return false, er
}
if !strings.Contains(value, "Completed") {
return false, fmt.Errorf("PROGRESS != Completed")
}
return true, nil
} | hypershift | |||
function | openshift/openshift-tests-private | b1b85541-efe5-4f6a-8cb6-82b0ef174a1e | pollHostedClustersReady | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollHostedClustersReady() func() bool {
return func() bool {
value, _ := h.hostedClustersReady()
return value
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 31c820c9-6274-4633-9976-06ba5cb2e323 | getHostedClustersHACPWorkloadNames | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedClustersHACPWorkloadNames(workloadType string) ([]string, error) {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args(workloadType, "-n", h.namespace+"-"+h.name, `-ojsonpath={.items[?(@.spec.replicas>1)].metadata.name}`).Output()
if er != nil {
e2e.Logf("get HA HostedClusters Workload Names, error occurred: %v", er)
return nil, er
}
return strings.Split(value, " "), nil
} | hypershift | |||
function | openshift/openshift-tests-private | 934b396e-1c61-4ae8-af4b-7eabbd51f04e | isCPPodOnlyRunningOnOneNode | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) isCPPodOnlyRunningOnOneNode(nodeName string) (bool, error) {
value, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", h.namespace+"-"+h.name, `-ojsonpath={.items[?(@.spec.nodeName!="`+nodeName+`")].metadata.name}`).Output()
if err != nil {
e2e.Logf("check HostedClusters CP PodOnly One Node, error occurred: %v", err)
return false, err
}
if len(value) == 0 {
return true, nil
}
e2e.Logf("not on %s node pod name:%s", nodeName, value)
if len(strings.Split(value, " ")) == 1 && strings.Contains(value, "ovnkube") {
return true, nil
}
return false, nil
} | hypershift | |||
function | openshift/openshift-tests-private | 36254229-150c-4fd0-a4be-fab261e542f6 | pollIsCPPodOnlyRunningOnOneNode | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollIsCPPodOnlyRunningOnOneNode(nodeName string) func() bool {
return func() bool {
value, _ := h.isCPPodOnlyRunningOnOneNode(nodeName)
return value
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 9d778bc4-c8cd-4255-abb3-3d47d41b2256 | getAzureDiskSizeGBByNodePool | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureDiskSizeGBByNodePool(nodePool string) string {
return doOcpReq(h.oc, OcpGet, false, "nodepools", "-n", h.namespace, nodePool, `-ojsonpath={.spec.platform.azure.diskSizeGB}`)
} | hypershift | ||||
function | openshift/openshift-tests-private | 95610ece-cb2d-4fcb-a69d-5d804830c866 | getAzureSubnetId | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureSubnetId() string {
return doOcpReq(h.oc, OcpGet, false, "hc", h.name, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.subnetID}")
} | hypershift | ||||
function | openshift/openshift-tests-private | 47e1e4c3-0cae-45e4-8842-4293327f45ab | pollGetNodePoolReplicas | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollGetNodePoolReplicas() func() string {
return func() string {
value, er := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepools", "-n", h.namespace, `-ojsonpath={.items[*].status.replicas}`).Output()
if er != nil {
return ""
}
return value
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 9d9e8bcb-9b27-46d8-b709-abd2ce70bce0 | getHostedClusters | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func getHostedClusters(oc *exutil.CLI, namespace string) (string, error) {
value, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
if er != nil {
e2e.Logf("get HostedClusters, error occurred: %v", er)
return "", er
}
return value, nil
} | hypershift | |||||
function | openshift/openshift-tests-private | c8aa93c9-f246-4e06-876e-b92c3c929599 | pollGetHostedClusters | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func pollGetHostedClusters(oc *exutil.CLI, namespace string) func() string {
return func() string {
value, _ := getHostedClusters(oc, namespace)
return value
}
} | hypershift | |||||
function | openshift/openshift-tests-private | f382a5d6-f40a-463f-8da8-ef7a69229019 | checkHCConditions | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkHCConditions() bool {
iaasPlatform := exutil.CheckPlatform(h.oc)
res, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("hostedcluster", h.name, "-n", h.namespace,
`-ojsonpath={range .status.conditions[*]}{@.type}{" "}{@.status}{" "}{end}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
// the expected conditions are currently identical on all platforms (the azure branch
// previously duplicated the same list), so a single check suffices
e2e.Logf("checking hostedcluster conditions on platform %s", iaasPlatform)
return checkSubstringWithNoExit(res,
[]string{"ValidHostedControlPlaneConfiguration True", "ClusterVersionSucceeding True",
"Degraded False", "EtcdAvailable True", "KubeAPIServerAvailable True", "InfrastructureReady True",
"Available True", "ValidConfiguration True", "SupportedHostedCluster True",
"IgnitionEndpointAvailable True", "ReconciliationActive True",
"ValidReleaseImage True", "ReconciliationSucceeded True"})
} | hypershift | ||||
function | openshift/openshift-tests-private | c10518d4-f75a-424a-a650-9b43fad3a1be | checkNodepoolAllConditions | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolAllConditions(npName string) func() bool {
return func() bool {
res := doOcpReq(h.oc, OcpGet, true, "nodepools", "-n", h.namespace, npName, `-ojsonpath={range .status.conditions[*]}{@.type}{" "}{@.status}{" "}{end}`)
return checkSubstringWithNoExit(res, []string{"AutoscalingEnabled False", "UpdateManagementEnabled True", "ValidReleaseImage True", "ValidPlatformImage True", "AWSSecurityGroupAvailable True", "ValidMachineConfig True", "ValidGeneratedPayload True", "ReachedIgnitionEndpoint True", "ValidTuningConfig True", "ReconciliationActive True", "AllMachinesReady True", "AllNodesHealthy True", "Ready True"})
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 0a9b7c68-480f-4fa2-9a11-116a48cd3246 | getHostedclusterConsoleInfo | ['"encoding/base64"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedclusterConsoleInfo() (string, string) {
url, cerr := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpWhoami).Args("--show-console").Output()
o.Expect(cerr).ShouldNot(o.HaveOccurred())
pwdbase64, pswerr := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "secret",
"kubeadmin-password", "-ojsonpath={.data.password}").Output()
o.Expect(pswerr).ShouldNot(o.HaveOccurred())
pwd, err := base64.StdEncoding.DecodeString(pwdbase64)
o.Expect(err).ShouldNot(o.HaveOccurred())
return url, string(pwd)
} | hypershift | |||
function | openshift/openshift-tests-private | 122c6b25-dfac-4b69-adfa-68f0201b4751 | createAwsNodePool | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) createAwsNodePool(name string, nodeCount int) {
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create nodepool aws --name %s --namespace %s --cluster-name %s --node-count %d",
name, h.namespace, h.name, nodeCount)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | ba80e02e-4e82-49bf-8178-73218cb2a26b | createAwsInPlaceNodePool | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) createAwsInPlaceNodePool(name string, nodeCount int, dir string) {
npFile := dir + "/np-inplace.yaml"
var bashClient = NewCmdClient().WithShowInfo(true)
cmd := fmt.Sprintf("hypershift create nodepool aws --name %s --namespace %s --cluster-name %s --node-count %d --render > %s", name, h.namespace, h.name, nodeCount, npFile)
_, err := bashClient.Run(cmd).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
cmdSed := fmt.Sprintf("sed -i 's/Replace/InPlace/g' %s", npFile)
_, err = bashClient.Run(cmdSed).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
err = h.oc.AsAdmin().WithoutNamespace().Run(OcpCreate).Args("-f", npFile).Execute()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | f68780bc-0eba-4c2e-b2ea-c2d4718d911c | getAzureNodePoolImageType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureNodePoolImageType(npName string) azureNodepoolImageType {
return azureNodepoolImageType(doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureImageType}"))
} | hypershift | ||||
function | openshift/openshift-tests-private | 26471bd5-df4b-4392-8ee1-a72af379462a | getAzureDefaultNodePoolImageType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureDefaultNodePoolImageType() azureNodepoolImageType {
return h.getAzureNodePoolImageType(h.name)
} | hypershift | ||||
function | openshift/openshift-tests-private | f8b33a23-0c1c-4dcd-80a6-d3cdaf22fd5a | getAzureNodePoolImageId | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureNodePoolImageId(npName string) string {
return doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.imageID}")
} | hypershift | ||||
function | openshift/openshift-tests-private | 4b21772d-53f6-41ad-a435-c2c3c57fc2c1 | getAzureDefaultNodePoolImageId | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureDefaultNodePoolImageId() string {
return h.getAzureNodePoolImageId(h.name)
} | hypershift | ||||
function | openshift/openshift-tests-private | 28d10f15-804f-4600-89c7-41069f84bfd1 | getAzureNodePoolMarketplaceImage | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureNodePoolMarketplaceImage(npName string) *azureMarketplaceImage {
return &azureMarketplaceImage{
Offer: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.offer}"),
Publisher: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.publisher}"),
SKU: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.sku}"),
Version: doOcpReq(h.oc, OcpGet, true, "np", npName, "-n", h.namespace, "-o=jsonpath={.spec.platform.azure.image.azureMarketplace.version}"),
}
} | hypershift | ||||
function | openshift/openshift-tests-private | c429852b-a5ad-489f-8a6b-f1b0f9e075c5 | getAzureDefaultNodePoolMarketplaceImage | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAzureDefaultNodePoolMarketplaceImage() *azureMarketplaceImage {
return h.getAzureNodePoolMarketplaceImage(h.name)
} | hypershift | ||||
function | openshift/openshift-tests-private | 85cee33a-7c5d-4da5-8006-355f1572d529 | createAdditionalAzureNodePool | ['"k8s.io/utils/ptr"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) createAdditionalAzureNodePool(name string, nodeCount int) {
np := NewAzureNodePool(name, h.name, h.namespace).WithNodeCount(ptr.To(nodeCount)).WithSubnetId(h.getAzureSubnetId())
switch imageType := h.getAzureDefaultNodePoolImageType(); imageType {
case azureNodepoolImageTypeId:
np.WithImageId(h.getAzureDefaultNodePoolImageId())
case azureNodepoolImageTypeMarketplace:
np.WithMarketplaceImage(h.getAzureDefaultNodePoolMarketplaceImage())
default:
e2e.Failf("Unknown Azure Nodepool image type: %s", imageType)
}
np.CreateAzureNodePool()
} | hypershift | |||
function | openshift/openshift-tests-private | 147f32ff-a4c5-4903-a921-54bd4502734a | deleteNodePool | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) deleteNodePool(name string) {
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpDelete).Args("--ignore-not-found", "nodepool", "-n", h.namespace, name).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | ||||
function | openshift/openshift-tests-private | 8fba646a-bcfd-438a-8638-0b517b148103 | checkNodePoolReady | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodePoolReady(name string) bool {
//check condition {Ready:True}
readyCond := `-ojsonpath={.status.conditions[?(@.type=="Ready")].status}`
isReady, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("np", "-n", h.namespace, name, readyCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(isReady, "True") {
e2e.Logf("nodePool ready condition: %s", isReady)
return false
}
//check condition {AutoscalingEnabled:True/False}
autoScalCond := `-ojsonpath={.status.conditions[?(@.type=="AutoscalingEnabled")].status}`
autoscaleEnabled, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("np", "-n", h.namespace, name, autoScalCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
//if autoscaling is not enabled, check that the replica count matches the spec
if autoscaleEnabled != "True" {
desiredNodes, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("np", "-n", h.namespace, name, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
currentNodes, err := h.oc.AsAdmin().WithoutNamespace().Run("get").Args("np", "-n", h.namespace, name, "-o=jsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return desiredNodes == currentNodes
}
return true
} | hypershift | |||
function | openshift/openshift-tests-private | abecae0a-e800-4225-bf15-e84c4435797b | pollCheckHostedClustersNodePoolReady | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckHostedClustersNodePoolReady(name string) func() bool {
return func() bool {
return h.checkNodePoolReady(name)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 256e20a8-3f84-4342-ac04-9478f57d934b | setNodepoolAutoScale | ['"encoding/json"', '"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) setNodepoolAutoScale(name, max, min string) {
removeNpConfig := `[{"op": "remove", "path": "/spec/replicas"}]`
autoscaleConfig := fmt.Sprintf(`--patch={"spec": {"autoScaling": {"max": %s, "min":%s}}}`, max, min)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, "--type=json", "-p", removeNpConfig).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
_, err = h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, autoscaleConfig, "--type=merge").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | adb0ba29-7508-461a-a288-d44a51e559e7 | getHostedClusterNodeNameByLabelFilter | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedClusterNodeNameByLabelFilter(labelFilter string) string {
nameCond := `-ojsonpath={.items[*].metadata.name}`
nodesName, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args("node", "--ignore-not-found", "-l", labelFilter, nameCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return nodesName
} | hypershift | ||||
function | openshift/openshift-tests-private | 77d8bc61-977a-40bd-8ba3-a4bc51268bd2 | getHostedClusterNodeReadyStatus | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getHostedClusterNodeReadyStatus(nodeName string) string {
labelFilter := "kubernetes.io/hostname=" + nodeName
readyCond := `-ojsonpath={.items[].status.conditions[?(@.type=="Ready")].status}`
status, err := h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args("node", "--ignore-not-found", "-l", labelFilter, readyCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return status
} | hypershift | ||||
function | openshift/openshift-tests-private | f38c823d-4404-4ac3-89a5-657a212cae9e | setNodepoolAutoRepair | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) setNodepoolAutoRepair(name, enabled string) {
autoRepairConfig := fmt.Sprintf(`--patch={"spec": {"management": {"autoRepair": %s}}}`, enabled)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, autoRepairConfig, "--type=merge").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | 90646648-1142-4be8-b791-f16dacf08ee2 | pollCheckNodepoolAutoRepairDisabled | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckNodepoolAutoRepairDisabled(name string) func() bool {
return func() bool {
return h.checkNodepoolAutoRepairDisabled(name)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | d459d5b8-0034-4770-8584-fa2983fdf786 | checkNodepoolAutoRepairDisabled | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolAutoRepairDisabled(name string) bool {
//check that the nodepool status condition AutorepairEnabled is not True
autoRepairCond := `-ojsonpath={.status.conditions[?(@.type=="AutorepairEnabled")].status}`
rc, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, autoRepairCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if strings.Contains(rc, "True") {
return false
}
//check that the machinehealthcheck does not exist
mchCAPI := "machinehealthchecks.cluster.x-k8s.io"
rc, err = h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mchCAPI, name, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(rc) > 0 {
return false
}
return true
} | hypershift | |||
function | openshift/openshift-tests-private | 612e25dd-4856-4276-bb7f-310bdc7bb7d4 | pollCheckNodepoolAutoRepairEnabled | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckNodepoolAutoRepairEnabled(name string) func() bool {
return func() bool {
return h.checkNodepoolAutoRepairEnabled(name)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 0a7e3d88-c5c2-4525-8b4a-a6a85ed3d0c4 | checkNodepoolAutoRepairEnabled | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolAutoRepairEnabled(name string) bool {
//check that the nodepool status condition AutorepairEnabled is True
autoRepairCond := `-ojsonpath={.status.conditions[?(@.type=="AutorepairEnabled")].status}`
rc, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, autoRepairCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(rc, "True") {
return false
}
//get np replica
npReplica, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, "-ojsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
	// check the MachineHealthCheck currentHealthy count; the MHC name matches the nodepool name
	mhcCAPI := "machinehealthchecks.cluster.x-k8s.io"
	currentHealthyNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mhcCAPI, name, "-ojsonpath={.status.currentHealthy}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return npReplica == currentHealthyNum
} | hypershift | |||
function | openshift/openshift-tests-private | 43fcb370-2505-4c78-bf30-35b939ccb00e | pollCheckNodeHealthByMHC | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckNodeHealthByMHC(mhcName string) func() bool {
return func() bool {
return h.checkNodeHealthByMHC(mhcName)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | f2c5393f-211d-4ddd-ad19-644bef9651b4 | checkNodeHealthByMHC | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodeHealthByMHC(mhcName string) bool {
	mhcCAPI := "machinehealthchecks.cluster.x-k8s.io"
	expectedMachineCond := `-ojsonpath={.status.expectedMachines}`
	expectedMachineNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mhcCAPI, mhcName, expectedMachineCond).Output()
	o.Expect(err).ShouldNot(o.HaveOccurred())
	currentHealthyCond := `-ojsonpath={.status.currentHealthy}`
	currentHealthyNum, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, mhcCAPI, mhcName, currentHealthyCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return expectedMachineNum == currentHealthyNum
} | hypershift | ||||
function | openshift/openshift-tests-private | ee537f17-ab5c-45c2-989d-278db02994df | pollCheckDeletedNodePool | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckDeletedNodePool(npName string) func() bool {
return func() bool {
return h.checkDeletedNodePool(npName)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | d7ec57fb-e8f0-4496-ad2f-af3b44a514b7 | checkDeletedNodePool | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkDeletedNodePool(npName string) bool {
	// the nodepool should no longer exist on the management cluster
	rc, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "np", npName, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(strings.TrimSpace(rc)) > 0 {
return false
}
	// no guest cluster nodes labeled with this nodepool should remain
	params := []string{"no", "--ignore-not-found", "-l", "hypershift.openshift.io/nodePool=" + npName}
rc, err = h.oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run(OcpGet).Args(params...).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(strings.TrimSpace(rc)) > 0 {
return false
}
return true
} | hypershift | |||
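As a hedged sketch (not from the source), the boolean checker also adapts to apimachinery's polling helpers; the interval and timeout are illustrative:

	// assumes: import ("time"; "k8s.io/apimachinery/pkg/util/wait")
	err := wait.PollImmediate(30*time.Second, 10*time.Minute, func() (bool, error) {
		// return a nil error so polling continues until the check passes or the timeout hits
		return h.checkDeletedNodePool(npName), nil
	})
	o.Expect(err).ShouldNot(o.HaveOccurred(), "nodepool %s was not fully deleted in time", npName)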
function | openshift/openshift-tests-private | 751c6626-1dd7-44c7-a5ce-8323838186be | pollCheckNodepoolCurrentNodes | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckNodepoolCurrentNodes(name, expected string) func() bool {
return func() bool {
return h.checkNodepoolCurrentNodes(name, expected)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | c82ecfb6-855a-43d2-9ddf-849ac54a0acb | checkNodepoolCurrentNodes | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodepoolCurrentNodes(name, expected string) bool {
	currentNodes, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("--ignore-not-found", "np", "-n", h.namespace, name, "-o=jsonpath={.status.replicas}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return currentNodes == expected
} | hypershift | ||||
function | openshift/openshift-tests-private | b0a4abbd-eab2-49b2-8ee9-0035d65b8f49 | isNodepoolAutosaclingEnabled | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) isNodepoolAutosaclingEnabled(name string) bool {
autoScalCond := `-ojsonpath={.status.conditions[?(@.type=="AutoscalingEnabled")].status}`
	autoscaleEnabled, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("--ignore-not-found", "np", "-n", h.namespace, name, autoScalCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return strings.Contains(autoscaleEnabled, "True")
} | hypershift | |||
function | openshift/openshift-tests-private | 80f64ad7-4c6c-41b0-b02a-f383b6e71bce | pollCheckAllNodepoolReady | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckAllNodepoolReady() func() bool {
return func() bool {
return h.checkAllNodepoolReady()
}
} | hypershift | ||||
function | openshift/openshift-tests-private | e3007d6b-0dd3-4577-bf9c-b6186f87e3c0 | checkAllNodepoolReady | ['"fmt"', '"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkAllNodepoolReady() bool {
	npReadyCond := fmt.Sprintf(`-ojsonpath={.items[?(@.spec.clusterName=="%s")].status.conditions[?(@.type=="Ready")].status}`, h.name)
	npStatus, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("--ignore-not-found", "np", npReadyCond, "--namespace", h.namespace).Output()
	o.Expect(err).ShouldNot(o.HaveOccurred())
	// no matching nodepools means there is nothing left to wait for
	if len(npStatus) == 0 {
		return true
	}
	return !strings.Contains(npStatus, "False")
} | hypershift | |||
function | openshift/openshift-tests-private | be68c6b7-782b-4f8c-9ee6-bc9aaae639e4 | pollCheckNodePoolConditions | ['hostedCluster', 'nodePoolCondition'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckNodePoolConditions(npName string, conditions []nodePoolCondition) func() bool {
return func() bool {
return h.checkNodePoolConditions(npName, conditions)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | fba8c980-c87f-44fd-9238-1db0114bf0de | checkNodePoolConditions | ['"fmt"', '"strings"'] | ['hostedCluster', 'nodePoolCondition'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkNodePoolConditions(npName string, conditions []nodePoolCondition) bool {
o.Expect(doOcpReq(h.oc, OcpGet, true, "nodepools", "-n", h.namespace, "-ojsonpath={.items[*].metadata.name}")).Should(o.ContainSubstring(npName))
for _, condition := range conditions {
res := doOcpReq(h.oc, OcpGet, false, "nodepools", npName, "-n", h.namespace, fmt.Sprintf(`-ojsonpath={.status.conditions[?(@.type=="%s")].%s}`, condition.conditionsType, condition.conditionsTypeReq))
e2e.Logf("checkNodePoolStatus: %s, %s, expected: %s, res: %s", condition.conditionsType, condition.conditionsTypeReq, condition.expectConditionsResult, res)
if !strings.Contains(res, condition.expectConditionsResult) {
return false
}
}
return true
} | hypershift | |||
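A usage sketch for the condition checker above; the field names follow the loop body, while the condition types, expected values, and timeouts are illustrative placeholders:

	conditions := []nodePoolCondition{
		{conditionsType: "Ready", conditionsTypeReq: "status", expectConditionsResult: "True"},
		{conditionsType: "UpdatingVersion", conditionsTypeReq: "status", expectConditionsResult: "False"},
	}
	o.Eventually(h.pollCheckNodePoolConditions(npName, conditions), "15m", "30s").Should(o.BeTrue())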
function | openshift/openshift-tests-private | e6c94ad9-e4fd-426d-a304-f4d81e556a0d | getNodepoolPayload | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getNodepoolPayload(name string) string {
return doOcpReq(h.oc, OcpGet, true, "nodepools", name, "-n", h.namespace, `-ojsonpath={.spec.release.image}`)
} | hypershift | ||||
function | openshift/openshift-tests-private | 83597657-0178-402e-bfed-80e6769fbf32 | getNodepoolStatusPayloadVersion | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getNodepoolStatusPayloadVersion(name string) string {
payloadVersionCond := `-ojsonpath={.status.version}`
	version, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("--ignore-not-found", "np", name, "-n", h.namespace, payloadVersionCond).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return version
} | hypershift | ||||
function | openshift/openshift-tests-private | 36ef89dc-5a26-42ca-a81c-59c3e1d0b80a | upgradeNodepoolPayloadInPlace | ['"encoding/json"', '"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) upgradeNodepoolPayloadInPlace(name, payload string) {
doOcpReq(h.oc, OcpPatch, true, "nodepools", name, "-n", h.namespace, "--type=json", fmt.Sprintf(`-p=[{"op": "replace", "path": "/spec/release/image","value": "%s"}]`, payload))
} | hypershift | |||
function | openshift/openshift-tests-private | 0edad4c6-496c-475e-80a3-e7f9ec46f75f | pollCheckUpgradeNodepoolPayload | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckUpgradeNodepoolPayload(name, expectPayload, version string) func() bool {
	return func() bool {
		if !strings.Contains(h.getNodepoolPayload(name), expectPayload) {
			return false
		}
		return strings.Contains(h.getNodepoolStatusPayloadVersion(name), version)
	}
} | hypershift | |||
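Putting the patch and poll helpers together, a hedged in-place upgrade sketch; the payload pullspec, version, and timeouts are placeholders, not values from the source:

	targetPayload := "quay.io/openshift-release-dev/ocp-release:4.15.0-x86_64" // placeholder pullspec
	targetVersion := "4.15.0"                                                  // placeholder version
	h.upgradeNodepoolPayloadInPlace(npName, targetPayload)
	o.Eventually(h.pollCheckUpgradeNodepoolPayload(npName, targetPayload, targetVersion), "30m", "1m").Should(o.BeTrue())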
function | openshift/openshift-tests-private | 416d0642-a591-4655-9416-8843003f1b56 | getCPReleaseImage | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getCPReleaseImage() string {
payload, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name,
`-ojsonpath={.spec.release.image}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return payload
} | hypershift | ||||
function | openshift/openshift-tests-private | db511b7f-28a9-47f4-be25-d53c40051ef3 | getCPPayloadTag | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getCPPayloadTag() string {
payload, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name,
`-ojsonpath={.status.version.history[?(@.state=="Completed")].version}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
	// if multiple completed versions are recorded, use the first one
return strings.TrimSpace(strings.Split(payload, " ")[0])
} | hypershift | |||
function | openshift/openshift-tests-private | d08095f7-cd8e-47f0-8611-484f8c2767a3 | getCPDesiredPayload | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getCPDesiredPayload() string {
payload, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name,
`-ojsonpath={.status.version.desired.image}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return payload
} | hypershift | ||||
function | openshift/openshift-tests-private | 45c02616-1ba5-4cf7-9c7f-a06e7fb0c3ef | upgradeCPPayload | ['"encoding/json"', '"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) upgradeCPPayload(payload string) {
patchOption := fmt.Sprintf(`-p=[{"op": "replace", "path": "/spec/release/image","value": "%s"}]`, payload)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "hostedcluster", h.name,
"--type=json", patchOption).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | b62ccf35-df82-472d-90f9-f5f7be17cf99 | pollCheckUpgradeCPPayload | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckUpgradeCPPayload(payload string) func() bool {
	return func() bool {
		return strings.Contains(payload, h.getCPPayloadTag())
	}
} | hypershift | |||
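A matching control-plane upgrade sketch, reusing the placeholder payload and illustrative timeouts from the nodepool sketch above:

	h.upgradeCPPayload(targetPayload)
	// succeeds once the completed version recorded in status is contained in the desired payload string
	o.Eventually(h.pollCheckUpgradeCPPayload(targetPayload), "45m", "1m").Should(o.BeTrue())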
function | openshift/openshift-tests-private | 902bccc7-46a4-42f9-b54e-d2ff1218e70b | isFIPEnabled | ['"strconv"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) isFIPEnabled() bool {
res, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name, "-ojsonpath={.spec.fips}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
enable, err := strconv.ParseBool(res)
o.Expect(err).ShouldNot(o.HaveOccurred())
return enable
} | hypershift | |||
function | openshift/openshift-tests-private | 2ee7bfed-2058-4e8f-a6c3-eaebb86db754 | checkFIPInHostedCluster | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkFIPInHostedCluster() bool {
nodes, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpGet).Args("no", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
for _, nodename := range strings.Split(nodes, " ") {
res, err := h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args("node/"+nodename, "-q", "--", "chroot", "/host", "fips-mode-setup", "--check").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(res, "FIPS mode is enabled") {
e2e.Logf("Warning: node %s fips-mode-setup check FIP false", nodename)
return false
}
res, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args("node/"+nodename, "-q", "--", "cat", "/proc/sys/crypto/fips_enabled").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(res, "1") {
e2e.Logf("Warning: node %s /proc/sys/crypto/fips_enabled != 1", nodename)
return false
}
res, err = h.oc.AsGuestKubeconf().WithoutNamespace().Run(OcpDebug).Args("node/"+nodename, "-q", "--", "sysctl", "crypto.fips_enabled").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if !strings.Contains(res, "crypto.fips_enabled = 1") {
e2e.Logf("Warning: node %s crypto.fips_enabled != 1", nodename)
return false
}
}
return true
} | hypershift | |||
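A short hedged sketch tying the two FIPS helpers together:

	// if the hosted cluster spec requests FIPS, every node should report FIPS mode enabled
	if h.isFIPEnabled() {
		o.Expect(h.checkFIPInHostedCluster()).Should(o.BeTrue(), "FIPS is set in spec but not active on all nodes")
	}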
function | openshift/openshift-tests-private | ddd68a5b-0a9a-4ec5-8199-40d4def3c1f8 | isCPHighlyAvailable | ['"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) isCPHighlyAvailable() bool {
res, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "hostedcluster", h.name, "-ojsonpath={.spec.controllerAvailabilityPolicy}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return strings.Contains(res, HighlyAvailable)
} | hypershift | |||
function | openshift/openshift-tests-private | 0e9076c6-92a9-4b75-958d-c34e1ee61c9a | checkAWSRootVolumes | ['"fmt"', '"strconv"', '"strings"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkAWSRootVolumes(name string, checkItem string, expected interface{}) bool {
awsmachineVolumeJSONPathPtn := `-ojsonpath={.items[?(@.metadata.annotations.hypershift\.openshift\.io/nodePool=="%s/%s")].spec.rootVolume.%s}`
awsmachineVolumeFilter := fmt.Sprintf(awsmachineVolumeJSONPathPtn, h.namespace, name, checkItem)
nodepoolVolumeFilter := fmt.Sprintf("-ojsonpath={.spec.platform.aws.rootVolume.%s}", checkItem)
var expectedVal string
switch v := expected.(type) {
case string:
expectedVal = v
case int64:
expectedVal = strconv.FormatInt(v, 10)
case *int64:
expectedVal = strconv.FormatInt(*v, 10)
default:
e2e.Logf("Error: not supported expected value while checking aws nodepool root-volume config")
return false
}
//check nodepool
rootVolumeConfig, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("np", name, "-n", h.namespace, nodepoolVolumeFilter).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if strings.TrimSpace(rootVolumeConfig) != expectedVal {
e2e.Logf("Error: nodepool %s rootVolume item %s not matched: return %s and expect %s, original expected %v", name, checkItem, rootVolumeConfig, expectedVal, expected)
return false
}
//check awsmachine
awsRootVolumeConfig, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("awsmachines", "-n", h.namespace+"-"+h.name, awsmachineVolumeFilter).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if strings.TrimSpace(awsRootVolumeConfig) != expectedVal {
e2e.Logf("Error: awsmachine for nodepool %s rootVolume item %s not matched: return %s and expect %s, original expected %v", name, checkItem, awsRootVolumeConfig, expectedVal, expected)
return false
}
return true
} | hypershift | |||
function | openshift/openshift-tests-private | ceee4616-f663-4fcf-86a1-5b5e72698e79 | checkAWSNodepoolRootVolumeSize | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkAWSNodepoolRootVolumeSize(name string, expectedSize int64) bool {
return h.checkAWSRootVolumes(name, "size", expectedSize)
} | hypershift | ||||
function | openshift/openshift-tests-private | d25dc413-1d63-4373-a5d1-7254105438d5 | checkAWSNodepoolRootVolumeIOPS | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkAWSNodepoolRootVolumeIOPS(name string, expectedIOPS int64) bool {
return h.checkAWSRootVolumes(name, "iops", expectedIOPS)
} | hypershift | ||||
function | openshift/openshift-tests-private | 4ba3141b-ca43-476e-9131-8f1d8957e098 | checkAWSNodepoolRootVolumeType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkAWSNodepoolRootVolumeType(name string, expectedType string) bool {
return h.checkAWSRootVolumes(name, "type", expectedType)
} | hypershift | ||||
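A hedged usage sketch of the three root-volume wrappers above; the nodepool name and volume values are placeholders:

	o.Expect(h.checkAWSNodepoolRootVolumeSize(npName, 64)).Should(o.BeTrue())    // size in GiB, placeholder
	o.Expect(h.checkAWSNodepoolRootVolumeIOPS(npName, 3000)).Should(o.BeTrue())  // placeholder IOPS
	o.Expect(h.checkAWSNodepoolRootVolumeType(npName, "gp3")).Should(o.BeTrue()) // placeholder EBS volume type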
function | openshift/openshift-tests-private | 165358b6-1242-42ca-9386-816ccdfe99d7 | setAWSNodepoolInstanceType | ['"fmt"'] | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) setAWSNodepoolInstanceType(name, instanceType string) {
cond := fmt.Sprintf(`--patch={"spec": {"platform": {"aws": {"instanceType":"%s"}}}}`, instanceType)
_, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpPatch).Args("-n", h.namespace, "nodepools", name, cond, "--type=merge").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
} | hypershift | |||
function | openshift/openshift-tests-private | b198571f-5d5b-4618-b364-e1ce46d97816 | getAWSNodepoolInstanceType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getAWSNodepoolInstanceType(name string) string {
cond := `-ojsonpath={.spec.platform.aws.instanceType}`
instanceType, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(instanceType).ShouldNot(o.BeEmpty())
return instanceType
} | hypershift | ||||
function | openshift/openshift-tests-private | d4da8b3b-d072-4e64-8a05-54e25cc97330 | getNodepoolUpgradeType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) getNodepoolUpgradeType(name string) string {
cond := `-ojsonpath={.spec.management.upgradeType}`
	upgradeType, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace, "nodepools", name, cond, "--ignore-not-found").Output()
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(upgradeType).ShouldNot(o.BeEmpty())
	return upgradeType
} | hypershift | ||||
function | openshift/openshift-tests-private | 30726c04-8994-469a-a170-50a10f7cd925 | pollCheckAWSNodepoolInstanceType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckAWSNodepoolInstanceType(name, expected string) func() bool {
return func() bool {
return h.checkAWSNodepoolInstanceType(name, expected)
}
} | hypershift | ||||
function | openshift/openshift-tests-private | 8c7fbe55-5ee0-45f1-a851-c71e6ae2720d | checkAWSNodepoolInstanceType | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) checkAWSNodepoolInstanceType(name, expected string) bool {
// check nodepool instanceType
instanceType := h.getAWSNodepoolInstanceType(name)
if instanceType != expected {
e2e.Logf("instanceType not matched, expected: %s, got: %s", expected, instanceType)
return false
}
// check awsmachinetemplates instanceType
cond := `-ojsonpath={.spec.template.spec.instanceType}`
templateInstanceType, err := h.oc.AsAdmin().WithoutNamespace().Run(OcpGet).Args("-n", h.namespace+"-"+h.name, "awsmachinetemplates", name, cond, "--ignore-not-found").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
o.Expect(templateInstanceType).ShouldNot(o.BeEmpty())
return templateInstanceType == expected
} | hypershift | ||||
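A usage sketch of the instance-type helpers; the AWS instance type and timeouts are placeholders:

	h.setAWSNodepoolInstanceType(npName, "m5.xlarge")
	// both the nodepool spec and its awsmachinetemplate should converge on the new type
	o.Eventually(h.pollCheckAWSNodepoolInstanceType(npName, "m5.xlarge"), "30m", "1m").Should(o.BeTrue())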
function | openshift/openshift-tests-private | e4ac8a4c-33dc-4d35-ad73-d3e77fb7f0a8 | pollCheckNodepoolRollingUpgradeIntermediateStatus | ['hostedCluster'] | github.com/openshift/openshift-tests-private/test/extended/hypershift/hostedclusters.go | func (h *hostedCluster) pollCheckNodepoolRollingUpgradeIntermediateStatus(name string) func() bool {
return func() bool {
return h.checkNodepoolRollingUpgradeIntermediateStatus(name)
}
} | hypershift |