element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
function
|
openshift/openshift-tests-private
|
ced4b237-0f61-4bfb-aec1-2a3056cc069e
|
GetRawValue
|
['"k8s.io/client-go/util/jsonpath"']
|
['JSONData']
|
github.com/openshift/openshift-tests-private/test/extended/mco/jsondata.go
|
func (jd *JSONData) GetRawValue(jsonPath string) ([]interface{}, error) {
j := jsonpath.New("parser: " + jsonPath)
if err := j.Parse(jsonPath); err != nil {
return nil, err
}
fullResults, err := j.FindResults(jd.data)
if err != nil {
return nil, err
}
returnResults := make([]interface{}, 0, len(fullResults))
for _, result := range fullResults {
res := make([]interface{}, 0, len(result))
for i := range result {
res = append(res, result[i].Interface())
}
returnResults = append(returnResults, res)
}
return returnResults, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
b14272ee-9d5a-4d7a-9c82-44c64697e126
|
GetJSONPath
|
['JSONData']
|
github.com/openshift/openshift-tests-private/test/extended/mco/jsondata.go
|
func (jd *JSONData) GetJSONPath(jsonPath string) ([]JSONData, error) {
allResults, err := jd.GetRawValue(jsonPath)
if err != nil {
return nil, err
}
flatResults := flattenResults(allResults)
return flatResults, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
9c91335c-7f09-4743-9c45-6edacde25af8
|
flattenResults
|
['JSONData']
|
github.com/openshift/openshift-tests-private/test/extended/mco/jsondata.go
|
func flattenResults(allExpresults []interface{}) []JSONData {
flatResults := []JSONData{}
for i := range allExpresults {
var expression = allExpresults[i].([]interface{})
for _, result := range expression {
flatResults = append(flatResults, JSONData{result})
}
}
return flatResults
}
|
mco
| ||||
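The three jsondata.go helpers above wrap k8s.io/client-go/util/jsonpath behind the JSONData type (whose definition is not shown in this extract). The following standalone sketch is not from the repository; it only illustrates the same parse/find/flatten pattern directly against a plain map, using the jsonpath calls that appear in GetRawValue.
package main

import (
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Sample document standing in for the data held by a JSONData value.
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "a"},
			map[string]interface{}{"name": "b"},
		},
	}
	path := `{.items[*].name}`
	j := jsonpath.New("parser: " + path)
	if err := j.Parse(path); err != nil {
		panic(err)
	}
	// FindResults returns one []reflect.Value slice per expression in the template.
	fullResults, err := j.FindResults(data)
	if err != nil {
		panic(err)
	}
	// Flatten every reflect.Value into a plain interface{}, mirroring flattenResults.
	flat := []interface{}{}
	for _, result := range fullResults {
		for i := range result {
			flat = append(flat, result[i].Interface())
		}
	}
	fmt.Println(flat) // prints: [a b]
}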
file
|
openshift/openshift-tests-private
|
cb0f5a14-1372-4473-b7c7-05b708c49bce
|
machineconfig
|
import (
"context"
"fmt"
"strings"
"time"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
package mco
import (
"context"
"fmt"
"strings"
"time"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
// MachineConfigList handles a list of MachineConfig resources
type MachineConfigList struct {
ResourceList
}
// MachineConfig struct is used to handle MachineConfig resources in OCP
type MachineConfig struct {
Resource
Template
pool string
parameters []string
skipWaitForMcp bool
}
// NewMachineConfig creates a new MachineConfig struct
func NewMachineConfig(oc *exutil.CLI, name, pool string) *MachineConfig {
mc := &MachineConfig{Resource: *NewResource(oc, "mc", name), pool: pool}
return mc.SetTemplate(*NewMCOTemplate(oc, GenericMCTemplate))
}
// NewMachineConfigList constructs a new MachineConfigList struct to handle all existing MachineConfigs
func NewMachineConfigList(oc *exutil.CLI) *MachineConfigList {
return &MachineConfigList{*NewResourceList(oc, "mc")}
}
// SetTemplate sets the template that will be used by the "create" method in order to create the MC
func (mc *MachineConfig) SetTemplate(template Template) *MachineConfig {
mc.Template = template
return mc
}
// SetMCOTemplate sets a template defined in the MCO testdata folder
func (mc *MachineConfig) SetMCOTemplate(templateName string) *MachineConfig {
mc.Template = *NewMCOTemplate(mc.oc, templateName)
return mc
}
// SetParams sets the parameters defined in the template
func (mc *MachineConfig) SetParams(params ...string) *MachineConfig {
if len(params) > 0 {
mc.parameters = append(mc.parameters, params...)
}
return mc
}
func (mc *MachineConfig) create() {
mc.name = mc.name + "-" + exutil.GetRandomString()
params := []string{"-p", "NAME=" + mc.name, "POOL=" + mc.pool}
params = append(params, mc.parameters...)
mc.Create(params...)
immediate := false
pollerr := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 1*time.Minute, immediate, func(_ context.Context) (bool, error) {
stdout, err := mc.Get(`{.metadata.name}`)
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, mc.name) {
logger.Infof("mc %s is created successfully", mc.name)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(pollerr, fmt.Sprintf("create machine config %v failed", mc.name))
if !mc.skipWaitForMcp {
mcp := NewMachineConfigPool(mc.oc, mc.pool)
if mc.GetKernelTypeSafe() != "" {
mcp.SetWaitingTimeForKernelChange() // Since we configure a different kernel we wait longer for completion
}
if mc.HasExtensionsSafe() {
mcp.SetWaitingTimeForExtensionsChange() // Since we configure extra extension we need to wait longer for completion
}
mcp.waitForComplete()
}
}
// we need this method to be able to delete the MC without waiting for success.
// TODO: This method should be deleted when we refactor the MC struct to embed the Resource struct. But right now we have no other choice.
func (mc *MachineConfig) deleteNoWait() error {
return mc.Delete()
}
func (mc *MachineConfig) delete() {
// This method waits a minimum of 1 minute for the MCP to be updated after the MC has been deleted.
// It is very expensive, since this method is deferred very often and in those cases the MC has been already deleted.
// In order to improve the performance we do nothing if the MC does not exist.
if !mc.Exists() {
logger.Infof("MachineConfig %s does not exist. We will not try to delete it.", mc.GetName())
return
}
mcp := NewMachineConfigPool(mc.oc, mc.pool)
if mc.GetKernelTypeSafe() != "" {
mcp.SetWaitingTimeForKernelChange() // If the MC is configuring a different kernel, we increase the waiting period
}
err := mc.oc.AsAdmin().WithoutNamespace().Run("delete").Args("mc", mc.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
mcp.waitForComplete()
}
// GetExtensions returns all the extensions configured in this MC
func (mc *MachineConfig) GetExtensions() (string, error) {
return mc.Get(`{.spec.extensions}`)
}
// GetAuthorizedKeysByUser returns the authorizedkeys that this MC defines for the given user in a json list format
func (mc *MachineConfig) GetAuthorizedKeysByUser(user string) (string, error) {
return mc.Get(fmt.Sprintf(`{.spec.config.passwd.users[?(@.name=="%s")].sshAuthorizedKeys}`, user))
}
// GetKernelTypeSafe returns the kernelType configured in this MC. If any error happens it returns an empty string
func (mc *MachineConfig) GetKernelTypeSafe() string {
return mc.GetSafe(`{.spec.kernelType}`, "")
}
// HasExtensionsSafe returns true if the MC has any extension configured
func (mc *MachineConfig) HasExtensionsSafe() bool {
ext := mc.GetSafe(`{.spec.extensions}`, "[]")
return ext != "[]" && ext != ""
}
// GetAuthorizedKeysByUserAsList returns the authorizedkeys that this MC defines for the given user as a list of strings
func (mc *MachineConfig) GetAuthorizedKeysByUserAsList(user string) ([]string, error) {
listKeys := []string{}
keys, err := mc.Get(fmt.Sprintf(`{.spec.config.passwd.users[?(@.name=="%s")].sshAuthorizedKeys}`, user))
if err != nil {
return nil, err
}
if keys == "" {
return listKeys, nil
}
jKeys := JSON(keys)
for _, key := range jKeys.Items() {
listKeys = append(listKeys, key.ToString())
}
return listKeys, err
}
// GetIgnitionVersion returns the ignition version used in the MC
func (mc *MachineConfig) GetIgnitionVersion() (string, error) {
return mc.Get(`{.spec.config.ignition.version}`)
}
// GetAll returns a []MachineConfig list with all existing MCs
func (mcl *MachineConfigList) GetAll() ([]MachineConfig, error) {
allMCResources, err := mcl.ResourceList.GetAll()
if err != nil {
return nil, err
}
allMCs := make([]MachineConfig, 0, len(allMCResources))
for _, item := range allMCResources {
mcRes := item
// disable the log spam while getting the MCs' "pool"
mcRes.oc.NotShowInfo()
defer mcRes.oc.SetShowInfo()
allMCs = append(allMCs,
*NewMachineConfig(mcl.oc,
mcRes.name,
// TODO: why do we have to provide the pool when constructing a MC?
// the pool is actually a label and there are machineconfigs without pool, it should not be mandatory
mcRes.GetOrFail(`{.metadata.labels.machineconfiguration\.openshift\.io/role}`)))
}
return allMCs, nil
}
// GetMCPRenderedMachineConfigs returns a list of the machineconfigs that were created by a MCP
func (mcl *MachineConfigList) GetMCPRenderedMachineConfigs() ([]MachineConfig, error) {
mcl.SetItemsFilter(`?(@.metadata.ownerReferences[0].kind=="MachineConfigPool")`)
return mcl.GetAll()
}
// GetMachineConfigsWithNameStartingWithRender returns a list with all the MCs whose name starts with "rendered-"
func (mcl *MachineConfigList) GetMachineConfigsWithNameStartingWithRender() ([]MachineConfig, error) {
allMCs, err := mcl.GetAll()
if err != nil {
return nil, err
}
returnMCs := []MachineConfig{}
for _, mc := range allMCs {
if strings.HasPrefix(mc.GetName(), "rendered-") {
returnMCs = append(returnMCs, mc)
}
}
return returnMCs, nil
}
// GetRenderedMachineConfigForMaster returns a list with all the MCs whose name starts with "rendered-master"
func (mcl *MachineConfigList) GetRenderedMachineConfigForMaster() ([]MachineConfig, error) {
mcl.SetItemsFilter(`?(@.metadata.ownerReferences[0].name=="master")`)
allMCs, err := mcl.GetAll()
if err != nil {
return nil, err
}
returnMCs := []MachineConfig{}
for _, mc := range allMCs {
if strings.HasPrefix(mc.GetName(), "rendered-master") {
returnMCs = append(returnMCs, mc)
}
}
return returnMCs, nil
}
func (mcl *MachineConfigList) GetRenderedMachineConfigForMasterOrFail() []MachineConfig {
renderedMcMasterList, err := mcl.GetRenderedMachineConfigForMaster()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
return renderedMcMasterList
}
// GetMCPRenderedMachineConfigsOrFail returns a list of the machineconfigs that were created by a MCP and fails the test if any error happens
func (mcl *MachineConfigList) GetMCPRenderedMachineConfigsOrFail() []MachineConfig {
renderedMcList, err := mcl.GetMCPRenderedMachineConfigs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
return renderedMcList
}
// GetRenderedMachineConfigForWorker returns a list with all the MCs whose name starts with "rendered-worker"
func (mcl *MachineConfigList) GetRenderedMachineConfigForWorker() ([]MachineConfig, error) {
mcl.SetItemsFilter(`?(@.metadata.ownerReferences[0].name=="worker")`)
allMCs, err := mcl.GetAll()
if err != nil {
return nil, err
}
returnMCs := []MachineConfig{}
for _, mc := range allMCs {
if strings.HasPrefix(mc.GetName(), "rendered-worker") {
returnMCs = append(returnMCs, mc)
}
}
return returnMCs, nil
}
func (mcl *MachineConfigList) GetRenderedMachineConfigForWorkerOrFail() []MachineConfig {
renderedMcWorkerList, err := mcl.GetRenderedMachineConfigForWorker()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
return renderedMcWorkerList
}
|
package mco
| ||||
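A hedged usage sketch (not taken from the repository) of how the builder-style MachineConfig helpers above are typically chained from a test in package mco. The template name and the KERNEL_ARGS parameter are placeholders, and the sketch assumes an *exutil.CLI named oc supplied by the test framework plus the package constant MachineConfigPoolWorker referenced elsewhere in this package.
// exampleCreateAndCleanUpMC sketches the usual create/defer-delete flow for a MachineConfig.
func exampleCreateAndCleanUpMC(oc *exutil.CLI) {
	mc := NewMachineConfig(oc.AsAdmin(), "example-mc", MachineConfigPoolWorker).
		SetMCOTemplate("change-worker-kernel-argument.yaml"). // placeholder template name
		SetParams("KERNEL_ARGS=quiet")                        // placeholder template parameter

	defer mc.delete() // delete() returns early (no MCP wait) if the MC no longer exists
	mc.create()       // create() appends a random suffix to the name and waits for the pool to settle
}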
function
|
openshift/openshift-tests-private
|
90d0cc7c-0afe-4bd3-92c8-a5a49f3d1119
|
NewMachineConfig
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func NewMachineConfig(oc *exutil.CLI, name, pool string) *MachineConfig {
mc := &MachineConfig{Resource: *NewResource(oc, "mc", name), pool: pool}
return mc.SetTemplate(*NewMCOTemplate(oc, GenericMCTemplate))
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f2ccd7ca-089c-4876-9fb7-0a29394c2d14
|
NewMachineConfigList
|
['MachineConfigList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func NewMachineConfigList(oc *exutil.CLI) *MachineConfigList {
return &MachineConfigList{*NewResourceList(oc, "mc")}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
6d68eb8f-2f5c-4d14-ae7e-421ef2f15856
|
SetTemplate
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) SetTemplate(template Template) *MachineConfig {
mc.Template = template
return mc
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
00fcd450-164a-4cb9-b742-10ed4710a2f3
|
SetMCOTemplate
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) SetMCOTemplate(templateName string) *MachineConfig {
mc.Template = *NewMCOTemplate(mc.oc, templateName)
return mc
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
c5982ed5-1497-4c7a-a7d1-df549f306532
|
SetParams
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) SetParams(params ...string) *MachineConfig {
if len(params) > 0 {
mc.parameters = append(mc.parameters, params...)
}
return mc
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
526e92d8-0be5-4d8d-9c06-80435389e81d
|
create
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) create() {
mc.name = mc.name + "-" + exutil.GetRandomString()
params := []string{"-p", "NAME=" + mc.name, "POOL=" + mc.pool}
params = append(params, mc.parameters...)
mc.Create(params...)
immediate := false
pollerr := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 1*time.Minute, immediate, func(_ context.Context) (bool, error) {
stdout, err := mc.Get(`{.metadata.name}`)
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, mc.name) {
logger.Infof("mc %s is created successfully", mc.name)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(pollerr, fmt.Sprintf("create machine config %v failed", mc.name))
if !mc.skipWaitForMcp {
mcp := NewMachineConfigPool(mc.oc, mc.pool)
if mc.GetKernelTypeSafe() != "" {
mcp.SetWaitingTimeForKernelChange() // Since we configure a different kernel we wait longer for completion
}
if mc.HasExtensionsSafe() {
mcp.SetWaitingTimeForExtensionsChange() // Since we configure extra extension we need to wait longer for completion
}
mcp.waitForComplete()
}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
5a810e82-d22e-4101-8412-20a0edcfb45e
|
deleteNoWait
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) deleteNoWait() error {
return mc.Delete()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
622076e2-ac66-46d7-89ff-433c0a72b6c5
|
delete
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) delete() {
// This method waits a minimum of 1 minute for the MCP to be updated after the MC has been deleted.
// It is very expensive, since this method is deferred very often and in those cases the MC has been already deleted.
// In order to improve the performance we do nothing if the MC does not exist.
if !mc.Exists() {
logger.Infof("MachineConfig %s does not exist. We will not try to delete it.", mc.GetName())
return
}
mcp := NewMachineConfigPool(mc.oc, mc.pool)
if mc.GetKernelTypeSafe() != "" {
mcp.SetWaitingTimeForKernelChange() // If the MC is configuring a different kernel, we increase the waiting period
}
err := mc.oc.AsAdmin().WithoutNamespace().Run("delete").Args("mc", mc.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
mcp.waitForComplete()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
7e532dbd-b41c-4caa-86c2-5e6e19fe881f
|
GetExtensions
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) GetExtensions() (string, error) {
return mc.Get(`{.spec.extensions}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
c3ce4ca3-92b6-4669-bde3-bf47b28e5124
|
GetAuthorizedKeysByUser
|
['"fmt"']
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) GetAuthorizedKeysByUser(user string) (string, error) {
return mc.Get(fmt.Sprintf(`{.spec.config.passwd.users[?(@.name=="%s")].sshAuthorizedKeys}`, user))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
770d88da-55f0-4e2d-84c9-2ffb46d408be
|
GetKernelTypeSafe
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) GetKernelTypeSafe() string {
return mc.GetSafe(`{.spec.kernelType}`, "")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
1d498f97-d9fe-43bb-924c-4df3ca5b3f2d
|
HasExtensionsSafe
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) HasExtensionsSafe() bool {
ext := mc.GetSafe(`{.spec.extensions}`, "[]")
return ext != "[]" && ext != ""
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e7d656bf-3d6b-4c1a-84d7-39edd89d01a1
|
GetAuthorizedKeysByUserAsList
|
['"fmt"']
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) GetAuthorizedKeysByUserAsList(user string) ([]string, error) {
listKeys := []string{}
keys, err := mc.Get(fmt.Sprintf(`{.spec.config.passwd.users[?(@.name=="%s")].sshAuthorizedKeys}`, user))
if err != nil {
return nil, err
}
if keys == "" {
return listKeys, nil
}
jKeys := JSON(keys)
for _, key := range jKeys.Items() {
listKeys = append(listKeys, key.ToString())
}
return listKeys, err
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
59fa0499-638b-4397-91c7-51df74865a8d
|
GetIgnitionVersion
|
['MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mc *MachineConfig) GetIgnitionVersion() (string, error) {
return mc.Get(`{.spec.config.ignition.version}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
5acae1b3-10d7-49d1-a6c6-22d59b268de0
|
GetAll
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetAll() ([]MachineConfig, error) {
allMCResources, err := mcl.ResourceList.GetAll()
if err != nil {
return nil, err
}
allMCs := make([]MachineConfig, 0, len(allMCResources))
for _, item := range allMCResources {
mcRes := item
// disable the log spam while getting the MCs' "pool"
mcRes.oc.NotShowInfo()
defer mcRes.oc.SetShowInfo()
allMCs = append(allMCs,
*NewMachineConfig(mcl.oc,
mcRes.name,
// TODO: why do we have to provide the pool in when constructing a MC.
// the pool is actually a label and there are machineconfigs without pool, it should not be mandatory
mcRes.GetOrFail(`{.metadata.labels.machineconfiguration\.openshift\.io/role}`)))
}
return allMCs, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
48976be9-1b3b-4f64-9037-5e485f261ddc
|
GetMCPRenderedMachineConfigs
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetMCPRenderedMachineConfigs() ([]MachineConfig, error) {
mcl.SetItemsFilter(`?(@.metadata.ownerReferences[0].kind=="MachineConfigPool")`)
return mcl.GetAll()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
282aba25-796e-4448-8cc8-93d39967ff6f
|
GetMachineConfigsWithNameStartingWithRender
|
['"strings"']
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetMachineConfigsWithNameStartingWithRender() ([]MachineConfig, error) {
allMCs, err := mcl.GetAll()
if err != nil {
return nil, err
}
returnMCs := []MachineConfig{}
for _, mc := range allMCs {
if strings.HasPrefix(mc.GetName(), "rendered-") {
returnMCs = append(returnMCs, mc)
}
}
return returnMCs, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
059e2275-1a63-48c5-b9a7-07f086676cef
|
GetRenderedMachineConfigForMaster
|
['"strings"']
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetRenderedMachineConfigForMaster() ([]MachineConfig, error) {
mcl.SetItemsFilter(`?(@.metadata.ownerReferences[0].name=="master")`)
allMCs, err := mcl.GetAll()
if err != nil {
return nil, err
}
returnMCs := []MachineConfig{}
for _, mc := range allMCs {
if strings.HasPrefix(mc.GetName(), "rendered-master") {
returnMCs = append(returnMCs, mc)
}
}
return returnMCs, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
1881649b-73b4-493d-b43a-22e74175267e
|
GetRenderedMachineConfigForMasterOrFail
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetRenderedMachineConfigForMasterOrFail() []MachineConfig {
renderedMcMasterList, err := mcl.GetRenderedMachineConfigForMaster()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
return renderedMcMasterList
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
93d58883-2bae-489a-a82c-4a72fb563735
|
GetMCPRenderedMachineConfigsOrFail
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetMCPRenderedMachineConfigsOrFail() []MachineConfig {
renderedMcList, err := mcl.GetMCPRenderedMachineConfigs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
return renderedMcList
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
03e17915-1b18-4a4f-bd8a-cfde4e9f158d
|
GetRenderedMachineConfigForWorker
|
['"strings"']
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetRenderedMachineConfigForWorker() ([]MachineConfig, error) {
mcl.SetItemsFilter(`?(@.metadata.ownerReferences[0].name=="worker")`)
allMCs, err := mcl.GetAll()
if err != nil {
return nil, err
}
returnMCs := []MachineConfig{}
for _, mc := range allMCs {
if strings.HasPrefix(mc.GetName(), "rendered-worker") {
returnMCs = append(returnMCs, mc)
}
}
return returnMCs, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
ce7fa751-6bca-4f65-82bf-dd425dba4ec2
|
GetRenderedMachineConfigForWorkerOrFail
|
['MachineConfigList', 'MachineConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfig.go
|
func (mcl *MachineConfigList) GetRenderedMachineConfigForWorkerOrFail() []MachineConfig {
renderedMcWorkerList, err := mcl.GetRenderedMachineConfigForWorker()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
return renderedMcWorkerList
}
|
mco
| ||||
file
|
openshift/openshift-tests-private
|
4ed8e68e-baea-48b7-9b45-dec91a571306
|
machineconfigpool
|
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
package mco
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// CertExpiry describes the information that MCPs are reporting about a given certificate.
type CertExpiry struct {
// Bundle where the cert is stored
Bundle string `json:"bundle"`
// Date fields have been temporarily removed by devs: https://github.com/openshift/machine-config-operator/pull/3866
// Expiry expiration date for the certificate
Expiry string `json:"expiry"`
// Subject certificate's subject
Subject string `json:"subject"`
}
// MachineConfigPool struct is used to handle MachineConfigPool resources in OCP
type MachineConfigPool struct {
template string
Resource
MinutesWaitingPerNode int
}
// MachineConfigPoolList struct handles list of MCPs
type MachineConfigPoolList struct {
ResourceList
}
// NewMachineConfigPool creates a new MachineConfigPool struct
func NewMachineConfigPool(oc *exutil.CLI, name string) *MachineConfigPool {
return &MachineConfigPool{Resource: *NewResource(oc, "mcp", name), MinutesWaitingPerNode: DefaultMinutesWaitingPerNode}
}
// NewMachineConfigPoolList constructs a new MachineConfigPoolList struct to handle all existing MCPs
func NewMachineConfigPoolList(oc *exutil.CLI) *MachineConfigPoolList {
return &MachineConfigPoolList{*NewResourceList(oc, "mcp")}
}
// create creates the MCP resource from its template and waits for the pool to complete
func (mcp *MachineConfigPool) create() {
exutil.CreateClusterResourceFromTemplate(mcp.oc, "--ignore-unknown-parameters=true", "-f", mcp.template, "-p", "NAME="+mcp.name)
mcp.waitForComplete()
}
func (mcp *MachineConfigPool) delete() {
logger.Infof("deleting custom mcp: %s", mcp.name)
err := mcp.oc.AsAdmin().WithoutNamespace().Run("delete").Args("mcp", mcp.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (mcp *MachineConfigPool) pause(enable bool) {
logger.Infof("patch mcp %v, change spec.paused to %v", mcp.name, enable)
err := mcp.Patch("merge", `{"spec":{"paused": `+strconv.FormatBool(enable)+`}}`)
o.Expect(err).NotTo(o.HaveOccurred())
}
// IsPaused returns true if the mcp is paused
func (mcp *MachineConfigPool) IsPaused() bool {
return IsTrue(mcp.GetOrFail(`{.spec.paused}`))
}
// IsCustom returns true if the pool is not the master pool nor the worker pool
func (mcp *MachineConfigPool) IsCustom() bool {
return !mcp.IsMaster() && !mcp.IsWorker()
}
// IsMaster returns true if the pool is the master pool
func (mcp *MachineConfigPool) IsMaster() bool {
return mcp.GetName() == MachineConfigPoolMaster
}
// IsWorker returns true if the pool is the worker pool
func (mcp *MachineConfigPool) IsWorker() bool {
return mcp.GetName() == MachineConfigPoolWorker
}
// IsEmpty returns true if the pool has no nodes
func (mcp *MachineConfigPool) IsEmpty() bool {
var (
numNodes int
)
o.Eventually(func() (err error) {
numNodes, err = mcp.getMachineCount()
return err
}, "2m", "10s").Should(o.Succeed(),
"It was not possible to get the status.machineCount value for MPC %s", mcp.GetName())
return numNodes == 0
}
// GetMaxUnavailableInt gets the value of maxUnavailable as an integer
func (mcp *MachineConfigPool) GetMaxUnavailableInt() (int, error) {
maxUnavailableString, err := mcp.Get(`{.spec.maxUnavailable}`)
if err != nil {
return -1, err
}
if maxUnavailableString == "" {
logger.Infof("maxUnavailable not configured in mcp %s, default value is 1", mcp.GetName())
return 1, nil
}
maxUnavailableInt, convErr := strconv.Atoi(maxUnavailableString)
if convErr != nil {
logger.Errorf("Error converting maxUnavailableString to integer: %s", convErr)
return -1, convErr
}
return maxUnavailableInt, nil
}
// SetMaxUnavailable sets the value for maxUnavailable
func (mcp *MachineConfigPool) SetMaxUnavailable(maxUnavailable int) {
logger.Infof("patch mcp %v, change spec.maxUnavailable to %d", mcp.name, maxUnavailable)
err := mcp.Patch("merge", fmt.Sprintf(`{"spec":{"maxUnavailable": %d}}`, maxUnavailable))
o.Expect(err).NotTo(o.HaveOccurred())
}
// RemoveMaxUnavailable removes spec.maxUnavailable attribute from the pool config
func (mcp *MachineConfigPool) RemoveMaxUnavailable() {
logger.Infof("patch mcp %v, removing spec.maxUnavailable", mcp.name)
err := mcp.Patch("json", `[{ "op": "remove", "path": "/spec/maxUnavailable" }]`)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (mcp *MachineConfigPool) getConfigNameOfSpec() (string, error) {
output, err := mcp.Get(`{.spec.configuration.name}`)
logger.Infof("spec.configuration.name of mcp/%v is %v", mcp.name, output)
return output, err
}
func (mcp *MachineConfigPool) getConfigNameOfSpecOrFail() string {
config, err := mcp.getConfigNameOfSpec()
o.Expect(err).NotTo(o.HaveOccurred(), "Get config name of spec failed")
return config
}
func (mcp *MachineConfigPool) getConfigNameOfStatus() (string, error) {
output, err := mcp.Get(`{.status.configuration.name}`)
logger.Infof("status.configuration.name of mcp/%v is %v", mcp.name, output)
return output, err
}
func (mcp *MachineConfigPool) getConfigNameOfStatusOrFail() string {
config, err := mcp.getConfigNameOfStatus()
o.Expect(err).NotTo(o.HaveOccurred(), "Get config name of status failed")
return config
}
func (mcp *MachineConfigPool) getMachineCount() (int, error) {
machineCountStr, ocErr := mcp.Get(`{.status.machineCount}`)
if ocErr != nil {
logger.Infof("Error getting machineCount: %s", ocErr)
return -1, ocErr
}
if machineCountStr == "" {
return -1, fmt.Errorf(".status.machineCount value is not already set in MCP %s", mcp.GetName())
}
machineCount, convErr := strconv.Atoi(machineCountStr)
if convErr != nil {
logger.Errorf("Error converting machineCount to integer: %s", ocErr)
return -1, convErr
}
return machineCount, nil
}
func (mcp *MachineConfigPool) getDegradedMachineCount() (int, error) {
dmachineCountStr, ocErr := mcp.Get(`{.status.degradedMachineCount}`)
if ocErr != nil {
logger.Errorf("Error getting degradedmachineCount: %s", ocErr)
return -1, ocErr
}
dmachineCount, convErr := strconv.Atoi(dmachineCountStr)
if convErr != nil {
logger.Errorf("Error converting degradedmachineCount to integer: %s", ocErr)
return -1, convErr
}
return dmachineCount, nil
}
// getUpdatedMachineCount returns the number of updated machines in the pool
func (mcp *MachineConfigPool) getUpdatedMachineCount() (int, error) {
umachineCountStr, ocErr := mcp.Get(`{.status.updatedMachineCount}`)
if ocErr != nil {
logger.Errorf("Error getting updatedMachineCount: %s", ocErr)
return -1, ocErr
}
umachineCount, convErr := strconv.Atoi(umachineCountStr)
if convErr != nil {
logger.Errorf("Error converting updatedMachineCount to integer: %s", ocErr)
return -1, convErr
}
return umachineCount, nil
}
func (mcp *MachineConfigPool) pollMachineCount() func() string {
return mcp.Poll(`{.status.machineCount}`)
}
func (mcp *MachineConfigPool) pollReadyMachineCount() func() string {
return mcp.Poll(`{.status.readyMachineCount}`)
}
func (mcp *MachineConfigPool) pollDegradedMachineCount() func() string {
return mcp.Poll(`{.status.degradedMachineCount}`)
}
// GetDegradedStatus returns the value of the 'Degraded' condition in the MCP
func (mcp *MachineConfigPool) GetDegradedStatus() (string, error) {
return mcp.Get(`{.status.conditions[?(@.type=="Degraded")].status}`)
}
func (mcp *MachineConfigPool) pollDegradedStatus() func() string {
return mcp.Poll(`{.status.conditions[?(@.type=="Degraded")].status}`)
}
// GetUpdatedStatus returns the value of the 'Updated' condition in the MCP
func (mcp *MachineConfigPool) GetUpdatedStatus() (string, error) {
return mcp.Get(`{.status.conditions[?(@.type=="Updated")].status}`)
}
// GetUpdatingStatus returns the value of 'Updating' condition in the MCP
func (mcp *MachineConfigPool) GetUpdatingStatus() (string, error) {
return mcp.Get(`{.status.conditions[?(@.type=="Updating")].status}`)
}
func (mcp *MachineConfigPool) pollUpdatedStatus() func() string {
return mcp.Poll(`{.status.conditions[?(@.type=="Updated")].status}`)
}
func (mcp *MachineConfigPool) estimateWaitDuration() time.Duration {
var (
totalNodes int
guessedNodes = 3 // the number of nodes that we will use if we cannot get the actual number of nodes in the cluster
masterAdjust = 1.0
snoModifier = 0.0
emptyMCPWaitDuration = 2.0
minutesDuration = 1 * time.Minute
)
err := Retry(5, 3*time.Second, func() error {
var err error
totalNodes, err = mcp.getMachineCount()
return err
})
if err != nil {
logger.Errorf("Not able to get the number of nodes in the %s MCP. Making a guess of %d nodes. Err: %s", mcp.GetName(), guessedNodes, err)
totalNodes = guessedNodes
}
logger.Infof("Num nodes: %d, wait time per node %d minutes", totalNodes, mcp.MinutesWaitingPerNode)
// If the pool has no node configured, we wait at least 2.0 minutes.
// There are tests that create pools with 0 nodes and wait for the pools to be updated. They can't wait 0 minutes.
// We wait 2.0 minutes and not 1 minute because many functions do not poll immediately and they wait a 1 minute interval before starting to poll.
// If we wait less than this interval the wait function will always fail.
if totalNodes == 0 {
logger.Infof("Defining waiting time for pool with no nodes")
return time.Duration(emptyMCPWaitDuration * float64(minutesDuration))
}
if mcp.IsMaster() {
logger.Infof("Increase waiting time because it is master pool")
masterAdjust = 1.3 // if the pool is the master pool, we wait an extra 30% time
}
// Because of https://issues.redhat.com/browse/OCPBUGS-37501 in SNO MCPs can take up to 3 minutes more to be updated because the MCC is not taking the lease properly
if totalNodes == 1 {
var isSNO bool
err = Retry(5, 3*time.Second, func() error {
var snoErr error
isSNO, snoErr = IsSNOSafe(mcp.GetOC())
return snoErr
})
if err != nil {
logger.Errorf("Not able to know if the cluster is SNO. We guess it is SNO. Err: %s", err)
}
if isSNO || err != nil {
logger.Infof("Increase waiting time because it is SNO")
snoModifier = 3
}
}
return time.Duration(((float64(totalNodes*mcp.MinutesWaitingPerNode) * masterAdjust) + snoModifier) * float64(minutesDuration))
}
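// Worked example of the estimate above (illustrative, not part of the original source):
// a 3-node master pool with MinutesWaitingPerNode = m and no SNO modifier waits
// 3*m*1.3 minutes, while a pool whose machineCount is 0 always waits the fixed
// emptyMCPWaitDuration of 2 minutes regardless of m.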
// SetWaitingTimeForKernelChange increases the time that the MCP will wait for the update to be executed
func (mcp *MachineConfigPool) SetWaitingTimeForKernelChange() {
mcp.MinutesWaitingPerNode = DefaultMinutesWaitingPerNode + KernelChangeIncWait
}
// SetWaitingTimeForExtensionsChange increases the time that the MCP will wait for the update to be executed
func (mcp *MachineConfigPool) SetWaitingTimeForExtensionsChange() {
mcp.MinutesWaitingPerNode = DefaultMinutesWaitingPerNode + ExtensionsChangeIncWait
}
// SetDefaultWaitingTime restores the default waiting time that the MCP will wait for the update to be executed
func (mcp *MachineConfigPool) SetDefaultWaitingTime() {
mcp.MinutesWaitingPerNode = DefaultMinutesWaitingPerNode
}
// GetInternalIgnitionConfigURL returns the internal URL used by the nodes in this pool to get the ignition config
func (mcp *MachineConfigPool) GetInternalIgnitionConfigURL(secure bool) (string, error) {
var (
// SecurePort is the tls secured port to serve ignition configs
// InsecurePort is the port to serve ignition configs w/o tls
port = IgnitionSecurePort
protocol = "https"
)
internalAPIServerURI, err := GetAPIServerInternalURI(mcp.oc)
if err != nil {
return "", err
}
if !secure {
port = IgnitionInsecurePort
protocol = "http"
}
return fmt.Sprintf("%s://%s:%d/config/%s", protocol, internalAPIServerURI, port, mcp.GetName()), nil
}
// GetMCSIgnitionConfig returns the ignition config that the MCS is serving for this pool
func (mcp *MachineConfigPool) GetMCSIgnitionConfig(secure bool, ignitionVersion string) (string, error) {
var (
// SecurePort is the tls secured port to serve ignition configs
// InsecurePort is the port to serve ignition configs w/o tls
port = IgnitionSecurePort
)
if !secure {
port = IgnitionInsecurePort
}
url, err := mcp.GetInternalIgnitionConfigURL(secure)
if err != nil {
return "", err
}
// We will request the config from a master node
mMcp := NewMachineConfigPool(mcp.oc.AsAdmin(), MachineConfigPoolMaster)
masters, err := mMcp.GetNodes()
if err != nil {
return "", err
}
master := masters[0]
logger.Infof("Remove the IPV4 iptables rules that block the ignition config")
removedRules, err := master.RemoveIPTablesRulesByRegexp(fmt.Sprintf("%d", port))
defer master.ExecIPTables(removedRules)
if err != nil {
return "", err
}
logger.Infof("Remove the IPV6 ip6tables rules that block the ignition config")
removed6Rules, err := master.RemoveIP6TablesRulesByRegexp(fmt.Sprintf("%d", port))
defer master.ExecIP6Tables(removed6Rules)
if err != nil {
return "", err
}
cmd := []string{"curl", "-s"}
if secure {
cmd = append(cmd, "-k")
}
if ignitionVersion != "" {
cmd = append(cmd, []string{"-H", fmt.Sprintf("Accept:application/vnd.coreos.ignition+json;version=%s", ignitionVersion)}...)
}
cmd = append(cmd, url)
stdout, stderr, err := master.DebugNodeWithChrootStd(cmd...)
if err != nil {
return stdout + stderr, err
}
return stdout, nil
}
// getSelectedNodes returns a list with the nodes that match the .spec.nodeSelector.matchLabels criteria plus the provided extraLabels
func (mcp *MachineConfigPool) getSelectedNodes(extraLabels string) ([]Node, error) {
mcp.oc.NotShowInfo()
defer mcp.oc.SetShowInfo()
labelsString, err := mcp.Get(`{.spec.nodeSelector.matchLabels}`)
if err != nil {
return nil, err
}
labels := JSON(labelsString)
o.Expect(labels.Exists()).Should(o.BeTrue(), fmt.Sprintf("The pool has no matchLabels value defined: %s", mcp.PrettyString()))
nodeList := NewNodeList(mcp.oc)
// Never select windows nodes
requiredLabel := "kubernetes.io/os!=windows"
if extraLabels != "" {
requiredLabel += ","
requiredLabel += extraLabels
}
for k, v := range labels.ToMap() {
requiredLabel += fmt.Sprintf(",%s=%s", k, v.(string))
}
nodeList.ByLabel(requiredLabel)
return nodeList.GetAll()
}
// GetNodesByLabel returns a list with the nodes that belong to the machine config pool and contain the given labels
func (mcp *MachineConfigPool) GetNodesByLabel(labels string) ([]Node, error) {
mcp.oc.NotShowInfo()
defer mcp.oc.SetShowInfo()
nodes, err := mcp.getSelectedNodes(labels)
if err != nil {
return nil, err
}
returnNodes := []Node{}
for _, item := range nodes {
node := item
primaryPool, err := node.GetPrimaryPool()
if err != nil {
return nil, err
}
if primaryPool.GetName() == mcp.GetName() {
returnNodes = append(returnNodes, node)
}
}
return returnNodes, nil
}
// GetNodes returns a list with the nodes that belong to the machine config pool, by default, windows nodes will be excluded
func (mcp *MachineConfigPool) GetNodes() ([]Node, error) {
return mcp.GetNodesByLabel("")
}
// GetNodesWithoutArchitecture returns a list of nodes that belong to this pool and do NOT use the given architectures
func (mcp *MachineConfigPool) GetNodesWithoutArchitecture(arch architecture.Architecture, archs ...architecture.Architecture) ([]Node, error) {
archsList := arch.String()
for _, itemArch := range archs {
archsList = archsList + "," + itemArch.String()
}
return mcp.GetNodesByLabel(fmt.Sprintf(`%s notin (%s)`, architecture.NodeArchitectureLabel, archsList))
}
// GetNodesWithoutArchitectureOrFail returns a list of nodes that belong to this pool and do NOT use the given architectures. It fails the test if any error happens
func (mcp *MachineConfigPool) GetNodesWithoutArchitectureOrFail(arch architecture.Architecture, archs ...architecture.Architecture) []Node {
nodes, err := mcp.GetNodesWithoutArchitecture(arch, archs...)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "In MCP %s. Cannot get the nodes NOT using architectures %s", mcp.GetName(), append(archs, arch))
return nodes
}
// GetNodesByArchitecture returns a list of nodes that belong to this pool and use the given architecture
func (mcp *MachineConfigPool) GetNodesByArchitecture(arch architecture.Architecture, archs ...architecture.Architecture) ([]Node, error) {
archsList := arch.String()
for _, itemArch := range archs {
archsList = archsList + "," + itemArch.String()
}
return mcp.GetNodesByLabel(fmt.Sprintf(`%s in (%s)`, architecture.NodeArchitectureLabel, archsList))
}
// GetNodesByArchitectureOrFail returns a list of nodes that belong to this pool and use the given architecture. It fails the test if any error happens
func (mcp *MachineConfigPool) GetNodesByArchitectureOrFail(arch architecture.Architecture, archs ...architecture.Architecture) []Node {
nodes, err := mcp.GetNodesByArchitecture(arch, archs...)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "In MCP %s. Cannot get the nodes using architectures %s", mcp.GetName(), append(archs, arch))
return nodes
}
// GetNodesOrFail returns a list with the nodes that belong to the machine config pool and fail the test if any error happened
func (mcp *MachineConfigPool) GetNodesOrFail() []Node {
ns, err := mcp.GetNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the nodes in %s MCP", mcp.GetName())
return ns
}
// GetCoreOsNodes returns a list with the CoreOs nodes that belong to the machine config pool
func (mcp *MachineConfigPool) GetCoreOsNodes() ([]Node, error) {
return mcp.GetNodesByLabel("node.openshift.io/os_id=rhcos")
}
// GetCoreOsNodesOrFail returns a list with the coreos nodes that belong to the machine config pool and fail the test if any error happened
func (mcp *MachineConfigPool) GetCoreOsNodesOrFail() []Node {
ns, err := mcp.GetCoreOsNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the coreOS nodes in %s MCP", mcp.GetName())
return ns
}
// GetRhelNodes returns a list with the rhel nodes that belong to the machine config pool
func (mcp *MachineConfigPool) GetRhelNodes() ([]Node, error) {
return mcp.GetNodesByLabel("node.openshift.io/os_id=rhel")
}
// GetRhelNodesOrFail returns a list with the rhel nodes that belong to the machine config pool and fail the test if any error happened
func (mcp *MachineConfigPool) GetRhelNodesOrFail() []Node {
ns, err := mcp.GetRhelNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the rhel nodes in %s MCP", mcp.GetName())
return ns
}
// GetSortedNodes returns a list with the nodes that belong to the machine config pool in the same order used to update them
// when a configuration is applied
func (mcp *MachineConfigPool) GetSortedNodes() ([]Node, error) {
poolNodes, err := mcp.GetNodes()
if err != nil {
return nil, err
}
if !mcp.IsMaster() {
return sortNodeList(poolNodes), nil
}
return sortMasterNodeList(mcp.oc, poolNodes)
}
// GetSortedNodesOrFail returns a list with the nodes that belong to the machine config pool in the same order used to update them
// when a configuration is applied. If any error happens while getting the list, then the test is failed.
func (mcp *MachineConfigPool) GetSortedNodesOrFail() []Node {
nodes, err := mcp.GetSortedNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(),
"Cannot get the list of nodes that belong to '%s' MCP", mcp.GetName())
return nodes
}
// GetSortedUpdatedNodes returns the list of the UpdatedNodes sorted by the time when they started to be updated.
// If maxUnavailable>0, then the function will fail if more than maxUnavailable nodes are being updated at the same time
func (mcp *MachineConfigPool) GetSortedUpdatedNodes(maxUnavailable int) []Node {
timeToWait := mcp.estimateWaitDuration()
logger.Infof("Waiting %s in pool %s for all nodes to start updating.", timeToWait, mcp.name)
poolNodes, errget := mcp.GetNodes()
o.Expect(errget).NotTo(o.HaveOccurred(), fmt.Sprintf("Cannot get nodes in pool %s", mcp.GetName()))
pendingNodes := poolNodes
updatedNodes := []Node{}
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 20*time.Second, timeToWait, immediate, func(_ context.Context) (bool, error) {
// If there are degraded machines, stop polling, directly fail
degradedstdout, degradederr := mcp.getDegradedMachineCount()
if degradederr != nil {
logger.Errorf("the err:%v, and try next round", degradederr)
return false, nil
}
if degradedstdout != 0 {
logger.Errorf("Degraded MC:\n%s", mcp.PrettyString())
exutil.AssertWaitPollNoErr(fmt.Errorf("Degraded machines"), fmt.Sprintf("mcp %s has degraded %d machines", mcp.name, degradedstdout))
}
// Check that there aren't more than maxUnavailable nodes updating at the same time
if maxUnavailable > 0 {
totalUpdating := 0
for _, node := range poolNodes {
if node.IsUpdating() {
totalUpdating++
}
}
if totalUpdating > maxUnavailable {
// print nodes for debug
mcp.oc.Run("get").Args("nodes").Execute()
exutil.AssertWaitPollNoErr(fmt.Errorf("maxUnavailable Not Honored. Pool %s, error: %d nodes were updating at the same time. Only %d nodes should be updating at the same time", mcp.GetName(), totalUpdating, maxUnavailable), "")
}
}
remainingNodes := []Node{}
for _, node := range pendingNodes {
if node.IsUpdating() {
logger.Infof("Node %s is UPDATING", node.GetName())
updatedNodes = append(updatedNodes, node)
} else {
remainingNodes = append(remainingNodes, node)
}
}
if len(remainingNodes) == 0 {
logger.Infof("All nodes have started to be updated on mcp %s", mcp.name)
return true, nil
}
logger.Infof(" %d remaining nodes", len(remainingNodes))
pendingNodes = remainingNodes
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Could not get the list of updated nodes on mcp %s", mcp.name))
return updatedNodes
}
// GetCordonedNodes gets the cordoned nodes (if maxUnavailable > 1), otherwise returns the 1st cordoned node
func (mcp *MachineConfigPool) GetCordonedNodes() []Node {
// requirement is: when pool is in updating state, get the updating node list
o.Expect(mcp.WaitForUpdatingStatus()).NotTo(o.HaveOccurred(), "Waiting for Updating status change failed")
// polling all nodes in this pool and check whether all cordoned nodes (SchedulingDisabled)
var allUpdatingNodes []Node
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 10*time.Minute, true, func(_ context.Context) (bool, error) {
nodes, nerr := mcp.GetNodes()
if nerr != nil {
return false, fmt.Errorf("Get all linux node failed, will try again in next run %v", nerr)
}
for _, node := range nodes {
schedulable, serr := node.IsSchedulable()
if serr != nil {
logger.Errorf("Checking node is schedulable failed %v", serr)
continue
}
if !schedulable {
allUpdatingNodes = append(allUpdatingNodes, node)
}
}
return len(allUpdatingNodes) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Could not get the list of updating nodes on mcp %s", mcp.GetName()))
return allUpdatingNodes
}
// GetUnreconcilableNodes gets all nodes whose machineconfiguration.openshift.io/state annotation has the value Unreconcilable
func (mcp *MachineConfigPool) GetUnreconcilableNodes() ([]Node, error) {
allUnreconcilableNodes := []Node{}
allNodes, err := mcp.GetNodes()
if err != nil {
return nil, err
}
for _, n := range allNodes {
state := n.GetAnnotationOrFail(NodeAnnotationState)
if state == "Unreconcilable" {
allUnreconcilableNodes = append(allUnreconcilableNodes, n)
}
}
return allUnreconcilableNodes, nil
}
// GetUnreconcilableNodesOrFail gets all nodes whose machineconfiguration.openshift.io/state annotation has the value Unreconcilable.
// It fails the test if any error occurred
func (mcp *MachineConfigPool) GetUnreconcilableNodesOrFail() []Node {
allUnreconcilableNodes, err := mcp.GetUnreconcilableNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the unreconcilable nodes in %s MCP", mcp.GetName())
return allUnreconcilableNodes
}
// WaitForNotDegradedStatus waits until MCP is not degraded, if the condition times out the returned error is != nil
func (mcp MachineConfigPool) WaitForNotDegradedStatus() error {
timeToWait := mcp.estimateWaitDuration()
logger.Infof("Waiting %s for MCP %s status to be not degraded.", timeToWait, mcp.name)
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
stdout, err := mcp.GetDegradedStatus()
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, "False") {
logger.Infof("MCP degraded status is False %s", mcp.name)
return true, nil
}
return false, nil
})
if err != nil {
logger.Errorf("MCP: %s .Error waiting for not degraded status: %s", mcp.GetName(), err)
}
return err
}
// WaitForUpdatedStatus waits until MCP is reporting updated status, if the condition times out the returned error is != nil
func (mcp MachineConfigPool) WaitForUpdatedStatus() error {
return mcp.waitForConditionStatus("Updated", "True", mcp.estimateWaitDuration(), 1*time.Minute, false)
}
// WaitImmediateForUpdatedStatus waits until MCP is reporting updated status, if the condition times out the returned error is != nil. It starts checking immediately.
func (mcp MachineConfigPool) WaitImmediateForUpdatedStatus() error {
return mcp.waitForConditionStatus("Updated", "True", mcp.estimateWaitDuration(), 1*time.Minute, true)
}
// WaitForUpdatingStatus waits until MCP is reporting updating status, if the condition times out the returned error is != nil
func (mcp MachineConfigPool) WaitForUpdatingStatus() error {
return mcp.waitForConditionStatus("Updating", "True", 10*time.Minute, 5*time.Second, true)
}
func (mcp MachineConfigPool) waitForConditionStatus(condition, status string, timeout, interval time.Duration, immediate bool) error {
logger.Infof("Waiting %s for MCP %s condition %s to be %s", timeout, mcp.GetName(), condition, status)
err := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, immediate, func(_ context.Context) (bool, error) {
stdout, err := mcp.Get(`{.status.conditions[?(@.type=="` + condition + `")].status}`)
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, status) {
logger.Infof("MCP %s condition %s status is %s", mcp.GetName(), condition, stdout)
return true, nil
}
return false, nil
})
if err != nil {
logger.Errorf("MCP: %s .Error waiting for %s status: %s", mcp.GetName(), condition, err)
}
return err
}
// WaitForMachineCount waits until MCP is reporting the desired machineCount in the status, if the condition times out the returned error is != nil
func (mcp MachineConfigPool) WaitForMachineCount(expectedMachineCount int, timeToWait time.Duration) error {
logger.Infof("Waiting %s for MCP %s to report %d machine count.", timeToWait, mcp.GetName(), expectedMachineCount)
immediate := true
err := wait.PollUntilContextTimeout(context.TODO(), 30*time.Second, timeToWait, immediate, func(_ context.Context) (bool, error) {
mCount, err := mcp.getMachineCount()
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if mCount == expectedMachineCount {
logger.Infof("MCP is reporting %d machine count", mCount)
return true, nil
}
logger.Infof("Expected machine count %d. Reported machine count %d", expectedMachineCount, mCount)
return false, nil
})
if err != nil {
logger.Errorf("MCP: %s .Error waiting for %d machine count: %s", mcp.GetName(), expectedMachineCount, err)
}
return err
}
func (mcp *MachineConfigPool) waitForComplete() {
timeToWait := mcp.estimateWaitDuration()
logger.Infof("Waiting %s for MCP %s to be completed.", timeToWait, mcp.name)
waitFunc := func(_ context.Context) (bool, error) {
defer g.GinkgoRecover()
// If there are degraded machines, stop polling, directly fail
degradedstdout, degradederr := mcp.getDegradedMachineCount()
if degradederr != nil {
logger.Errorf("Error getting the number of degraded machines. Try next round: %s", degradederr)
return false, nil
}
if degradedstdout != 0 {
return true, fmt.Errorf("mcp %s has degraded %d machines", mcp.name, degradedstdout)
}
degradedStatus, err := mcp.GetDegradedStatus()
if err != nil {
logger.Errorf("Error getting degraded status.Try next round: %s", err)
return false, nil
}
if degradedStatus != FalseString {
return true, fmt.Errorf("mcp %s has degraded status: %s", mcp.name, degradedStatus)
}
stdout, err := mcp.Get(`{.status.conditions[?(@.type=="Updated")].status}`)
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, "True") {
// i.e. mcp updated=true, mc is applied successfully
logger.Infof("The new MC has been successfully applied to MCP '%s'", mcp.name)
return true, nil
}
return false, nil
}
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, waitFunc)
if err != nil && !strings.Contains(err.Error(), "degraded") {
mccLogs, logErr := NewController(mcp.GetOC()).GetLogs()
if logErr != nil {
logger.Errorf("Error getting MCC logs. Cannot check if drain is taking too long")
} else {
mccLatestLogs := GetLastNLines(mccLogs, 20)
if strings.Contains(mccLatestLogs, "error when evicting") {
logger.Infof("Some pods are taking too long to be evicted:\n%s", mccLatestLogs)
logger.Infof("Waiting for MCP %s another round! %s", mcp.name, timeToWait)
immediate = true
err = wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, waitFunc)
}
}
}
if err != nil {
exutil.ArchiveMustGatherFile(mcp.GetOC(), extractJournalLogs)
DebugDegradedStatus(mcp)
}
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), fmt.Sprintf("mc operation is not completed on mcp %s: %s", mcp.name, err))
}
// GetPoolSynchronizersStatusByType returns the poolSynchronizersStatus entry matching the given type
func (mcp *MachineConfigPool) GetPoolSynchronizersStatusByType(pType string) (string, error) {
return mcp.Get(`{.status.poolSynchronizersStatus[?(@.poolSynchronizerType=="` + pType + `")]}`)
}
// IsPinnedImagesComplete returns whether the MCP is reporting that there is no pinned images operation in progress
func (mcp *MachineConfigPool) IsPinnedImagesComplete() (bool, error) {
pinnedStatus, err := mcp.GetPoolSynchronizersStatusByType("PinnedImageSets")
if err != nil {
return false, err
}
logger.Infof("Pinned status: %s", pinnedStatus)
mcpMachineCount, err := mcp.Get(`{.status.machineCount}`)
if err != nil {
return false, err
}
if mcpMachineCount == "" {
return false, fmt.Errorf("status.machineCount is empty in mcp %s", mcp.GetName())
}
pinnedMachineCount := gjson.Get(pinnedStatus, "machineCount").String()
if pinnedMachineCount == "" {
return false, fmt.Errorf("pinned status machineCount is empty in mcp %s", mcp.GetName())
}
pinnedUnavailableMachineCount := gjson.Get(pinnedStatus, "unavailableMachineCount").String()
if pinnedUnavailableMachineCount == "" {
return false, fmt.Errorf("pinned status unavailableMachineCount is empty in mcp %s", mcp.GetName())
}
updatedMachineCount := gjson.Get(pinnedStatus, "updatedMachineCount").String()
if updatedMachineCount == "" {
return false, fmt.Errorf("pinned status updatedMachineCount is empty in mcp %s", mcp.GetName())
}
return mcpMachineCount == pinnedMachineCount && updatedMachineCount == pinnedMachineCount && pinnedUnavailableMachineCount == "0", nil
}
func (mcp *MachineConfigPool) allNodesReportingPinnedSuccess() (bool, error) {
allNodes, err := mcp.GetNodes()
if err != nil {
return false, err
}
if len(allNodes) == 0 {
logger.Infof("Warning, pool %s has no nodes!! We consider all nodes as correctly pinned", mcp.GetName())
}
for _, node := range allNodes {
nodeMCN := node.GetMachineConfigNode()
if nodeMCN.IsPinnedImageSetsDegraded() {
logger.Infof("Node %s is pinned degraded. Condition:\n%s", node.GetName(), nodeMCN.GetConditionByType("PinnedImageSetsDegraded"))
return false, nil
}
if nodeMCN.IsPinnedImageSetsProgressing() {
return false, nil
}
}
return true, nil
}
// waitForPinComplete waits until all images are pinned in the MCP. It returns an error if the images are not pinned in time
func (mcp *MachineConfigPool) waitForPinComplete(timeToWait time.Duration) error {
logger.Infof("Waiting %s for MCP %s to complete pinned images.", timeToWait, mcp.name)
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
pinnedComplete, err := mcp.IsPinnedImagesComplete()
if err != nil {
logger.Infof("Error getting pinned complete: %s", err)
return false, err
}
if !pinnedComplete {
logger.Infof("Waiting for PinnedImageSets poolSynchronizersStatus status to repot success")
return false, nil
}
allNodesComplete, err := mcp.allNodesReportingPinnedSuccess()
if err != nil {
logger.Infof("Error getting if all nodes finished")
return false, err
}
if !allNodesComplete {
logger.Infof("Waiting for all nodes to report pinned images success")
return false, nil
}
logger.Infof("Pool %s successfully pinned the images! Complete!", mcp.GetName())
return true, nil
})
if err != nil {
logger.Infof("Pinned images operation is not completed on mcp %s", mcp.name)
}
return err
}
// waitForPinApplied waits until the MCP reports that it has started to pin images, and then waits until all images are pinned. It fails the test case if the images are not pinned
// Because everything is cached in the pinnedimageset controller, if the images are already pinned the status change can be too fast and we can miss it
// This is a problem when we execute test cases that have been previously executed. In order to use this method we need to make sure that the pinned images are not already present in the nodes,
// or the test will become unstable.
func (mcp *MachineConfigPool) waitForPinApplied(timeToWait time.Duration) error {
logger.Infof("Waiting %s for MCP %s to apply pinned images.", timeToWait, mcp.name)
immediate := true
pinnedStarted := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
pinnedComplete, err := mcp.IsPinnedImagesComplete()
if err != nil {
logger.Infof("Error getting pinned complete: %s", err)
return false, err
}
if !pinnedStarted && !pinnedComplete {
pinnedStarted = true
logger.Infof("Pool %s has started to pin images", mcp.GetName())
}
if pinnedStarted {
if !pinnedComplete {
logger.Infof("Waiting for PinnedImageSets poolSynchronizersStatus status to repot success")
return false, nil
}
allNodesComplete, err := mcp.allNodesReportingPinnedSuccess()
if err != nil {
logger.Infof("Error getting if all nodes finished")
return false, err
}
if !allNodesComplete {
logger.Infof("Waiting for all nodes to report pinned images success")
return false, nil
}
logger.Infof("Pool %s successfully pinned the images! Complete!", mcp.GetName())
return true, nil
}
logger.Infof("Pool %s has not started to pin images yet", mcp.GetName())
return false, nil
})
if err != nil {
logger.Infof("Pinned images operation is not applied on mcp %s", mcp.name)
}
return err
}
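// Illustrative usage sketch, not part of the original helpers. It assumes a PinnedImageSet targeting the worker
// pool has already been created and that the pinned images are not yet present in the nodes (see the caveat in
// waitForPinApplied above). The 30 minute timeout is only an example value.
//
//	wMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
//	// Wait for the pinning operation to start and then to finish
//	err := wMcp.waitForPinApplied(30 * time.Minute)
//	o.Expect(err).NotTo(o.HaveOccurred(), "Images were not pinned in the worker pool")
//	// Or, if we only care about the final state, wait for completion only
//	o.Expect(wMcp.waitForPinComplete(30*time.Minute)).NotTo(o.HaveOccurred(), "Images were not pinned in the worker pool")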
// GetReportedOsImageOverrideValue returns the value of the os_image_url_override prometheus metric for this pool
func (mcp *MachineConfigPool) GetReportedOsImageOverrideValue() (string, error) {
query := fmt.Sprintf(`os_image_url_override{pool="%s"}`, strings.ToLower(mcp.GetName()))
mon, err := exutil.NewMonitor(mcp.oc.AsAdmin())
if err != nil {
return "", err
}
osImageOverride, err := mon.SimpleQuery(query)
if err != nil {
return "", err
}
jsonOsImageOverride := JSON(osImageOverride)
status := jsonOsImageOverride.Get("status").ToString()
if status != "success" {
return "", fmt.Errorf("Query %s execution failed: %s", query, osImageOverride)
}
logger.Infof("%s metric is:%s", query, osImageOverride)
metricValue := JSON(osImageOverride).Get("data").Get("result").Item(0).Get("value").Item(1).ToString()
return metricValue, nil
}
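// Illustrative usage sketch, not part of the original helpers. The expected metric values are an assumption for
// illustration only: "0" when the pool does not override the OS image and a different value when it does.
//
//	wMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
//	overrideValue, err := wMcp.GetReportedOsImageOverrideValue()
//	o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the os_image_url_override metric for pool %s", wMcp.GetName())
//	o.Expect(overrideValue).To(o.Equal("0"), "The worker pool should not report an overridden OS image")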
// RecoverFromDegraded updates the current and desired machine configs so that the pool can recover from degraded state once the offending MC is deleted
func (mcp *MachineConfigPool) RecoverFromDegraded() error {
logger.Infof("Recovering %s pool from degraded status", mcp.GetName())
mcpNodes, _ := mcp.GetNodes()
for _, node := range mcpNodes {
logger.Infof("Restoring desired config in node: %s", node)
if node.IsUpdated() {
logger.Infof("node is updated, don't need to recover")
} else {
err := node.RestoreDesiredConfig()
if err != nil {
return fmt.Errorf("Error restoring desired config in node %s. Error: %s",
mcp.GetName(), err)
}
}
}
derr := mcp.WaitForNotDegradedStatus()
if derr != nil {
logger.Infof("Could not recover from the degraded status: %s", derr)
return derr
}
uerr := mcp.WaitForUpdatedStatus()
if uerr != nil {
logger.Infof("Could not recover from the degraded status: %s", uerr)
return uerr
}
return nil
}
// IsRealTimeKernel returns true if the pool is using a realtime kernel
func (mcp *MachineConfigPool) IsRealTimeKernel() (bool, error) {
nodes, err := mcp.GetNodes()
if err != nil {
logger.Errorf("Error getting the nodes in pool %s", mcp.GetName())
return false, err
}
if len(nodes) == 0 {
return false, fmt.Errorf("there are no nodes in pool %s, so it is not possible to check the kernel type", mcp.GetName())
}
return nodes[0].IsRealTimeKernel()
}
// GetConfiguredMachineConfig return the MachineConfig currently configured in the pool
func (mcp *MachineConfigPool) GetConfiguredMachineConfig() (*MachineConfig, error) {
currentMcName, err := mcp.Get("{.status.configuration.name}")
if err != nil {
logger.Errorf("Error getting the currently configured MC in pool %s: %s", mcp.GetName(), err)
return nil, err
}
logger.Debugf("The currently configured MC in pool %s is: %s", mcp.GetName(), currentMcName)
return NewMachineConfig(mcp.oc, currentMcName, mcp.GetName()), nil
}
// SanityCheck returns an error if the MCP is Degraded or Updating.
// We can't use WaitForUpdatedStatus or WaitForNotDegradedStatus because they always wait the interval. In a sanity check we want a fast response.
func (mcp *MachineConfigPool) SanityCheck() error {
timeToWait := mcp.estimateWaitDuration() / 13
logger.Infof("Waiting %s for MCP %s to be completed.", timeToWait.Round(time.Second), mcp.name)
const trueStatus = "True"
var message string
immediate := true
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
// If there are degraded machines, stop polling, directly fail
degraded, degradederr := mcp.GetDegradedStatus()
if degradederr != nil {
message = fmt.Sprintf("Error gettting Degraded status: %s", degradederr)
return false, nil
}
if degraded == trueStatus {
message = fmt.Sprintf("MCP '%s' is degraded", mcp.GetName())
return false, nil
}
updated, err := mcp.GetUpdatedStatus()
if err != nil {
message = fmt.Sprintf("Error gettting Updated status: %s", err)
return false, nil
}
if updated == trueStatus {
logger.Infof("MCP '%s' is ready for testing", mcp.name)
return true, nil
}
message = fmt.Sprintf("MCP '%s' is not updated", mcp.GetName())
return false, nil
})
if err != nil {
return fmt.Errorf("%s", message)
}
return nil
}
// GetCertsExpiry returns the information about the certificates tracked by the MCP
func (mcp *MachineConfigPool) GetCertsExpiry() ([]CertExpiry, error) {
expiryString, err := mcp.Get(`{.status.certExpirys}`)
if err != nil {
return nil, err
}
var certsExp []CertExpiry
jsonerr := json.Unmarshal([]byte(expiryString), &certsExp)
if jsonerr != nil {
return nil, jsonerr
}
return certsExp, nil
}
// GetArchitectures returns the list of architectures that the nodes in this pool are using
func (mcp *MachineConfigPool) GetArchitectures() ([]architecture.Architecture, error) {
archs := []architecture.Architecture{}
nodes, err := mcp.GetNodes()
if err != nil {
return archs, err
}
for _, node := range nodes {
archs = append(archs, node.GetArchitectureOrFail())
}
return archs, nil
}
// GetArchitecturesOrFail returns the list of architectures that the nodes in this pool are using, if there is any error it fails the test
func (mcp *MachineConfigPool) GetArchitecturesOrFail() []architecture.Architecture {
archs, err := mcp.GetArchitectures()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error getting the architectures used by nodes in MCP %s", mcp.GetName())
return archs
}
// AllNodesUseArch returns true if all the nodes in the pool have the given architecture
func (mcp *MachineConfigPool) AllNodesUseArch(arch architecture.Architecture) bool {
for _, currentArch := range mcp.GetArchitecturesOrFail() {
if arch != currentArch {
return false
}
}
return true
}
// CaptureAllNodeLogsBeforeRestart polls the logs of every node in the pool until they are restarted and returns them once all nodes have been restarted
func (mcp *MachineConfigPool) CaptureAllNodeLogsBeforeRestart() (map[string]string, error) {
type nodeLogs struct {
nodeName string
nodeLogs string
err error
}
returnMap := map[string]string{}
c := make(chan nodeLogs)
var wg sync.WaitGroup
timeToWait := mcp.estimateWaitDuration()
logger.Infof("Waiting %s until all nodes nodes %s MCP are restarted and their logs are captured before restart", timeToWait.String(), mcp.GetName())
nodes, err := mcp.GetNodes()
if err != nil {
return nil, err
}
for _, item := range nodes {
node := item
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
logger.Infof("Capturing node %s logs until restart", node.GetName())
logs, err := node.CaptureMCDaemonLogsUntilRestartWithTimeout(timeToWait.String())
if err != nil {
logger.Errorf("Error while tring to capture the MCD lgos in node %s before restart", node.GetName())
} else {
logger.Infof("Captured MCD logs before node %s was rebooted", node.GetName())
}
c <- nodeLogs{nodeName: node.GetName(), nodeLogs: logs, err: err}
}()
}
// We are using an unbuffered (zero-capacity) channel, so every goroutine would block on "c <- nodeLogs" if we directly called wg.Wait, because no one is reading from the channel yet
// One solution is to wait inside a goroutine and close the channel once every node has reported its logs
// Another solution could be to use a buffered channel with size = len(nodes), like `c := make(chan nodeLogs, len(nodes))`, so that all tasks can write to the channel without blocking
go func() {
defer g.GinkgoRecover()
logger.Infof("Waiting for all pre-reboot logs to be collected")
wg.Wait()
logger.Infof("All logs collected. Closing the channel")
close(c)
}()
// Here we read from the channel and unlock the "c <- nodeLogs" instruction. If we call wg.Wait before this point, tasks will be locked there forever
for nl := range c {
if nl.err != nil {
return nil, nl.err
}
returnMap[nl.nodeName] = nl.nodeLogs
}
return returnMap, nil
}
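// Illustrative sketch, not part of the original helpers: the buffered-channel alternative mentioned in the
// comments of CaptureAllNodeLogsBeforeRestart. With a channel of capacity len(nodes) every goroutine can send its
// result without blocking, so wg.Wait can be called before draining the channel. Variable names mirror the
// function above.
//
//	c := make(chan nodeLogs, len(nodes))
//	for _, item := range nodes {
//		node := item
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			logs, err := node.CaptureMCDaemonLogsUntilRestartWithTimeout(timeToWait.String())
//			c <- nodeLogs{nodeName: node.GetName(), nodeLogs: logs, err: err}
//		}()
//	}
//	wg.Wait() // safe: all sends complete without a reader because the channel is buffered
//	close(c)
//	for nl := range c {
//		if nl.err != nil {
//			return nil, nl.err
//		}
//		returnMap[nl.nodeName] = nl.nodeLogs
//	}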
// GetPinnedImageSets returns the PinnedImageSet resources whose labels match this pool's .spec.machineConfigSelector.matchLabels criteria
func (mcp *MachineConfigPool) GetPinnedImageSets() ([]PinnedImageSet, error) {
mcp.oc.NotShowInfo()
defer mcp.oc.SetShowInfo()
labelsString, err := mcp.Get(`{.spec.machineConfigSelector.matchLabels}`)
if err != nil {
return nil, err
}
if labelsString == "" {
return nil, fmt.Errorf("No machineConfigSelector found in %s", mcp)
}
labels := gjson.Parse(labelsString)
requiredLabel := ""
labels.ForEach(func(key, value gjson.Result) bool {
requiredLabel += fmt.Sprintf("%s=%s,", key.String(), value.String())
return true // keep iterating
})
if requiredLabel == "" {
return nil, fmt.Errorf("No labels matcher could be built for %s", mcp)
}
// remove the last comma
requiredLabel = strings.TrimSuffix(requiredLabel, ",")
pisList := NewPinnedImageSetList(mcp.oc)
pisList.ByLabel(requiredLabel)
return pisList.GetAll()
}
// Reboot reboots all nodes in the pool by using command "oc adm reboot-machine-config-pool mcp/POOLNAME"
func (mcp *MachineConfigPool) Reboot() error {
logger.Infof("Rebooting nodes in pool %s", mcp.GetName())
return mcp.oc.WithoutNamespace().Run("adm").Args("reboot-machine-config-pool", "mcp/"+mcp.GetName()).Execute()
}
// WaitForRebooted waits for the "Reboot" method to actually reboot all nodes by using command "oc adm wait-for-node-reboot nodes -l node-role.kubernetes.io/POOLNAME"
func (mcp *MachineConfigPool) WaitForRebooted() error {
logger.Infof("Waiting for nodes in pool %s to be rebooted", mcp.GetName())
return mcp.oc.WithoutNamespace().Run("adm").Args("wait-for-node-reboot", "nodes", "-l", "node-role.kubernetes.io/"+mcp.GetName()).Execute()
}
// GetMOSC returns the MachineOSConfig resource for this pool
func (mcp MachineConfigPool) GetMOSC() (*MachineOSConfig, error) {
moscList := NewMachineOSConfigList(mcp.GetOC())
moscList.SetItemsFilter(`?(@.spec.machineConfigPool.name=="` + mcp.GetName() + `")`)
moscs, err := moscList.GetAll()
if err != nil {
return nil, err
}
if len(moscs) > 1 {
moscList.PrintDebugCommand()
return nil, fmt.Errorf("There are more than one MOSC for pool %s", mcp.GetName())
}
if len(moscs) == 0 {
return nil, nil
}
return &(moscs[0]), nil
}
// IsOCL returns true if the pool is using On Cluster Layering functionality
func (mcp MachineConfigPool) IsOCL() (bool, error) {
isOCLEnabled, err := IsFeaturegateEnabled(mcp.GetOC(), "OnClusterBuild")
if err != nil {
return false, err
}
if !isOCLEnabled {
logger.Infof("IS pool %s OCL: false", mcp.GetName())
return false, nil
}
mosc, err := mcp.GetMOSC()
if err != nil {
return false, err
}
isOCL := mosc != nil
logger.Infof("IS pool %s OCL: %t", mcp.GetName(), isOCL)
return isOCL, err
}
// GetLatestMachineOSBuildOrFail returns the latest MachineOSBuild created for this MCP, failing the test if the pool's rendered config name cannot be retrieved
func (mcp *MachineConfigPool) GetLatestMachineOSBuildOrFail() *MachineOSBuild {
return NewMachineOSBuild(mcp.oc, fmt.Sprintf("%s-%s-builder", mcp.GetName(), mcp.getConfigNameOfSpecOrFail()))
}
// GetAll returns a []MachineConfigPool list with all existing machine config pools sorted by creation time
func (mcpl *MachineConfigPoolList) GetAll() ([]MachineConfigPool, error) {
mcpl.ResourceList.SortByTimestamp()
allMCPResources, err := mcpl.ResourceList.GetAll()
if err != nil {
return nil, err
}
allMCPs := make([]MachineConfigPool, 0, len(allMCPResources))
for _, mcpRes := range allMCPResources {
allMCPs = append(allMCPs, *NewMachineConfigPool(mcpl.oc, mcpRes.name))
}
return allMCPs, nil
}
// GetAllOrFail returns a []MachineConfigPool list with all existing machine config pools sorted by creation time, if any error happens it fails the test
func (mcpl *MachineConfigPoolList) GetAllOrFail() []MachineConfigPool {
mcps, err := mcpl.GetAll()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error getting the list of existing MCP in the cluster")
return mcps
}
// waitForComplete waits until all MCP in the list are updated
func (mcpl *MachineConfigPoolList) waitForComplete() {
mcps, err := mcpl.GetAll()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error getting the list of existing MCP in the cluster")
// We always wait for master first, to make sure that we avoid problems in SNO
for _, mcp := range mcps {
if mcp.IsMaster() {
mcp.waitForComplete()
break
}
}
for _, mcp := range mcps {
if !mcp.IsMaster() {
mcp.waitForComplete()
}
}
}
// GetCompactCompatiblePool returns the worker pool if the cluster is not compact/SNO. Otherwise it returns the master pool, or a custom pool if the worker pool is empty.
// Current logic:
// - If the worker pool has nodes, we return the worker pool
// - Else, if the worker pool is empty:
//   - If custom pools exist:
//     - If any custom pool has nodes, we return that custom pool
//     - Else (all custom pools are empty) we are in a Compact/SNO cluster with extra empty custom pools, and we return master
//   - Else (worker pool is empty and there is no custom pool) we are in a Compact/SNO cluster, and we return master
func GetCompactCompatiblePool(oc *exutil.CLI) *MachineConfigPool {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
mcpList = NewMachineConfigPoolList(oc)
)
mcpList.PrintDebugCommand()
if IsCompactOrSNOCluster(oc) {
return mMcp
}
if !wMcp.IsEmpty() {
return wMcp
}
// The cluster is not Compact/SNO but the worker pool is empty. All worker nodes have been moved to one or several custom pools
for _, mcp := range mcpList.GetAllOrFail() {
if mcp.IsCustom() && !mcp.IsEmpty() { // All worker nodes were moved to custom pools
logger.Infof("Worker pool is empty, but there is a custom pool with nodes. Proposing %s MCP for testing", mcp.GetName())
return &mcp
}
}
e2e.Failf("Something went wrong. There is no suitable pool to execute the test case")
return nil
}
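// Illustrative usage sketch, not part of the original helpers: selecting a pool that works in standard, compact
// and SNO clusters before applying a test configuration.
//
//	mcp := GetCompactCompatiblePool(oc.AsAdmin())
//	logger.Infof("Using pool %s for testing", mcp.GetName())
//	// ... apply the test MachineConfig to the selected pool ...
//	mcp.waitForComplete()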
// GetCoreOsCompatiblePool returns worker pool if it has CoreOs nodes. If there is no CoreOs node in the worker pool, then it returns master pool.
func GetCoreOsCompatiblePool(oc *exutil.CLI) *MachineConfigPool {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
if len(wMcp.GetCoreOsNodesOrFail()) == 0 {
logger.Infof("No CoreOs nodes in the worker pool. Using master pool for testing")
return mMcp
}
return wMcp
}
// CreateCustomMCPByLabel creates a new custom MCP using the nodes in the worker pool with the given label. If numNodes < 0, we will add all existing nodes to the custom pool
// If numNodes == 0, no node will be added to the new custom pool.
func CreateCustomMCPByLabel(oc *exutil.CLI, name, label string, numNodes int) (*MachineConfigPool, error) {
wMcp := NewMachineConfigPool(oc, MachineConfigPoolWorker)
nodes, err := wMcp.GetNodesByLabel(label)
if err != nil {
logger.Errorf("Could not get the nodes with %s label", label)
return nil, err
}
if len(nodes) < numNodes {
return nil, fmt.Errorf("The worker MCP only has %d nodes, it is not possible to take %d nodes from worker pool to create a custom pool",
len(nodes), numNodes)
}
customMcpNodes := []Node{}
for i, item := range nodes {
n := item
if numNodes > 0 && i >= numNodes {
break
}
customMcpNodes = append(customMcpNodes, n)
}
return CreateCustomMCPByNodes(oc, name, customMcpNodes)
}
// CreateCustomMCP creates a new custom MCP with the given name and the given number of nodes
// Nodes will be taken from the worker pool
func CreateCustomMCP(oc *exutil.CLI, name string, numNodes int) (*MachineConfigPool, error) {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
workerNodes, err := wMcp.GetNodes()
if err != nil {
return NewMachineConfigPool(oc, name), err
}
if numNodes > len(workerNodes) {
return NewMachineConfigPool(oc, name), fmt.Errorf("A %d nodes custom pool cannot be created because there are only %d nodes in the %s pool",
numNodes, len(workerNodes), wMcp.GetName())
}
return CreateCustomMCPByNodes(oc, name, workerNodes[0:numNodes])
}
// CreateCustomMCPByNodes creates a new MCP containing the nodes provided in the "nodes" parameter
func CreateCustomMCPByNodes(oc *exutil.CLI, name string, nodes []Node) (*MachineConfigPool, error) {
customMcp := NewMachineConfigPool(oc, name)
err := NewMCOTemplate(oc, "custom-machine-config-pool.yaml").Create("-p", fmt.Sprintf("NAME=%s", name))
if err != nil {
logger.Errorf("Could not create a custom MCP for worker nodes with nodes %s", nodes)
return customMcp, err
}
for _, n := range nodes {
err := n.AddLabel(fmt.Sprintf("node-role.kubernetes.io/%s", name), "")
if err != nil {
logger.Errorf("Error labeling node %s to add it to pool %s: %s", n.GetName(), customMcp.GetName(), err)
continue
}
logger.Infof("Node %s added to custom pool %s", n.GetName(), customMcp.GetName())
}
expectedNodes := len(nodes)
err = customMcp.WaitForMachineCount(expectedNodes, 5*time.Minute)
if err != nil {
logger.Errorf("The %s MCP is not reporting the expected machine count", customMcp.GetName())
return customMcp, err
}
err = customMcp.WaitImmediateForUpdatedStatus()
if err != nil {
logger.Errorf("The %s MCP is not updated", customMcp.GetName())
return customMcp, err
}
return customMcp, nil
}
// DeleteCustomMCP deletes a custom MCP properly unlabeling the nodes first
func DeleteCustomMCP(oc *exutil.CLI, name string) error {
mcp := NewMachineConfigPool(oc, name)
if !mcp.Exists() {
logger.Infof("MCP %s does not exist. No need to remove it", mcp.GetName())
return nil
}
exutil.By(fmt.Sprintf("Removing custom MCP %s", name))
nodes, err := mcp.GetNodes()
if err != nil {
logger.Errorf("Could not get the nodes that belong to MCP %s: %s", mcp.GetName(), err)
return err
}
label := fmt.Sprintf("node-role.kubernetes.io/%s", mcp.GetName())
for _, node := range nodes {
logger.Infof("Removing pool label from node %s", node.GetName())
err := node.RemoveLabel(label)
if err != nil {
logger.Errorf("Could not remove the role label from node %s: %s", node.GetName(), err)
return err
}
}
for _, node := range nodes {
err := node.WaitForLabelRemoved(label)
if err != nil {
logger.Errorf("The label %s was not removed from node %s", label, node.GetName())
}
}
err = mcp.WaitForMachineCount(0, 5*time.Minute)
if err != nil {
logger.Errorf("The %s MCP already contains nodes, it cannot be deleted: %s", mcp.GetName(), err)
return err
}
// Wait for worker MCP to be updated before removing the custom pool
// in order to make sure that no node has any annotation pointing to resources that depend on the custom pool that we want to delete
wMcp := NewMachineConfigPool(oc, MachineConfigPoolWorker)
err = wMcp.WaitForUpdatedStatus()
if err != nil {
logger.Errorf("The worker MCP was not ready after removing the custom pool: %s", err)
wMcp.PrintDebugCommand()
return err
}
err = mcp.Delete()
if err != nil {
logger.Errorf("The %s MCP could not be deleted: %s", mcp.GetName(), err)
return err
}
logger.Infof("OK!\n")
return nil
}
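// Illustrative usage sketch, not part of the original helpers: typical lifecycle of a custom pool in a test case.
// The pool name "mco-custom-test" is only an example value.
//
//	customMcp, err := CreateCustomMCP(oc.AsAdmin(), "mco-custom-test", 1)
//	defer DeleteCustomMCP(oc.AsAdmin(), "mco-custom-test")
//	o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the custom pool")
//	// ... run the test logic against customMcp ...
//	logger.Infof("Testing with custom pool %s", customMcp.GetName())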
// GetPoolAndNodesForArchitectureOrFail returns a MCP and its nodes in this order of priority:
// 1) The master pool if it is a compact/SNO cluster whose nodes use the requested architecture.
// 2) Any existing custom MCP with all its nodes using the requested architecture.
// 3) A newly created custom pool with the requested number of nodes if there are nodes with that architecture in the worker pool.
// 4) The master pool if its nodes use the requested architecture.
func GetPoolAndNodesForArchitectureOrFail(oc *exutil.CLI, createMCPName string, arch architecture.Architecture, numNodes int) (*MachineConfigPool, []Node) {
var (
wMcp = NewMachineConfigPool(oc, MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc, MachineConfigPoolMaster)
masterHasTheRightArch = mMcp.AllNodesUseArch(arch)
mcpList = NewMachineConfigPoolList(oc)
)
mcpList.PrintDebugCommand()
if masterHasTheRightArch && IsCompactOrSNOCluster(oc) {
return mMcp, mMcp.GetNodesOrFail()
}
// we check if there is an already existing pool with all its nodes using the requested architecture
for _, pool := range mcpList.GetAllOrFail() {
if !pool.IsCustom() {
continue
}
// If there isn't a node with the requested architecture in the worker pool,
// but there is a custom pool where all nodes have this architecture
if !pool.IsEmpty() && pool.AllNodesUseArch(arch) {
logger.Infof("Using the predefined MCP %s", pool.GetName())
return &pool, pool.GetNodesOrFail()
}
logger.Infof("The predefined %s MCP exists, but it is not suitable for testing", pool.GetName())
}
// If there are nodes with the requested architecture in the worker pool we build our own custom MCP
if len(wMcp.GetNodesByArchitectureOrFail(arch)) > 0 {
var err error
mcp, err := CreateCustomMCPByLabel(oc.AsAdmin(), createMCPName, fmt.Sprintf(`%s=%s`, architecture.NodeArchitectureLabel, arch), numNodes)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the custom pool for infrastructure %s", architecture.ARM64)
return mcp, mcp.GetNodesOrFail()
}
// If we are in an HA cluster but neither the worker pool nor the custom pools meet the architecture conditions for the test
// we return the master pool if it is using the right architecture
if masterHasTheRightArch {
logger.Infof("The cluster is not a Compact/SNO cluster and there are no %s worker nodes available for testing. We will use the master pool.", arch)
return mMcp, mMcp.GetNodesOrFail()
}
e2e.Failf("Something went wrong. There is no suitable pool to execute the test case using architecture %s", arch)
return nil, nil
}
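// Illustrative usage sketch, not part of the original helpers: picking a pool whose nodes use a given
// architecture. The pool name "mco-arch-test" and the node count are only example values.
//
//	mcp, nodes := GetPoolAndNodesForArchitectureOrFail(oc.AsAdmin(), "mco-arch-test", architecture.ARM64, 1)
//	logger.Infof("Using pool %s with %d %s nodes for testing", mcp.GetName(), len(nodes), architecture.ARM64)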
// GetPoolWithArchDifferentFromOrFail returns a MCP in this order of priority:
// 1) The first non-master pool that is not empty and contains any node NOT using the given architecture
// 2) The master pool if it contains any node NOT using the given architecture (this covers compact and SNO clusters)
func GetPoolWithArchDifferentFromOrFail(oc *exutil.CLI, arch architecture.Architecture) *MachineConfigPool {
var (
mcpList = NewMachineConfigPoolList(oc)
mMcp = NewMachineConfigPool(oc, MachineConfigPoolMaster)
)
mcpList.PrintDebugCommand()
// we check if there is an already existing non-master pool that contains nodes NOT using the given architecture
for _, pool := range mcpList.GetAllOrFail() {
if pool.IsMaster() {
continue
}
// Use the first non-empty pool that has at least one node with an architecture different from the given one
if !pool.IsEmpty() && len(pool.GetNodesWithoutArchitectureOrFail(arch)) > 0 {
logger.Infof("Using pool %s", pool.GetName())
return &pool
}
}
// It includes compact and SNO
if len(mMcp.GetNodesWithoutArchitectureOrFail(arch)) > 0 {
return mMcp
}
e2e.Failf("Something went wrong. There is no suitable pool to execute the test case. There is no pool with nodes using an architecture different from %s", arch)
return nil
}
// DebugDegradedStatus prints the necessary information to debug why a MCP became degraded
func DebugDegradedStatus(mcp *MachineConfigPool) {
var (
nodeList = NewNodeList(mcp.GetOC())
mcc = NewController(mcp.GetOC())
maxMCCLines = 30
maxMCDLines = 30
)
logger.Infof("START DEBUG")
_ = mcp.GetOC().Run("get").Args("co", "machine-config").Execute()
_ = mcp.GetOC().Run("get").Args("mcp").Execute()
_ = mcp.GetOC().Run("get").Args("nodes", "-o", "wide").Execute()
logger.Infof("Not updated MCP %s", mcp.GetName())
logger.Infof("%s", mcp.PrettyString())
logger.Infof("#######################\n\n")
allNodes, err := nodeList.GetAll()
if err == nil {
for _, node := range allNodes {
state := node.GetMachineConfigState()
if state != "Done" {
logger.Infof("NODE %s IS %s", node.GetName(), state)
logger.Infof("%s", node.PrettyString())
logger.Infof("#######################\n\n")
mcdLogs, err := node.GetMCDaemonLogs("")
if err != nil {
logger.Infof("Error getting MCD logs for node %s", node.GetName())
}
logger.Infof("Node %s MCD logs:\n%s", node.GetName(), GetLastNLines(mcdLogs, maxMCDLines))
logger.Infof("#######################\n\n")
logger.Infof("MachineConfigNode:\n%s", node.GetMachineConfigNode().PrettyString())
logger.Infof("#######################\n\n")
}
}
} else {
logger.Infof("Error getting the list of degraded nodes: %s", err)
}
mccLogs, err := mcc.GetLogs()
if err != nil {
logger.Infof("Error getting the logs from MCC: %s", err)
}
logger.Infof("Last %d lines of MCC:\n%s", maxMCCLines, GetLastNLines(mccLogs, maxMCCLines))
logger.Infof("END DEBUG")
}
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
30b2897b-ed06-44ee-aacd-68532b9ce2c4
|
NewMachineConfigPool
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func NewMachineConfigPool(oc *exutil.CLI, name string) *MachineConfigPool {
return &MachineConfigPool{Resource: *NewResource(oc, "mcp", name), MinutesWaitingPerNode: DefaultMinutesWaitingPerNode}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
4b39acae-2172-488d-b29a-913fbf7f9cd7
|
NewMachineConfigPoolList
|
['MachineConfigPoolList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func NewMachineConfigPoolList(oc *exutil.CLI) *MachineConfigPoolList {
return &MachineConfigPoolList{*NewResourceList(oc, "mcp")}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
a8e44ef1-aedd-4a9c-9f56-8c71e810f8bb
|
create
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) create() {
exutil.CreateClusterResourceFromTemplate(mcp.oc, "--ignore-unknown-parameters=true", "-f", mcp.template, "-p", "NAME="+mcp.name)
mcp.waitForComplete()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
585e7369-7665-4690-bb7b-8f66fe296294
|
delete
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) delete() {
logger.Infof("deleting custom mcp: %s", mcp.name)
err := mcp.oc.AsAdmin().WithoutNamespace().Run("delete").Args("mcp", mcp.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
6fb3c47a-c69d-4097-b3d6-43e5ed35c791
|
pause
|
['"strconv"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) pause(enable bool) {
logger.Infof("patch mcp %v, change spec.paused to %v", mcp.name, enable)
err := mcp.Patch("merge", `{"spec":{"paused": `+strconv.FormatBool(enable)+`}}`)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
34ab673f-9200-4de3-b042-2623ead9330f
|
IsPaused
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) IsPaused() bool {
return IsTrue(mcp.GetOrFail(`{.spec.paused}`))
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
4d03c65d-2e31-4875-b4d4-afd18761f076
|
IsCustom
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) IsCustom() bool {
return !mcp.IsMaster() && !mcp.IsWorker()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f930c8d8-67e9-4771-9d7a-acf0c9130d26
|
IsMaster
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) IsMaster() bool {
return mcp.GetName() == MachineConfigPoolMaster
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
22e4365b-93ed-4ea5-949a-0c8582f4acfc
|
IsWorker
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) IsWorker() bool {
return mcp.GetName() == MachineConfigPoolWorker
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
6b96343b-2867-4794-b47a-b16ef27518d2
|
IsEmpty
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) IsEmpty() bool {
var (
numNodes int
)
o.Eventually(func() (err error) {
numNodes, err = mcp.getMachineCount()
return err
}, "2m", "10s").Should(o.Succeed(),
"It was not possible to get the status.machineCount value for MPC %s", mcp.GetName())
return numNodes == 0
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
fe93d251-7190-419f-9e96-f06eb695635c
|
GetMaxUnavailableInt
|
['"strconv"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetMaxUnavailableInt() (int, error) {
maxUnavailableString, err := mcp.Get(`{.spec.maxUnavailable}`)
if err != nil {
return -1, err
}
if maxUnavailableString == "" {
logger.Infof("maxUnavailable not configured in mcp %s, default value is 1", mcp.GetName())
return 1, nil
}
maxUnavailableInt, convErr := strconv.Atoi(maxUnavailableString)
if convErr != nil {
logger.Errorf("Error converting maxUnavailableString to integer: %s", convErr)
return -1, convErr
}
return maxUnavailableInt, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
7210db3c-f5ef-49ef-a8d5-e5ed80ac27ba
|
SetMaxUnavailable
|
['"fmt"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) SetMaxUnavailable(maxUnavailable int) {
logger.Infof("patch mcp %v, change spec.maxUnavailable to %d", mcp.name, maxUnavailable)
err := mcp.Patch("merge", fmt.Sprintf(`{"spec":{"maxUnavailable": %d}}`, maxUnavailable))
o.Expect(err).NotTo(o.HaveOccurred())
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
1bff3a2f-8032-4bcd-b338-c6fec86cc669
|
RemoveMaxUnavailable
|
['"encoding/json"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) RemoveMaxUnavailable() {
logger.Infof("patch mcp %v, removing spec.maxUnavailable", mcp.name)
err := mcp.Patch("json", `[{ "op": "remove", "path": "/spec/maxUnavailable" }]`)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
2b01c3bf-ccee-44b9-8849-a2550fe25fbe
|
getConfigNameOfSpec
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getConfigNameOfSpec() (string, error) {
output, err := mcp.Get(`{.spec.configuration.name}`)
logger.Infof("spec.configuration.name of mcp/%v is %v", mcp.name, output)
return output, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d7ab3b6f-cd5e-414b-a23f-83cb10b04791
|
getConfigNameOfSpecOrFail
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getConfigNameOfSpecOrFail() string {
config, err := mcp.getConfigNameOfSpec()
o.Expect(err).NotTo(o.HaveOccurred(), "Get config name of spec failed")
return config
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d19c4fd0-d80d-461d-b6de-978d86c5ef18
|
getConfigNameOfStatus
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getConfigNameOfStatus() (string, error) {
output, err := mcp.Get(`{.status.configuration.name}`)
logger.Infof("status.configuration.name of mcp/%v is %v", mcp.name, output)
return output, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
64eae2c6-a68a-4588-88bb-ca9a157348ef
|
getConfigNameOfStatusOrFail
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getConfigNameOfStatusOrFail() string {
config, err := mcp.getConfigNameOfStatus()
o.Expect(err).NotTo(o.HaveOccurred(), "Get config name of status failed")
return config
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
173a81bc-ff4b-4cc8-9bca-bb9e010309a7
|
getMachineCount
|
['"fmt"', '"strconv"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getMachineCount() (int, error) {
machineCountStr, ocErr := mcp.Get(`{.status.machineCount}`)
if ocErr != nil {
logger.Infof("Error getting machineCount: %s", ocErr)
return -1, ocErr
}
if machineCountStr == "" {
return -1, fmt.Errorf(".status.machineCount value is not already set in MCP %s", mcp.GetName())
}
machineCount, convErr := strconv.Atoi(machineCountStr)
if convErr != nil {
logger.Errorf("Error converting machineCount to integer: %s", ocErr)
return -1, convErr
}
return machineCount, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
dbf26548-1a97-413b-a970-3ac50763a01f
|
getDegradedMachineCount
|
['"strconv"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getDegradedMachineCount() (int, error) {
dmachineCountStr, ocErr := mcp.Get(`{.status.degradedMachineCount}`)
if ocErr != nil {
logger.Errorf("Error getting degradedmachineCount: %s", ocErr)
return -1, ocErr
}
dmachineCount, convErr := strconv.Atoi(dmachineCountStr)
if convErr != nil {
logger.Errorf("Error converting degradedmachineCount to integer: %s", ocErr)
return -1, convErr
}
return dmachineCount, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
721abd42-c21b-48bd-80c2-db6eb6bb06b9
|
getUpdatedMachineCount
|
['"strconv"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getUpdatedMachineCount() (int, error) {
umachineCountStr, ocErr := mcp.Get(`{.status.updatedMachineCount}`)
if ocErr != nil {
logger.Errorf("Error getting updatedMachineCount: %s", ocErr)
return -1, ocErr
}
umachineCount, convErr := strconv.Atoi(umachineCountStr)
if convErr != nil {
logger.Errorf("Error converting updatedMachineCount to integer: %s", ocErr)
return -1, convErr
}
return umachineCount, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
1aeefcf8-f698-46d6-bbc4-f7fbe893106c
|
pollMachineCount
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) pollMachineCount() func() string {
return mcp.Poll(`{.status.machineCount}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
78bdcb1c-7cae-4e68-84df-ac8338881289
|
pollReadyMachineCount
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) pollReadyMachineCount() func() string {
return mcp.Poll(`{.status.readyMachineCount}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
be7ad851-b143-4dbb-b2d8-c8bb4fa4b2e8
|
pollDegradedMachineCount
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) pollDegradedMachineCount() func() string {
return mcp.Poll(`{.status.degradedMachineCount}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f3b3e893-569d-440c-87f5-b82fb0e35e10
|
GetDegradedStatus
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetDegradedStatus() (string, error) {
return mcp.Get(`{.status.conditions[?(@.type=="Degraded")].status}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
ac5c359c-6ae1-41fd-a28d-c75b7b3fd12a
|
pollDegradedStatus
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) pollDegradedStatus() func() string {
return mcp.Poll(`{.status.conditions[?(@.type=="Degraded")].status}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
83db3b54-6598-4384-ae43-4c04176fc33e
|
GetUpdatedStatus
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetUpdatedStatus() (string, error) {
return mcp.Get(`{.status.conditions[?(@.type=="Updated")].status}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
12c7bcaf-640b-4236-8336-253cebf7450b
|
GetUpdatingStatus
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetUpdatingStatus() (string, error) {
return mcp.Get(`{.status.conditions[?(@.type=="Updating")].status}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
c3c1258c-2f51-49d0-b466-28e7edf1fa13
|
pollUpdatedStatus
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) pollUpdatedStatus() func() string {
return mcp.Poll(`{.status.conditions[?(@.type=="Updated")].status}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
b3e25c8a-2536-480f-a370-3dff227906ab
|
estimateWaitDuration
|
['"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) estimateWaitDuration() time.Duration {
var (
totalNodes int
guessedNodes = 3 // the number of nodes that we will use if we cannot get the actual number of nodes in the cluster
masterAdjust = 1.0
snoModifier = 0.0
emptyMCPWaitDuration = 2.0
minutesDuration = 1 * time.Minute
)
err := Retry(5, 3*time.Second, func() error {
var err error
totalNodes, err = mcp.getMachineCount()
return err
})
if err != nil {
logger.Errorf("Not able to get the number of nodes in the %s MCP. Making a guess of %d nodes. Err: %s", mcp.GetName(), guessedNodes, err)
totalNodes = guessedNodes
}
logger.Infof("Num nodes: %d, wait time per node %d minutes", totalNodes, mcp.MinutesWaitingPerNode)
// If the pool has no node configured, we wait at least 2.0 minutes.
// There are tests that create pools with 0 nodes and wait for the pools to be updated. They can't wait 0 minutes.
// We wait 2.0 minutes and not 1 minute because many functions do not poll immediately and they wait a 1 minute interval before starting to poll.
// If we wait less than this interval the wait function will always fail
if totalNodes == 0 {
logger.Infof("Defining waiting time for pool with no nodes")
return time.Duration(emptyMCPWaitDuration * float64(minutesDuration))
}
if mcp.IsMaster() {
logger.Infof("Increase waiting time because it is master pool")
masterAdjust = 1.3 // if the pool is the master pool, we wait an extra 30% time
}
// Because of https://issues.redhat.com/browse/OCPBUGS-37501 in SNO MCPs can take up to 3 minutes more to be updated because the MCC is not taking the lease properly
if totalNodes == 1 {
var isSNO bool
err = Retry(5, 3*time.Second, func() error {
var snoErr error
isSNO, snoErr = IsSNOSafe(mcp.GetOC())
return snoErr
})
if err != nil {
logger.Errorf("Not able to know if the cluster is SNO. We guess it is SNO. Err: %s", err)
}
if isSNO || err != nil {
logger.Infof("Increase waiting time because it is SNO")
snoModifier = 3
}
}
return time.Duration(((float64(totalNodes*mcp.MinutesWaitingPerNode) * masterAdjust) + snoModifier) * float64(minutesDuration))
}
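// Illustrative sketch, not part of the original helpers: how the estimated duration is typically consumed by the
// waiting helpers in this file. It assumes an initialized *MachineConfigPool named mcp.
//
//	timeToWait := mcp.estimateWaitDuration()
//	logger.Infof("Waiting %s for MCP %s to be updated", timeToWait, mcp.GetName())
//	err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, false, func(_ context.Context) (bool, error) {
//		updated, uerr := mcp.GetUpdatedStatus()
//		if uerr != nil {
//			return false, nil // transient error, keep polling
//		}
//		return updated == "True", nil
//	})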
|
mco
| |||
function
|
openshift/openshift-tests-private
|
7f8e0a83-33bd-4c29-aad1-ddc13e63159e
|
SetWaitingTimeForKernelChange
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) SetWaitingTimeForKernelChange() {
mcp.MinutesWaitingPerNode = DefaultMinutesWaitingPerNode + KernelChangeIncWait
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d786a095-9e03-435a-9b81-ae85e7921a87
|
SetWaitingTimeForExtensionsChange
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) SetWaitingTimeForExtensionsChange() {
mcp.MinutesWaitingPerNode = DefaultMinutesWaitingPerNode + ExtensionsChangeIncWait
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
9dbc8ad4-d1f7-4d33-bd8d-d4ee17fa9781
|
SetDefaultWaitingTime
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) SetDefaultWaitingTime() {
mcp.MinutesWaitingPerNode = DefaultMinutesWaitingPerNode
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d66a5c00-e1cd-40b1-bf79-b2236d412794
|
GetInternalIgnitionConfigURL
|
['"fmt"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetInternalIgnitionConfigURL(secure bool) (string, error) {
var (
// SecurePort is the tls secured port to serve ignition configs
// InsecurePort is the port to serve ignition configs w/o tls
port = IgnitionSecurePort
protocol = "https"
)
internalAPIServerURI, err := GetAPIServerInternalURI(mcp.oc)
if err != nil {
return "", err
}
if !secure {
port = IgnitionInsecurePort
protocol = "http"
}
return fmt.Sprintf("%s://%s:%d/config/%s", protocol, internalAPIServerURI, port, mcp.GetName()), nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
9b3c6f55-ca0e-4d6e-ad5a-491aaf5fa851
|
GetMCSIgnitionConfig
|
['"encoding/json"', '"fmt"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetMCSIgnitionConfig(secure bool, ignitionVersion string) (string, error) {
var (
// SecurePort is the tls secured port to serve ignition configs
// InsecurePort is the port to serve ignition configs w/o tls
port = IgnitionSecurePort
)
if !secure {
port = IgnitionInsecurePort
}
url, err := mcp.GetInternalIgnitionConfigURL(secure)
if err != nil {
return "", err
}
// We will request the config from a master node
mMcp := NewMachineConfigPool(mcp.oc.AsAdmin(), MachineConfigPoolMaster)
masters, err := mMcp.GetNodes()
if err != nil {
return "", err
}
master := masters[0]
logger.Infof("Remove the IPV4 iptables rules that block the ignition config")
removedRules, err := master.RemoveIPTablesRulesByRegexp(fmt.Sprintf("%d", port))
defer master.ExecIPTables(removedRules)
if err != nil {
return "", err
}
logger.Infof("Remove the IPV6 ip6tables rules that block the ignition config")
removed6Rules, err := master.RemoveIP6TablesRulesByRegexp(fmt.Sprintf("%d", port))
defer master.ExecIP6Tables(removed6Rules)
if err != nil {
return "", err
}
cmd := []string{"curl", "-s"}
if secure {
cmd = append(cmd, "-k")
}
if ignitionVersion != "" {
cmd = append(cmd, []string{"-H", fmt.Sprintf("Accept:application/vnd.coreos.ignition+json;version=%s", ignitionVersion)}...)
}
cmd = append(cmd, url)
stdout, stderr, err := master.DebugNodeWithChrootStd(cmd...)
if err != nil {
return stdout + stderr, err
}
return stdout, nil
}
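// Illustrative usage sketch, not part of the original helpers: fetching the ignition config served by the MCS for
// the worker pool. The ignition version "3.4.0" is only an example value.
//
//	wMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
//	ignitionConfig, err := wMcp.GetMCSIgnitionConfig(true, "3.4.0")
//	o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the ignition config for pool %s", wMcp.GetName())
//	logger.Infof("Ignition config served for pool %s:\n%s", wMcp.GetName(), ignitionConfig)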
|
mco
| |||
function
|
openshift/openshift-tests-private
|
f3b4c193-38cb-4382-bba0-6ee08751ba98
|
getSelectedNodes
|
['"fmt"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) getSelectedNodes(extraLabels string) ([]Node, error) {
mcp.oc.NotShowInfo()
defer mcp.oc.SetShowInfo()
labelsString, err := mcp.Get(`{.spec.nodeSelector.matchLabels}`)
if err != nil {
return nil, err
}
labels := JSON(labelsString)
o.Expect(labels.Exists()).Should(o.BeTrue(), fmt.Sprintf("The pool has no matchLabels value defined: %s", mcp.PrettyString()))
nodeList := NewNodeList(mcp.oc)
// Never select windows nodes
requiredLabel := "kubernetes.io/os!=windows"
if extraLabels != "" {
requiredLabel += ","
requiredLabel += extraLabels
}
for k, v := range labels.ToMap() {
requiredLabel += fmt.Sprintf(",%s=%s", k, v.(string))
}
nodeList.ByLabel(requiredLabel)
return nodeList.GetAll()
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
5fe0372a-c53e-41c4-bd4d-7a7b03237efc
|
GetNodesByLabel
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetNodesByLabel(labels string) ([]Node, error) {
mcp.oc.NotShowInfo()
defer mcp.oc.SetShowInfo()
nodes, err := mcp.getSelectedNodes(labels)
if err != nil {
return nil, err
}
returnNodes := []Node{}
for _, item := range nodes {
node := item
primaryPool, err := node.GetPrimaryPool()
if err != nil {
return nil, err
}
if primaryPool.GetName() == mcp.GetName() {
returnNodes = append(returnNodes, node)
}
}
return returnNodes, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d6577fb6-7a33-4a28-a6f1-751f99a4957d
|
GetNodes
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetNodes() ([]Node, error) {
return mcp.GetNodesByLabel("")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
07802b95-4b63-474c-9e5b-2dd0e76d44e7
|
GetNodesWithoutArchitecture
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetNodesWithoutArchitecture(arch architecture.Architecture, archs ...architecture.Architecture) ([]Node, error) {
archsList := arch.String()
for _, itemArch := range archs {
archsList = archsList + "," + itemArch.String()
}
return mcp.GetNodesByLabel(fmt.Sprintf(`%s notin (%s)`, architecture.NodeArchitectureLabel, archsList))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
b487577a-24aa-4b7e-b8db-7e08aa822304
|
GetNodesWithoutArchitectureOrFail
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetNodesWithoutArchitectureOrFail(arch architecture.Architecture, archs ...architecture.Architecture) []Node {
nodes, err := mcp.GetNodesWithoutArchitecture(arch, archs...)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "In MCP %s. Cannot get the nodes NOT using architectures %s", mcp.GetName(), append(archs, arch))
return nodes
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
94d95a82-0f82-4d16-a510-5488160a6c5a
|
GetNodesByArchitecture
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetNodesByArchitecture(arch architecture.Architecture, archs ...architecture.Architecture) ([]Node, error) {
archsList := arch.String()
for _, itemArch := range archs {
archsList = archsList + "," + itemArch.String()
}
return mcp.GetNodesByLabel(fmt.Sprintf(`%s in (%s)`, architecture.NodeArchitectureLabel, archsList))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
7842e9c4-fee8-4145-9dc3-db125faffaa1
|
GetNodesByArchitectureOrFail
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetNodesByArchitectureOrFail(arch architecture.Architecture, archs ...architecture.Architecture) []Node {
nodes, err := mcp.GetNodesByArchitecture(arch, archs...)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "In MCP %s. Cannot get the nodes using architectures %s", mcp.GetName(), append(archs, arch))
return nodes
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
bcd50837-9344-4c12-bec4-142a2f5f223e
|
GetNodesOrFail
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetNodesOrFail() []Node {
ns, err := mcp.GetNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the nodes in %s MCP", mcp.GetName())
return ns
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
a45ec25f-5a21-4405-87e2-dad782ad924c
|
GetCoreOsNodes
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetCoreOsNodes() ([]Node, error) {
return mcp.GetNodesByLabel("node.openshift.io/os_id=rhcos")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
045ae0ca-c6ca-43f0-9c3f-ded77f668313
|
GetCoreOsNodesOrFail
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetCoreOsNodesOrFail() []Node {
ns, err := mcp.GetCoreOsNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the coreOS nodes in %s MCP", mcp.GetName())
return ns
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
20b7e218-74b3-4e1f-b8bb-2b323cfb73c6
|
GetRhelNodes
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetRhelNodes() ([]Node, error) {
return mcp.GetNodesByLabel("node.openshift.io/os_id=rhel")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
73c407a5-c351-4c3a-8533-26c3e71a5cd1
|
GetRhelNodesOrFail
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetRhelNodesOrFail() []Node {
ns, err := mcp.GetRhelNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the rhel nodes in %s MCP", mcp.GetName())
return ns
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
ed961ed0-8fc2-4621-8489-f540a6aebbc1
|
GetSortedNodes
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetSortedNodes() ([]Node, error) {
poolNodes, err := mcp.GetNodes()
if err != nil {
return nil, err
}
if !mcp.IsMaster() {
return sortNodeList(poolNodes), nil
}
return sortMasterNodeList(mcp.oc, poolNodes)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
9f9e11ea-1542-4e94-adc9-0477246c4c2e
|
GetSortedNodesOrFail
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetSortedNodesOrFail() []Node {
nodes, err := mcp.GetSortedNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(),
"Cannot get the list of nodes that belong to '%s' MCP", mcp.GetName())
return nodes
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
8173c6d6-dd4d-4b21-b484-1540a8f606bc
|
GetSortedUpdatedNodes
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetSortedUpdatedNodes(maxUnavailable int) []Node {
timeToWait := mcp.estimateWaitDuration()
logger.Infof("Waiting %s in pool %s for all nodes to start updating.", timeToWait, mcp.name)
poolNodes, errget := mcp.GetNodes()
o.Expect(errget).NotTo(o.HaveOccurred(), fmt.Sprintf("Cannot get nodes in pool %s", mcp.GetName()))
pendingNodes := poolNodes
updatedNodes := []Node{}
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 20*time.Second, timeToWait, immediate, func(_ context.Context) (bool, error) {
// If there are degraded machines, stop polling, directly fail
degradedstdout, degradederr := mcp.getDegradedMachineCount()
if degradederr != nil {
logger.Errorf("the err:%v, and try next round", degradederr)
return false, nil
}
if degradedstdout != 0 {
logger.Errorf("Degraded MC:\n%s", mcp.PrettyString())
exutil.AssertWaitPollNoErr(fmt.Errorf("Degraded machines"), fmt.Sprintf("mcp %s has degraded %d machines", mcp.name, degradedstdout))
}
// Check that there aren't more than maxUnavailable nodes updating at the same time
if maxUnavailable > 0 {
totalUpdating := 0
for _, node := range poolNodes {
if node.IsUpdating() {
totalUpdating++
}
}
if totalUpdating > maxUnavailable {
// print nodes for debug
mcp.oc.Run("get").Args("nodes").Execute()
exutil.AssertWaitPollNoErr(fmt.Errorf("maxUnavailable Not Honored. Pool %s, error: %d nodes were updating at the same time. Only %d nodes should be updating at the same time", mcp.GetName(), totalUpdating, maxUnavailable), "")
}
}
remainingNodes := []Node{}
for _, node := range pendingNodes {
if node.IsUpdating() {
logger.Infof("Node %s is UPDATING", node.GetName())
updatedNodes = append(updatedNodes, node)
} else {
remainingNodes = append(remainingNodes, node)
}
}
if len(remainingNodes) == 0 {
logger.Infof("All nodes have started to be updated on mcp %s", mcp.name)
return true, nil
}
logger.Infof(" %d remaining nodes", len(remainingNodes))
pendingNodes = remainingNodes
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Could not get the list of updated nodes on mcp %s", mcp.name))
return updatedNodes
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
1696bbff-912e-4366-8560-492b8c56c2df
|
GetCordonedNodes
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetCordonedNodes() []Node {
// requirement is: when pool is in updating state, get the updating node list
o.Expect(mcp.WaitForUpdatingStatus()).NotTo(o.HaveOccurred(), "Waiting for Updating status change failed")
// poll all nodes in this pool and collect the cordoned ones (SchedulingDisabled)
var allUpdatingNodes []Node
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 10*time.Minute, true, func(_ context.Context) (bool, error) {
nodes, nerr := mcp.GetNodes()
if nerr != nil {
return false, fmt.Errorf("Get all linux node failed, will try again in next run %v", nerr)
}
for _, node := range nodes {
schedulable, serr := node.IsSchedulable()
if serr != nil {
logger.Errorf("Checking node is schedulable failed %v", serr)
continue
}
if !schedulable {
allUpdatingNodes = append(allUpdatingNodes, node)
}
}
return len(allUpdatingNodes) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Could not get the list of updating nodes on mcp %s", mcp.GetName()))
return allUpdatingNodes
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
f297ff43-6915-4bcb-a00f-38fd0a9027d7
|
GetUnreconcilableNodes
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetUnreconcilableNodes() ([]Node, error) {
allUnreconcilableNodes := []Node{}
allNodes, err := mcp.GetNodes()
if err != nil {
return nil, err
}
for _, n := range allNodes {
state := n.GetAnnotationOrFail(NodeAnnotationState)
if state == "Unreconcilable" {
allUnreconcilableNodes = append(allUnreconcilableNodes, n)
}
}
return allUnreconcilableNodes, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
dd01617b-d401-4947-b083-a71d998cfdc0
|
GetUnreconcilableNodesOrFail
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetUnreconcilableNodesOrFail() []Node {
allUnreconcilableNodes, err := mcp.GetUnreconcilableNodes()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Cannot get the unreconcilable nodes in %s MCP", mcp.GetName())
return allUnreconcilableNodes
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
82ce0686-3e36-4112-8a28-0b6cda9a6d3b
|
WaitForNotDegradedStatus
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp MachineConfigPool) WaitForNotDegradedStatus() error {
timeToWait := mcp.estimateWaitDuration()
logger.Infof("Waiting %s for MCP %s status to be not degraded.", timeToWait, mcp.name)
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
stdout, err := mcp.GetDegradedStatus()
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, "False") {
logger.Infof("MCP degraded status is False %s", mcp.name)
return true, nil
}
return false, nil
})
if err != nil {
logger.Errorf("MCP: %s .Error waiting for not degraded status: %s", mcp.GetName(), err)
}
return err
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
136ad241-5705-419e-8a63-1eb6e53b548f
|
WaitForUpdatedStatus
|
['"time"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp MachineConfigPool) WaitForUpdatedStatus() error {
return mcp.waitForConditionStatus("Updated", "True", mcp.estimateWaitDuration(), 1*time.Minute, false)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
ae6a1402-af0f-41b9-8a91-44d340207cbb
|
WaitImmediateForUpdatedStatus
|
['"time"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp MachineConfigPool) WaitImmediateForUpdatedStatus() error {
return mcp.waitForConditionStatus("Updated", "True", mcp.estimateWaitDuration(), 1*time.Minute, true)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
15ac2662-0e34-4344-982b-aefc9577c9cc
|
WaitForUpdatingStatus
|
['"time"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp MachineConfigPool) WaitForUpdatingStatus() error {
return mcp.waitForConditionStatus("Updating", "True", 10*time.Minute, 5*time.Second, true)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
6a4403cc-9424-45ff-a214-7e8a1ddb253f
|
waitForConditionStatus
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp MachineConfigPool) waitForConditionStatus(condition, status string, timeout, interval time.Duration, immediate bool) error {
logger.Infof("Waiting %s for MCP %s condition %s to be %s", timeout, mcp.GetName(), condition, status)
err := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, immediate, func(_ context.Context) (bool, error) {
stdout, err := mcp.Get(`{.status.conditions[?(@.type=="` + condition + `")].status}`)
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, status) {
logger.Infof("MCP %s condition %s status is %s", mcp.GetName(), condition, stdout)
return true, nil
}
return false, nil
})
if err != nil {
logger.Errorf("MCP: %s .Error waiting for %s status: %s", mcp.GetName(), condition, err)
}
return err
}
|
mco
| |||
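The exported Wait* helpers above are thin wrappers over waitForConditionStatus. The sketch below shows another wrapper built the same way, assuming it lives in this package; the Degraded condition type exists in the MCP status, but the interval and timeout values here are illustrative.
// Illustrative sketch, not part of the source: wait for the pool to report Degraded=True,
// mirroring the interval/timeout used by WaitForUpdatingStatus (values are assumptions).
func (mcp MachineConfigPool) waitForDegradedTrueStatusExample() error {
	return mcp.waitForConditionStatus("Degraded", "True", 10*time.Minute, 5*time.Second, true)
}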
function
|
openshift/openshift-tests-private
|
f5c0f962-897c-40a8-9ea2-885d52f0e407
|
WaitForMachineCount
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp MachineConfigPool) WaitForMachineCount(expectedMachineCount int, timeToWait time.Duration) error {
logger.Infof("Waiting %s for MCP %s to report %d machine count.", timeToWait, mcp.GetName(), expectedMachineCount)
immediate := true
err := wait.PollUntilContextTimeout(context.TODO(), 30*time.Second, timeToWait, immediate, func(_ context.Context) (bool, error) {
mCount, err := mcp.getMachineCount()
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if mCount == expectedMachineCount {
logger.Infof("MCP is reporting %d machine count", mCount)
return true, nil
}
logger.Infof("Expected machine count %d. Reported machine count %d", expectedMachineCount, mCount)
return false, nil
})
if err != nil {
logger.Errorf("MCP: %s .Error waiting for %d machine count: %s", mcp.GetName(), expectedMachineCount, err)
}
return err
}
|
mco
| |||
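A hedged usage sketch for WaitForMachineCount, assuming the pool has just been scaled up by one node elsewhere in the test; the 20-minute timeout is an illustrative value, not from this excerpt.
// Illustrative only: after scaling the pool, block until the new machine count is reported.
func exampleWaitForMachineCount(wMcp *MachineConfigPool, initialCount int) {
	err := wMcp.WaitForMachineCount(initialCount+1, 20*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred(), "Pool %s never reported the expected machine count", wMcp.GetName())
}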
function
|
openshift/openshift-tests-private
|
6c711a77-7269-4480-98a8-4eb7862084eb
|
waitForComplete
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) waitForComplete() {
timeToWait := mcp.estimateWaitDuration()
logger.Infof("Waiting %s for MCP %s to be completed.", timeToWait, mcp.name)
waitFunc := func(_ context.Context) (bool, error) {
defer g.GinkgoRecover()
// If there are degraded machines, stop polling, directly fail
degradedstdout, degradederr := mcp.getDegradedMachineCount()
if degradederr != nil {
logger.Errorf("Error getting the number of degraded machines. Try next round: %s", degradederr)
return false, nil
}
if degradedstdout != 0 {
return true, fmt.Errorf("mcp %s has degraded %d machines", mcp.name, degradedstdout)
}
degradedStatus, err := mcp.GetDegradedStatus()
if err != nil {
logger.Errorf("Error getting degraded status.Try next round: %s", err)
return false, nil
}
if degradedStatus != FalseString {
return true, fmt.Errorf("mcp %s has degraded status: %s", mcp.name, degradedStatus)
}
stdout, err := mcp.Get(`{.status.conditions[?(@.type=="Updated")].status}`)
if err != nil {
logger.Errorf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, "True") {
// i.e. mcp updated=true, mc is applied successfully
logger.Infof("The new MC has been successfully applied to MCP '%s'", mcp.name)
return true, nil
}
return false, nil
}
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, waitFunc)
if err != nil && !strings.Contains(err.Error(), "degraded") {
mccLogs, logErr := NewController(mcp.GetOC()).GetLogs()
if logErr != nil {
logger.Errorf("Error getting MCC logs. Cannot check if drain is taking too long")
} else {
mccLatestLogs := GetLastNLines(mccLogs, 20)
if strings.Contains(mccLatestLogs, "error when evicting") {
logger.Infof("Some pods are taking too long to be evicted:\n%s", mccLatestLogs)
logger.Infof("Waiting for MCP %s another round! %s", mcp.name, timeToWait)
immediate = true
err = wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, waitFunc)
}
}
}
if err != nil {
exutil.ArchiveMustGatherFile(mcp.GetOC(), extractJournalLogs)
DebugDegradedStatus(mcp)
}
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), fmt.Sprintf("mc operation is not completed on mcp %s: %s", mcp.name, err))
}
|
mco
| |||
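A short usage sketch for waitForComplete, assuming it is called from a test in this package right after a configuration change has been applied to the pool (the change itself is elided).
// Illustrative only: waitForComplete asserts internally via gomega, so it fails the test
// on timeout or degradation instead of returning an error.
func exampleWaitForComplete(wMcp *MachineConfigPool) {
	wMcp.waitForComplete()
	logger.Infof("Pool %s finished applying the new configuration", wMcp.GetName())
}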
function
|
openshift/openshift-tests-private
|
13545c8d-ff42-41db-916b-766703e4192d
|
GetPoolSynchronizersStatusByType
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetPoolSynchronizersStatusByType(pType string) (string, error) {
return mcp.Get(`{.status.poolSynchronizersStatus[?(@.poolSynchronizerType=="` + pType + `")]}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
adb333d9-090a-4b4d-a730-e2dab3102552
|
IsPinnedImagesComplete
|
['"fmt"', '"github.com/tidwall/gjson"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) IsPinnedImagesComplete() (bool, error) {
pinnedStatus, err := mcp.GetPoolSynchronizersStatusByType("PinnedImageSets")
if err != nil {
return false, err
}
logger.Infof("Pinned status: %s", pinnedStatus)
mcpMachineCount, err := mcp.Get(`{.status.machineCount}`)
if err != nil {
return false, err
}
if mcpMachineCount == "" {
return false, fmt.Errorf("status.machineCount is empty in mcp %s", mcp.GetName())
}
pinnedMachineCount := gjson.Get(pinnedStatus, "machineCount").String()
if pinnedMachineCount == "" {
return false, fmt.Errorf("pinned status machineCount is empty in mcp %s", mcp.GetName())
}
pinnedUnavailableMachineCount := gjson.Get(pinnedStatus, "unavailableMachineCount").String()
if pinnedUnavailableMachineCount == "" {
return false, fmt.Errorf("pinned status unavailableMachineCount is empty in mcp %s", mcp.GetName())
}
updatedMachineCount := gjson.Get(pinnedStatus, "updatedMachineCount").String()
if updatedMachineCount == "" {
return false, fmt.Errorf("pinned status updatedMachineCount is empty in mcp %s", mcp.GetName())
}
return mcpMachineCount == pinnedMachineCount && updatedMachineCount == pinnedMachineCount && pinnedUnavailableMachineCount == "0", nil
}
|
mco
| |||
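A minimal sketch of the gjson lookups that IsPinnedImagesComplete relies on; the JSON literal below only mimics the shape of a poolSynchronizersStatus entry and its values are made up.
// Illustrative only: gjson.Get extracts fields from the raw JSON string returned by
// GetPoolSynchronizersStatusByType; the payload here is a hand-written example.
func examplePinnedStatusParsing() {
	pinnedStatus := `{"poolSynchronizerType":"PinnedImageSets","machineCount":3,"updatedMachineCount":3,"unavailableMachineCount":0}`
	logger.Infof("machineCount=%s updated=%s unavailable=%s",
		gjson.Get(pinnedStatus, "machineCount").String(),
		gjson.Get(pinnedStatus, "updatedMachineCount").String(),
		gjson.Get(pinnedStatus, "unavailableMachineCount").String())
}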
function
|
openshift/openshift-tests-private
|
39925af7-89cc-4c48-a717-165bafe87ef6
|
allNodesReportingPinnedSuccess
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) allNodesReportingPinnedSuccess() (bool, error) {
allNodes, err := mcp.GetNodes()
if err != nil {
return false, err
}
if len(allNodes) == 0 {
logger.Infof("Warning, pool %s has no nodes!! We consider all nodes as correctly pinned", mcp.GetName())
}
for _, node := range allNodes {
nodeMCN := node.GetMachineConfigNode()
if nodeMCN.IsPinnedImageSetsDegraded() {
logger.Infof("Node %s is pinned degraded. Condition:\n%s", node.GetName(), nodeMCN.GetConditionByType("PinnedImageSetsDegraded"))
return false, nil
}
if nodeMCN.IsPinnedImageSetsProgressing() {
return false, nil
}
}
return true, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
74ef596e-6d9d-4b30-9850-4398a2b04378
|
waitForPinComplete
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) waitForPinComplete(timeToWait time.Duration) error {
logger.Infof("Waiting %s for MCP %s to complete pinned images.", timeToWait, mcp.name)
immediate := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
pinnedComplete, err := mcp.IsPinnedImagesComplete()
if err != nil {
logger.Infof("Error getting pinned complete: %s", err)
return false, err
}
if !pinnedComplete {
logger.Infof("Waiting for PinnedImageSets poolSynchronizersStatus status to repot success")
return false, nil
}
allNodesComplete, err := mcp.allNodesReportingPinnedSuccess()
if err != nil {
logger.Infof("Error getting if all nodes finished")
return false, err
}
if !allNodesComplete {
logger.Infof("Waiting for all nodes to report pinned images success")
return false, nil
}
logger.Infof("Pool %s successfully pinned the images! Complete!", mcp.GetName())
return true, nil
})
if err != nil {
logger.Infof("Pinned images operation is not completed on mcp %s", mcp.name)
}
return err
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
211b7835-527f-4172-8981-c558c269e12e
|
waitForPinApplied
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) waitForPinApplied(timeToWait time.Duration) error {
logger.Infof("Waiting %s for MCP %s to apply pinned images.", timeToWait, mcp.name)
immediate := true
pinnedStarted := false
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
pinnedComplete, err := mcp.IsPinnedImagesComplete()
if err != nil {
logger.Infof("Error getting pinned complete: %s", err)
return false, err
}
if !pinnedStarted && !pinnedComplete {
pinnedStarted = true
logger.Infof("Pool %s has started to pin images", mcp.GetName())
}
if pinnedStarted {
if !pinnedComplete {
logger.Infof("Waiting for PinnedImageSets poolSynchronizersStatus status to repot success")
return false, nil
}
allNodesComplete, err := mcp.allNodesReportingPinnedSuccess()
if err != nil {
logger.Infof("Error getting if all nodes finished")
return false, err
}
if !allNodesComplete {
logger.Infof("Waiting for all nodes to report pinned images success")
return false, nil
}
logger.Infof("Pool %s successfully pinned the images! Complete!", mcp.GetName())
return true, nil
}
logger.Infof("Pool %s has not started to pin images yet", mcp.GetName())
return false, nil
})
if err != nil {
logger.Infof("Pinned images operation is not applied on mcp %s", mcp.name)
}
return err
}
|
mco
| |||
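A hedged usage sketch for the pinned-image waiters, assuming a PinnedImageSet targeting this pool has already been created elsewhere in the test; the 30-minute timeout is illustrative.
// Illustrative only: wait for the pin operation to start and finish end to end.
func exampleWaitForPinApplied(wMcp *MachineConfigPool) {
	o.Expect(wMcp.waitForPinApplied(30*time.Minute)).NotTo(o.HaveOccurred(),
		"Pinned images were not applied in pool %s", wMcp.GetName())
}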
function
|
openshift/openshift-tests-private
|
a5ebe288-d1e7-41fa-ab04-0728747cd59f
|
GetReportedOsImageOverrideValue
|
['"fmt"', '"strings"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetReportedOsImageOverrideValue() (string, error) {
query := fmt.Sprintf(`os_image_url_override{pool="%s"}`, strings.ToLower(mcp.GetName()))
mon, err := exutil.NewMonitor(mcp.oc.AsAdmin())
if err != nil {
return "", err
}
osImageOverride, err := mon.SimpleQuery(query)
if err != nil {
return "", err
}
jsonOsImageOverride := JSON(osImageOverride)
status := jsonOsImageOverride.Get("status").ToString()
if status != "success" {
return "", fmt.Errorf("Query %s execution failed: %s", query, osImageOverride)
}
logger.Infof("%s metric is:%s", query, osImageOverride)
metricValue := JSON(osImageOverride).Get("data").Get("result").Item(0).Get("value").Item(1).ToString()
return metricValue, nil
}
|
mco
| |||
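A usage sketch for GetReportedOsImageOverrideValue, assuming the common case where no osImageURL override is configured and the metric is therefore expected to report "0"; that expectation is an assumption about the metric's semantics, not stated in this excerpt.
// Illustrative only: assert the os_image_url_override metric value for this pool.
func exampleOsImageOverrideMetric(wMcp *MachineConfigPool) {
	value, err := wMcp.GetReportedOsImageOverrideValue()
	o.Expect(err).NotTo(o.HaveOccurred(), "Error querying os_image_url_override for pool %s", wMcp.GetName())
	o.Expect(value).To(o.Equal("0"), "os_image_url_override should report 0 when no override is configured")
}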
function
|
openshift/openshift-tests-private
|
6ecf4936-739e-4cc5-b45c-a060d9af4a2f
|
RecoverFromDegraded
|
['"fmt"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) RecoverFromDegraded() error {
logger.Infof("Recovering %s pool from degraded status", mcp.GetName())
mcpNodes, _ := mcp.GetNodes()
for _, node := range mcpNodes {
logger.Infof("Restoring desired config in node: %s", node)
if node.IsUpdated() {
logger.Infof("node is updated, don't need to recover")
} else {
err := node.RestoreDesiredConfig()
if err != nil {
return fmt.Errorf("Error restoring desired config in node %s. Error: %s",
mcp.GetName(), err)
}
}
}
derr := mcp.WaitForNotDegradedStatus()
if derr != nil {
logger.Infof("Could not recover from the degraded status: %s", derr)
return derr
}
uerr := mcp.WaitForUpdatedStatus()
if uerr != nil {
logger.Infof("Could not recover from the degraded status: %s", uerr)
return uerr
}
return nil
}
|
mco
| |||
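A sketch of how RecoverFromDegraded is typically used, assuming a test that deliberately degrades the pool and wants to leave the cluster healthy afterwards; the deferred-cleanup pattern is an assumption about usage.
// Illustrative only: register the recovery as a deferred cleanup before degrading the pool.
func exampleRecoverFromDegraded(wMcp *MachineConfigPool) {
	defer func() {
		o.Expect(wMcp.RecoverFromDegraded()).NotTo(o.HaveOccurred(),
			"Could not recover pool %s from degraded status", wMcp.GetName())
	}()
	// ... steps that may leave the pool degraded go here ...
}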
function
|
openshift/openshift-tests-private
|
e1ab7743-b839-4ea8-bb3c-6a14663dc009
|
IsRealTimeKernel
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) IsRealTimeKernel() (bool, error) {
nodes, err := mcp.GetNodes()
if err != nil {
logger.Errorf("Error getting the nodes in pool %s", mcp.GetName())
return false, err
}
if len(nodes) == 0 {
return false, fmt.Errorf("no nodes found in pool %s, cannot check the kernel type", mcp.GetName())
}
// All nodes in a pool share the same configuration, so checking the first node is enough
return nodes[0].IsRealTimeKernel()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
002b0060-43cd-4296-b6b0-02d411aba3b2
|
GetConfiguredMachineConfig
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetConfiguredMachineConfig() (*MachineConfig, error) {
currentMcName, err := mcp.Get("{.status.configuration.name}")
if err != nil {
logger.Errorf("Error getting the currently configured MC in pool %s: %s", mcp.GetName(), err)
return nil, err
}
logger.Debugf("The currently configured MC in pool %s is: %s", mcp.GetName(), currentMcName)
return NewMachineConfig(mcp.oc, currentMcName, mcp.GetName()), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
fc61add2-93d2-4ef4-b1a1-2b3ee1f7343d
|
SanityCheck
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) SanityCheck() error {
timeToWait := mcp.estimateWaitDuration() / 13
logger.Infof("Waiting %s for MCP %s to be completed.", timeToWait.Round(time.Second), mcp.name)
const trueStatus = "True"
var message string
immediate := true
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
// If there are degraded machines, stop polling, directly fail
degraded, degradederr := mcp.GetDegradedStatus()
if degradederr != nil {
message = fmt.Sprintf("Error gettting Degraded status: %s", degradederr)
return false, nil
}
if degraded == trueStatus {
message = fmt.Sprintf("MCP '%s' is degraded", mcp.GetName())
return false, nil
}
updated, err := mcp.GetUpdatedStatus()
if err != nil {
message = fmt.Sprintf("Error gettting Updated status: %s", err)
return false, nil
}
if updated == trueStatus {
logger.Infof("MCP '%s' is ready for testing", mcp.name)
return true, nil
}
message = fmt.Sprintf("MCP '%s' is not updated", mcp.GetName())
return false, nil
})
if err != nil {
return fmt.Errorf("%s", message)
}
return nil
}
|
mco
| |||
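A short usage sketch for SanityCheck, assuming it is used as an early guard in a test so that a broken pool fails fast instead of burning the full rollout timeout.
// Illustrative only: fail early when the pool is degraded or not updated.
func exampleSanityCheck(wMcp *MachineConfigPool) {
	o.Expect(wMcp.SanityCheck()).NotTo(o.HaveOccurred(),
		"%s MCP is not ready for testing", wMcp.GetName())
}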
function
|
openshift/openshift-tests-private
|
ccd5c00f-c8e7-4e07-9796-294830e662fa
|
GetCertsExpiry
|
['"encoding/json"']
|
['CertExpiry', 'MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetCertsExpiry() ([]CertExpiry, error) {
expiryString, err := mcp.Get(`{.status.certExpirys}`)
if err != nil {
return nil, err
}
var certsExp []CertExpiry
jsonerr := json.Unmarshal([]byte(expiryString), &certsExp)
if jsonerr != nil {
return nil, jsonerr
}
return certsExp, nil
}
|
mco
| |||
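A minimal usage sketch for GetCertsExpiry; the CertExpiry fields are not shown in this excerpt, so the sketch only reports how many expiry entries the pool exposes.
// Illustrative only: read the certExpirys entries from the pool status.
func exampleCertsExpiry(mMcp *MachineConfigPool) {
	certs, err := mMcp.GetCertsExpiry()
	o.Expect(err).NotTo(o.HaveOccurred(), "Error getting certExpirys from pool %s", mMcp.GetName())
	logger.Infof("Pool %s reports %d certificate expiry entries", mMcp.GetName(), len(certs))
}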
function
|
openshift/openshift-tests-private
|
8d578afa-2a6a-4066-86a2-727ac381c2cf
|
GetArchitectures
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetArchitectures() ([]architecture.Architecture, error) {
archs := []architecture.Architecture{}
nodes, err := mcp.GetNodes()
if err != nil {
return archs, err
}
for _, node := range nodes {
archs = append(archs, node.GetArchitectureOrFail())
}
return archs, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
7a90580f-a0fd-4bd5-855e-7cabf5d43f2f
|
GetArchitecturesOrFail
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) GetArchitecturesOrFail() []architecture.Architecture {
archs, err := mcp.GetArchitectures()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error getting the architectures used by nodes in MCP %s", mcp.GetName())
return archs
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
8b1fabeb-6722-4f32-9321-f9cb5d41ccec
|
AllNodesUseArch
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['MachineConfigPool']
|
github.com/openshift/openshift-tests-private/test/extended/mco/machineconfigpool.go
|
func (mcp *MachineConfigPool) AllNodesUseArch(arch architecture.Architecture) bool {
for _, currentArch := range mcp.GetArchitecturesOrFail() {
if arch != currentArch {
return false
}
}
return true
}
|
mco
|
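A usage sketch for the architecture helpers, assuming architecture.AMD64 is one of the constants exposed by the imported architecture package (an assumption, since the constant itself is not shown in this excerpt).
// Illustrative only: skip amd64-specific checks when the pool is not homogeneous amd64.
func exampleArchCheck(wMcp *MachineConfigPool) {
	if !wMcp.AllNodesUseArch(architecture.AMD64) {
		logger.Infof("Pool %s is not an all-amd64 pool, skipping arch-specific checks", wMcp.GetName())
		return
	}
	logger.Infof("All nodes in pool %s are amd64", wMcp.GetName())
}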