element_type
stringclasses 4
values | project_name
stringclasses 1
value | uuid
stringlengths 36
36
| name
stringlengths 0
346
| imports
stringlengths 0
2.67k
| structs
stringclasses 761
values | interfaces
stringclasses 22
values | file_location
stringclasses 545
values | code
stringlengths 26
8.07M
| global_vars
stringclasses 7
values | package
stringclasses 124
values | tags
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|
test case
|
openshift/openshift-tests-private
|
84c6cac3-3d91-4885-9335-83cb354f6afc
|
Author:dis-Medium-46724-cvo defaults deployment replicas to one if it's unset in manifest [Flaky]
|
['"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-46724: CVO should default a Deployment's replicas to one when the
// release manifest leaves .spec.replicas unset. Uses
// openshift-insights/insights-operator (whose manifest carries no replicas
// field) as the probe: after scaling it to 0 and to 2, CVO must reconcile
// replicas back to 1 within 5 minutes each time.
g.It("Author:dis-Medium-46724-cvo defaults deployment replicas to one if it's unset in manifest [Flaky]", func() {
exutil.SkipBaselineCaps(oc, "None, v4.11")
exutil.By("Check the replicas for openshift-insights/insights-operator is unset in manifest")
tempDataDir, err := extractManifest(oc)
// Cleanup registered before the error check: os.RemoveAll of an empty/missing
// path returns nil, so this is safe even when extractManifest failed.
defer func() { o.Expect(os.RemoveAll(tempDataDir)).NotTo(o.HaveOccurred()) }()
o.Expect(err).NotTo(o.HaveOccurred())
manifestDir := filepath.Join(tempDataDir, "manifest")
namespace, name := "openshift-insights", "insights-operator"
// Find the Deployment manifest for insights-operator and grep it for a
// replicas field; grep exits non-zero when nothing matches.
cmd := fmt.Sprintf(
"grep -rlZ 'kind: Deployment' %s | xargs -0 grep -l 'name: %s\\|namespace: %s' | xargs grep replicas",
manifestDir, name, namespace)
e2e.Logf("executing: bash -c %s", cmd)
out, err := exec.Command("bash", "-c", cmd).CombinedOutput()
// We expect no replicas could be found, so the cmd should return with non-zero
o.Expect(err).To(o.HaveOccurred(), "Command: \"%s\" returned success instead of error: %s", cmd, string(out))
o.Expect(string(out)).To(o.BeEmpty())
exutil.By("Check only one insights-operator pod in a fresh installed cluster")
num, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args("deployment", name,
"-o=jsonpath={.spec.replicas}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(num).To(o.Equal("1"))
// Safety net: whatever happens below, leave the deployment scaled back to 1.
defer func() {
out, err := oc.AsAdmin().WithoutNamespace().Run("scale").
Args("--replicas", "1",
fmt.Sprintf("deployment/%s", name),
"-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred(), out)
}()
exutil.By("Scale down insights-operator replica to 0")
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").
Args("--replicas", "0",
fmt.Sprintf("deployment/%s", name),
"-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the insights-operator replica recovers to one")
// CVO reconcile loop should restore the defaulted replica count; poll every
// 30s for up to 5 minutes.
exutil.AssertWaitPollNoErr(wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
num, err = oc.AsAdmin().WithoutNamespace().Run("get").
Args("deployment", name,
"-o=jsonpath={.spec.replicas}",
"-n", namespace).Output()
return num == "1", err
}), "insights-operator replicas is not 1")
exutil.By("Scale up insights-operator replica to 2")
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").
Args("--replicas", "2",
fmt.Sprintf("deployment/%s", name),
"-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the insights-operator replica recovers to one")
// Same reconciliation check for the scale-up direction.
exutil.AssertWaitPollNoErr(wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
num, err = oc.AsAdmin().WithoutNamespace().Run("get").
Args("deployment", name,
"-o=jsonpath={.spec.replicas}",
"-n", namespace).Output()
return num == "1", err
}), "insights-operator replicas is not 1")
})
| |||||
test case
|
openshift/openshift-tests-private
|
8af92501-de66-457f-b323-9c0114a5300e
|
Author:jiajliu-Medium-47198-Techpreview operator will not be installed on a fresh installed
|
['"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"strings"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-47198: TechPreview-gated operators must not be installed on a fresh,
// non-techpreview cluster. Verifies (1) the expected cluster operator
// manifests carry the release.openshift.io/feature-set TechPreviewNoUpgrade
// annotation, and (2) neither the operators' namespaces nor their
// ClusterOperator resources exist in the cluster.
g.It("Author:jiajliu-Medium-47198-Techpreview operator will not be installed on a fresh installed", func() {
tpOperatorNames := []string{"cluster-api"}
// Each entry maps the resource kind to check ("ns"/"co") to its name.
tpOperator := []map[string]string{
{"ns": "openshift-cluster-api", "co": tpOperatorNames[0]}}
featuregate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Featuregate: %s", featuregate)
if featuregate != "{}" {
if strings.Contains(featuregate, "TechPreviewNoUpgrade") {
// TechPreview clusters DO install these operators; case not applicable.
g.Skip("This case is only suitable for non-techpreview cluster!")
} else if strings.Contains(featuregate, "CustomNoUpgrade") {
// With CustomNoUpgrade the namespace may exist; only check the CO.
e2e.Logf("Drop openshift-cluster-api ns due to CustomNoUpgrade fs enabled!")
delete(tpOperator[0], "ns")
} else {
e2e.Failf("Neither TechPreviewNoUpgrade fs nor CustomNoUpgrade fs enabled, stop here to confirm expected behavior first!")
}
}
exutil.By("Check annotation release.openshift.io/feature-set=TechPreviewNoUpgrade in manifests are correct.")
tempDataDir, err := extractManifest(oc)
defer func() { o.Expect(os.RemoveAll(tempDataDir)).NotTo(o.HaveOccurred()) }()
o.Expect(err).NotTo(o.HaveOccurred())
manifestDir := filepath.Join(tempDataDir, "manifest")
// List the cluster operator manifest files that carry the TechPreview
// feature-set annotation.
cmd := fmt.Sprintf("grep -rl 'release.openshift.io/feature-set: .*TechPreviewNoUpgrade.*' %s|grep 'cluster.*operator.yaml'", manifestDir)
featuresetTechPreviewManifest, err := exec.Command("bash", "-c", cmd).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred(), "Command: \"%s\" returned error: %s", cmd, string(featuresetTechPreviewManifest))
tpOperatorFilePaths := strings.Split(strings.TrimSpace(string(featuresetTechPreviewManifest)), "\n")
// One annotated manifest file expected per known TechPreview operator.
o.Expect(len(tpOperatorFilePaths)).To(o.Equal(len(tpOperator)))
e2e.Logf("Expected number of cluster operator manifest files with correct annotation found!")
for _, file := range tpOperatorFilePaths {
data, err := os.ReadFile(file)
o.Expect(err).NotTo(o.HaveOccurred())
var co configv1.ClusterOperator
err = yaml.Unmarshal(data, &co)
o.Expect(err).NotTo(o.HaveOccurred())
// Remove the operator name from the expected list once its manifest is
// found; the break keeps the index-based removal safe.
for i := 0; i < len(tpOperatorNames); i++ {
if co.Name == tpOperatorNames[i] {
e2e.Logf("Found %s in file %v!", tpOperatorNames[i], file)
tpOperatorNames = append(tpOperatorNames[:i], tpOperatorNames[i+1:]...)
break
}
}
}
// All expected names must have been matched (list fully drained).
o.Expect(len(tpOperatorNames)).To(o.Equal(0))
e2e.Logf("All expected tp operators found in manifests!")
exutil.By("Check no TP operator installed by default.")
for i := 0; i < len(tpOperator); i++ {
for k, v := range tpOperator[i] {
// `oc get <kind> <name>` must fail with NotFound for each resource.
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(k, v).Output()
o.Expect(err).To(o.HaveOccurred(), "techpreview operator '%s %s' absence check failed: expecting an error, received: '%s'", k, v, output)
o.Expect(output).To(o.ContainSubstring("NotFound"))
e2e.Logf("Expected: Resource %s/%v not found!", k, v)
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
a4d8499d-e642-4c1d-bd8b-1c076433367a
|
Author:dis-Medium-47757-cvo respects the deployment strategy in manifests [Serial]
|
['"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-47757: CVO must enforce the deployment strategy declared in the release
// manifest. Reads the strategy for openshift-insights/insights-operator from
// the extracted manifest, flips the in-cluster strategy to the opposite type,
// and expects CVO to revert it within 5 minutes (with a manual revert as a
// fallback if it does not).
g.It("Author:dis-Medium-47757-cvo respects the deployment strategy in manifests [Serial]", func() {
exutil.SkipBaselineCaps(oc, "None, v4.11")
exutil.By("Get the strategy for openshift-insights/insights-operator in manifest")
tempDataDir, err := extractManifest(oc)
defer func() { o.Expect(os.RemoveAll(tempDataDir)).NotTo(o.HaveOccurred()) }()
o.Expect(err).NotTo(o.HaveOccurred())
manifestDir := filepath.Join(tempDataDir, "manifest")
namespace, name := "openshift-insights", "insights-operator"
// Extract the value on the line after "strategy:" in the operator's
// Deployment manifest (i.e. the strategy type).
cmd := fmt.Sprintf(
"grep -rlZ 'kind: Deployment' %s | xargs -0 grep -l 'name: %s' | xargs grep strategy -A1 | sed -n 2p | cut -f2 -d ':'",
manifestDir, name)
e2e.Logf("executing: bash -c %s", cmd)
out, err := exec.Command("bash", "-c", cmd).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred(), "Command: \"%s\" returned error: %s", cmd, string(out))
o.Expect(out).NotTo(o.BeEmpty())
expectStrategy := strings.TrimSpace(string(out))
e2e.Logf(expectStrategy)
exutil.By("Check in-cluster insights-operator has the same strategy with manifest")
existStrategy, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args("deployment", name,
"-o=jsonpath={.spec.strategy}",
"-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(existStrategy).To(o.ContainSubstring(expectStrategy))
exutil.By("Change the strategy")
// Patch to the OPPOSITE strategy type; switching to Recreate also requires
// dropping the rollingUpdate block, which is invalid for that type.
var patch []JSONp
if expectStrategy == "Recreate" {
patch = []JSONp{{"replace", "/spec/strategy/type", "RollingUpdate"}}
} else {
patch = []JSONp{
{"remove", "/spec/strategy/rollingUpdate", nil},
{"replace", "/spec/strategy/type", "Recreate"},
}
}
_, err = ocJSONPatch(oc, namespace, fmt.Sprintf("deployment/%s", name), patch)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the strategy reverted after 5 minutes")
if pollErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
curStrategy, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args("deployment", name, "-o=jsonpath={.spec.strategy}", "-n", namespace).Output()
if err != nil {
return false, fmt.Errorf("oc get deployment %s returned error: %v", name, err)
}
return strings.Contains(curStrategy, expectStrategy), nil
}); pollErr != nil {
//dump contents to log
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", name, "-o", "yaml").Execute()
//If the strategy is not reverted, manually change it back
// (inverse of the patch applied above), then fail the test.
if expectStrategy == "Recreate" {
patch = []JSONp{
{"remove", "/spec/strategy/rollingUpdate", nil},
{"replace", "/spec/strategy/type", "Recreate"},
}
} else {
patch = []JSONp{{"replace", "/spec/strategy/type", "RollingUpdate"}}
}
_, err = ocJSONPatch(oc, namespace, fmt.Sprintf("deployment/%s", name), patch)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertWaitPollNoErr(pollErr, "Strategy is not reverted back after 5 minutes")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
ce262988-b5cf-445b-9a20-43daa01e09d5
|
Longduration-NonPreRelease-Author:evakhoni-Medium-48247-Prometheus is able to scrape metrics from the CVO after rotation of the signer ca in openshift-service-ca [Disruptive]
|
['"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-48247: after the service-ca signer key is rotated (by deleting
// secret/signing-key), Prometheus must still be able to scrape the CVO:
// neither ClusterVersionOperatorDown nor TargetDown (for
// cluster-version-operator) may reach the firing state.
g.It("Longduration-NonPreRelease-Author:evakhoni-Medium-48247-Prometheus is able to scrape metrics from the CVO after rotation of the signer ca in openshift-service-ca [Disruptive]", func() {
exutil.By("Check for alerts Before signer ca rotation.")
alertCVODown := getAlert(oc, ".labels.alertname == \"ClusterVersionOperatorDown\"")
alertTargetDown := getAlert(oc, ".labels.alertname == \"TargetDown\" and .labels.service == \"cluster-version-operator\"")
// Baseline: neither alert may exist before the rotation.
o.Expect(alertCVODown).To(o.BeNil())
o.Expect(alertTargetDown).To(o.BeNil())
exutil.By("Force signer ca rotation by deleting signing-key.")
result, err := oc.AsAdmin().WithoutNamespace().Run("delete").
Args("secret/signing-key", "-n", "openshift-service-ca").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("delete returned: %s", result)
o.Expect(result).To(o.ContainSubstring("deleted"))
exutil.By("Check new signing-key is recreated")
exutil.AssertWaitPollNoErr(wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) {
// supposed to fail until available so polling and suppressing the error
out, _ := exec.Command(
"bash", "-c", "oc -n openshift-service-ca get secret/signing-key -o jsonpath='{.metadata.name}'").Output()
e2e.Logf("signing-key name: %s", string(out))
return strings.Contains(string(out), "signing-key"), nil
}), "signing-key not recreated within 30s")
exutil.By("Wait for Prometheus route to be available")
// firstly wait until route is unavailable
err = wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) {
out, cmderr := exec.Command("bash", "-c", "oc get route prometheus-k8s -n openshift-monitoring").CombinedOutput()
if cmderr != nil {
// oc get route returns "exit status 1" once unavailable
if !strings.Contains(cmderr.Error(), "exit status 1") {
return false, fmt.Errorf("oc get route prometheus-k8s returned different unexpected error: %v\n%s", cmderr, string(out))
}
return true, nil
}
return false, nil
})
if err != nil {
// sometimes route stays available, won't impact rest of the test
o.Expect(err.Error()).To(o.ContainSubstring("timed out waiting for the condition"))
}
// wait until available again
exutil.AssertWaitPollNoErr(wait.Poll(10*time.Second, 600*time.Second, func() (bool, error) {
// supposed to fail until available so polling and suppressing the error
out, _ := exec.Command(
"bash", "-c", "oc get route prometheus-k8s -n openshift-monitoring -o jsonpath='{.status.ingress[].conditions[].status}'").Output()
e2e.Logf("prometheus route status: '%s'", string(out))
return strings.Contains(string(out), "True"), nil
}), "Prometheus route is unavailable for 10m")
exutil.By("Check CVO accessible by Prometheus - After signer ca rotation.")
// Track whether each alert was ever seen in a pending state; the test
// tolerates pending alerts but fails the moment either one is firing.
seenAlertCVOd, seenAlertTD := false, false
// alerts may appear within first 5 minutes, and fire after 10 more mins
err = wait.Poll(1*time.Minute, 15*time.Minute, func() (bool, error) {
alertCVODown = getAlert(oc, ".labels.alertname == \"ClusterVersionOperatorDown\"")
alertTargetDown = getAlert(oc, ".labels.alertname == \"TargetDown\" and .labels.service == \"cluster-version-operator\"")
if alertCVODown != nil {
e2e.Logf("alert ClusterVersionOperatorDown found - checking state..")
o.Expect(alertCVODown["state"]).NotTo(o.Equal("firing"))
seenAlertCVOd = true
}
if alertTargetDown != nil {
e2e.Logf("alert TargetDown for CVO found - checking state..")
o.Expect(alertTargetDown["state"]).NotTo(o.Equal("firing"))
seenAlertTD = true
}
if alertCVODown == nil && alertTargetDown == nil {
// Early success only when both alerts appeared and then cleared.
if seenAlertCVOd && seenAlertTD {
e2e.Logf("alerts pended and disappeared. success.")
return true, nil
}
}
return false, nil
})
if err != nil {
// Timing out without either alert firing is also acceptable.
o.Expect(err.Error()).To(o.ContainSubstring("timed out waiting for the condition"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
219b9d28-7943-4788-b849-3c9ec7ff6ee4
|
ConnectedOnly-Author:jianl-Low-21771-Upgrade cluster when current version is not in the graph from upstream [Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"strings"', '"time"', '"cloud.google.com/go/storage"', '"k8s.io/apimachinery/pkg/util/wait"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-21771: when the cluster's current version is not present in the
// upstream update graph, CVO must report RetrievedUpdates with reason
// VersionNotFound; `oc adm upgrade --to/--to-image` must refuse with
// actionable guidance; a forced update to an invalid payload must end with
// ReleaseAccepted=False; and enabling --enable-auto-update must not surface
// any available update or change the desired version.
g.It("ConnectedOnly-Author:jianl-Low-21771-Upgrade cluster when current version is not in the graph from upstream [Serial]", func() {
	var graphURL, bucket, object, targetVersion, targetPayload string
	origVersion, err := getCVObyJP(oc, ".status.desired.version")
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Check if upstream patch required")
	jsonpath := ".status.conditions[?(.type=='RetrievedUpdates')].reason"
	reason, err := getCVObyJP(oc, jsonpath)
	o.Expect(err).NotTo(o.HaveOccurred())
	if strings.Contains(reason, "VersionNotFound") {
		// The cluster already cannot find its version upstream; no custom
		// graph needed, just synthesize a target version/payload to ask for.
		e2e.Logf("no patch required. skipping upstream creation")
		targetVersion = GenerateReleaseVersion(oc)
		targetPayload = GenerateReleasePayload(oc)
	} else {
		// Building a custom upstream graph uses a GCS bucket, so this path
		// is GCP-only.
		exutil.By("Check if it's a GCP cluster")
		exutil.SkipIfPlatformTypeNot(oc, "gcp")
		origUpstream, err := getCVObyJP(oc, ".spec.upstream")
		o.Expect(err).NotTo(o.HaveOccurred())
		origChannel, err := getCVObyJP(oc, ".spec.channel")
		o.Expect(err).NotTo(o.HaveOccurred())
		e2e.Logf("Original upstream: %s, original channel: %s", origUpstream, origChannel)
		defer restoreCVSpec(origUpstream, origChannel, oc)
		exutil.By("Patch upstream")
		projectID := "openshift-qe"
		ctx := context.Background()
		client, err := storage.NewClient(ctx)
		o.Expect(err).NotTo(o.HaveOccurred())
		defer func() { o.Expect(client.Close()).NotTo(o.HaveOccurred()) }()
		// Publish a graph that deliberately omits the cluster's current version.
		graphURL, bucket, object, targetVersion, targetPayload, err = buildGraph(
			client, oc, projectID, "cincy-source-not-in-graph.json")
		defer func() { o.Expect(DeleteBucket(client, bucket)).NotTo(o.HaveOccurred()) }()
		defer func() { o.Expect(DeleteObject(client, bucket, object)).NotTo(o.HaveOccurred()) }()
		o.Expect(err).NotTo(o.HaveOccurred())
		_, err = ocJSONPatch(oc, "", "clusterversion/version", []JSONp{
			{"add", "/spec/upstream", graphURL},
			{"add", "/spec/channel", "channel-a"},
		})
		o.Expect(err).NotTo(o.HaveOccurred())
		exutil.By("Check RetrievedUpdates reason VersionNotFound after patching upstream")
		jsonpath = ".status.conditions[?(.type=='RetrievedUpdates')].reason"
		exutil.AssertWaitPollNoErr(wait.Poll(5*time.Second, 15*time.Second, func() (bool, error) {
			reason, err := getCVObyJP(oc, jsonpath)
			if err != nil {
				return false, fmt.Errorf("get CVO RetrievedUpdates condition returned error: %v", err)
			}
			e2e.Logf("received reason: '%s'", reason)
			return strings.Contains(reason, "VersionNotFound"), nil
		}), "Failed to check RetrievedUpdates!=True")
	}
	exutil.By("Give appropriate error on oc adm upgrade --to")
	toOutput, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "--to", targetVersion).Output()
	o.Expect(err).To(o.HaveOccurred())
	o.Expect(toOutput).To(o.ContainSubstring("Unable to retrieve available updates"))
	o.Expect(toOutput).To(o.ContainSubstring("specify --to-image to continue with the update"))
	exutil.By("Give appropriate error on oc adm upgrade --to-image")
	toImageOutput, err := oc.AsAdmin().WithoutNamespace().Run("adm").
		Args("upgrade", "--to-image", targetPayload).Output()
	o.Expect(err).To(o.HaveOccurred())
	o.Expect(toImageOutput).To(o.ContainSubstring("Unable to retrieve available updates"))
	o.Expect(toImageOutput).To(o.ContainSubstring("specify --allow-explicit-upgrade to continue with the update"))
	defer func() {
		o.Expect(recoverReleaseAccepted(oc)).NotTo(o.HaveOccurred())
	}()
	exutil.By("give appropriate error on CVO for upgrade to invalid payload ")
	// An all-zero digest is guaranteed not to resolve to a real release image.
	invalidPayload := "quay.io/openshift-release-dev/ocp-release@sha256:0000000000000000000000000000000000000000000000000000000000000000"
	invalidPayloadOutput, err := oc.AsAdmin().WithoutNamespace().Run("adm").
		Args("upgrade", "--allow-explicit-upgrade", "--force", "--to-image", invalidPayload).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(invalidPayloadOutput).To(o.ContainSubstring("Updating to release image"))
	// usually happens quicker, but 8 minutes is safe deadline
	if err = waitForCondition(oc, 30, 480, "False",
		"get", "clusterversion", "version", "-o", "jsonpath={.status.conditions[?(@.type=='ReleaseAccepted')].status}"); err != nil {
		//dump contents to log
		_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o", "yaml").Execute()
		exutil.AssertWaitPollNoErr(err, "ReleaseAccepted condition is not false in 8m")
	}
	message, err := getCVObyJP(oc, ".status.conditions[?(.type=='ReleaseAccepted')].message")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(message).To(o.ContainSubstring("Retrieving payload failed"))
	o.Expect(message).To(o.ContainSubstring("status initcontainer cleanup is waiting with reason \"ErrImagePull\""))
	o.Expect(message).To(o.ContainSubstring(invalidPayload))
	o.Expect(recoverReleaseAccepted(oc)).NotTo(o.HaveOccurred())
	exutil.By("Find enable-auto-update index in deployment")
	origAutoState, autoUpdIndex, err := getCVOcontArg(oc, "enable-auto-update")
	// Fix: validate the lookup BEFORE registering the restore defer. If
	// getCVOcontArg failed, origAutoState/autoUpdIndex are meaningless and
	// must never be fed back into patchCVOcontArg during cleanup.
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() {
		out, err := patchCVOcontArg(oc, autoUpdIndex, fmt.Sprintf("--enable-auto-update=%s", origAutoState))
		o.Expect(err).NotTo(o.HaveOccurred(), out)
	}()
	_, err = patchCVOcontArg(oc, autoUpdIndex, "--enable-auto-update=true")
	o.Expect(err).NotTo(o.HaveOccurred())
	// recovery: once enable-auto-update is reconciled (~30sec), deployment becomes unavailable for up to CVO minimum reconcile period (~2-4min)
	defer func() {
		if err = waitForCondition(oc, 30, 240, "True",
			"get", "-n", "openshift-cluster-version", "deployments/cluster-version-operator", "-o", "jsonpath={.status.conditions[?(.type=='Available')].status}"); err != nil {
			//dump contents to log
			_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-cluster-version", "deployments/cluster-version-operator", "-o", "yaml").Execute()
			exutil.AssertWaitPollNoErr(err, "deployments/cluster-version-operator not available after 4m")
		}
	}()
	defer func() {
		// Runs before the restore-patch defer (LIFO); confirms the argument
		// is observed back at false before availability is re-checked.
		exutil.AssertWaitPollNoErr(wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
			depArgs, _, err := getCVOcontArg(oc, "enable-auto-update")
			if err != nil {
				return false, fmt.Errorf("get CVO container args returned error: %v", err)
			}
			e2e.Logf("argument: %s", depArgs)
			return strings.Contains(depArgs, "false"), nil
		}), "Failed waiting for enable-auto-update=false")
	}()
	exutil.By("Wait for enable-auto-update")
	exutil.AssertWaitPollNoErr(wait.PollImmediate(2*time.Second, 10*time.Second, func() (bool, error) {
		depArgs, _, err := getCVOcontArg(oc, "enable-auto-update")
		if err != nil {
			return false, fmt.Errorf("get CVO container args returned error: %v", err)
		}
		e2e.Logf("argument: %s", depArgs)
		return strings.Contains(depArgs, "true"), nil
	}), "Failed waiting for enable-auto-update=true")
	exutil.By("Check cvo can not get available update after setting enable-auto-update")
	exutil.AssertWaitPollNoErr(wait.Poll(5*time.Second, 15*time.Second, func() (bool, error) {
		reason, err := getCVObyJP(oc, ".status.conditions[?(.type=='RetrievedUpdates')].reason")
		if err != nil {
			// Typo fixed: "RetreivedUpdates" -> "RetrievedUpdates".
			return false, fmt.Errorf("get CVO RetrievedUpdates condition returned error: %v", err)
		}
		e2e.Logf("reason: %s", reason)
		return strings.Contains(reason, "VersionNotFound"), nil
	}), "Failed to check cvo can not get available update")
	exutil.By("Check availableUpdates is null")
	o.Expect(getCVObyJP(oc, ".status.availableUpdates")).To(o.Equal("null"), "unexpected availableUpdates") // changed from <nil> to null in 4.16
	exutil.By("Check desired version haven't changed")
	o.Expect(getCVObyJP(oc, ".status.desired.version")).To(o.Equal(origVersion), "unexpected desired version change")
})
| |||||
test case
|
openshift/openshift-tests-private
|
adccc853-5716-41ce-80bd-651ac55e2e69
|
Longduration-NonPreRelease-Author:evakhoni-Medium-22641-Rollback against a dummy start update with oc adm upgrade clear [Serial]
|
['"regexp"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-22641: request an update to a nonexistent payload (so the update never
// starts), then roll it back with `oc adm upgrade --clear`: ReleaseAccepted
// must return to True with its original message, and the stuck version pod
// must leave ImagePullBackOff.
g.It("Longduration-NonPreRelease-Author:evakhoni-Medium-22641-Rollback against a dummy start update with oc adm upgrade clear [Serial]", func() {
	// preserve original message
	originalMessage, err := getCVObyJP(oc, ".status.conditions[?(.type=='ReleaseAccepted')].message")
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("request upgrade to fake payload")
	fakeReleasePayload := "registry.ci.openshift.org/ocp/release@sha256:5a561dc23a9d323c8bd7a8631bed078a9e5eec690ce073f78b645c83fb4cdf74"
	err = oc.AsAdmin().WithoutNamespace().Run("adm").
		Args("upgrade", "--allow-explicit-upgrade", "--force", "--to-image", fakeReleasePayload).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() { o.Expect(recoverReleaseAccepted(oc)).NotTo(o.HaveOccurred()) }()
	exutil.By("check ReleaseAccepted=False")
	// usually happens quicker, but 8 minutes is safe deadline
	if err = waitForCondition(oc, 30, 480, "False",
		"get", "clusterversion", "version", "-o", "jsonpath={.status.conditions[?(@.type=='ReleaseAccepted')].status}"); err != nil {
		//dump contents to log
		_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o", "yaml").Execute()
		exutil.AssertWaitPollNoErr(err, "ReleaseAccepted condition is not false in 8m")
	}
	exutil.By("check ReleaseAccepted False have correct message")
	message, err := getCVObyJP(oc, ".status.conditions[?(.type=='ReleaseAccepted')].message")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(message).To(o.ContainSubstring("Unable to download and prepare the update: deadline exceeded"))
	o.Expect(message).To(o.ContainSubstring("Job was active longer than specified deadline"))
	o.Expect(message).To(o.ContainSubstring(fakeReleasePayload))
	exutil.By("check version pod in ImagePullBackOff")
	// swinging between Init:0/4 Init:ErrImagePull and Init:ImagePullBackOff so need a few retries
	if err = waitForCondition(oc, 5, 30, "ImagePullBackOff",
		"get", "-n", "openshift-cluster-version", "pods", "-o", "jsonpath={.items[*].status.initContainerStatuses[0].state.waiting.reason}"); err != nil {
		//dump contents to log
		_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-cluster-version", "pods", "-o", "yaml").Execute()
		exutil.AssertWaitPollNoErr(err, "ImagePullBackOff not detected in 30s")
	}
	exutil.By("Clear above unstarted upgrade")
	err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "--clear").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	if err = waitForCondition(oc, 30, 480, "True",
		"get", "clusterversion", "version", "-o", "jsonpath={.status.conditions[?(@.type=='ReleaseAccepted')].status}"); err != nil {
		//dump contents to log
		_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o", "yaml").Execute()
		// Fixed failure message: this wait is for the condition to return to True.
		exutil.AssertWaitPollNoErr(err, "ReleaseAccepted condition is not true in 8m")
	}
	// Fixed step description: after --clear, the True condition's message is
	// compared against the preserved original.
	exutil.By("check ReleaseAccepted True has correct message")
	message, err = getCVObyJP(oc, ".status.conditions[?(.type=='ReleaseAccepted')].message")
	o.Expect(err).NotTo(o.HaveOccurred())
	// Strip the architecture="..." attribute before comparing.
	o.Expect(message).To(o.ContainSubstring(regexp.MustCompile(` architecture=".*"`).ReplaceAllString(originalMessage, ""))) // until OCPBUGS-4032 is fixed
	exutil.By("no version pod in ImagePullBackOff")
	if err = waitForCondition(oc, 5, 30, "",
		"get", "-n", "openshift-cluster-version", "pods", "-o", "jsonpath={.items[*].status.initContainerStatuses[0].state.waiting.reason}"); err != nil {
		//dump contents to log
		_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-cluster-version", "pods", "-o", "yaml").Execute()
		exutil.AssertWaitPollNoErr(err, "ImagePullBackOff not cleared in 30s")
	}
})
| |||||
test case
|
openshift/openshift-tests-private
|
ede455d8-ea7b-430d-bb72-d9874d05749a
|
Longduration-NonPreRelease-Author:jiajliu-High-46017-CVO should keep reconcile manifests when update failed on precondition check [Disruptive]
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-46017: even when an update is rejected at precondition check
// (upgradeable=false + ReleaseAccepted=False), CVO must keep reconciling
// manifest-managed resources. Mutates openshift-marketplace's deployment
// rollingUpdate.maxUnavailable and expects it to be reconciled back.
g.It("Longduration-NonPreRelease-Author:jiajliu-High-46017-CVO should keep reconcile manifests when update failed on precondition check [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
//Take openshift-marketplace/deployment as an example, it can be any resource which included in manifest files
resourceKindName := "deployment/marketplace-operator"
resourceNamespace := "openshift-marketplace"
exutil.By("Check default rollingUpdate strategy in a fresh installed cluster.")
defaultValueMaxUnavailable, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args(resourceKindName, "-o=jsonpath={.spec.strategy.rollingUpdate.maxUnavailable}",
"-n", resourceNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultValueMaxUnavailable).To(o.Equal("25%"))
exutil.By("Ensure upgradeable=false.")
upgStatusOutput, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(upgStatusOutput, "Upgradeable=False") {
e2e.Logf("Enable upgradeable=false explicitly...")
//set overrides in cv to trigger upgradeable=false condition if it is not enabled by default
err = setCVOverrides(oc, "deployment", "network-operator", "openshift-network-operator")
defer unsetCVOverrides(oc)
exutil.AssertWaitPollNoErr(err, "timeout to set overrides!")
}
exutil.By("Trigger update when upgradeable=false and precondition check fail.")
//Choose a fixed old release payload to trigger a fake upgrade when upgradeable=false
oldReleasePayload := "quay.io/openshift-release-dev/ocp-release@sha256:fd96300600f9585e5847f5855ca14e2b3cafbce12aefe3b3f52c5da10c4476eb"
err = oc.AsAdmin().WithoutNamespace().Run("adm").
Args("upgrade", "--allow-explicit-upgrade", "--to-image", oldReleasePayload).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer func() { o.Expect(recoverReleaseAccepted(oc)).NotTo(o.HaveOccurred()) }()
// Wait (up to 8 minutes) for the precondition check to reject the update.
if err = waitForCondition(oc, 30, 480, "False",
"get", "clusterversion", "version", "-o", "jsonpath={.status.conditions[?(@.type=='ReleaseAccepted')].status}"); err != nil {
//dump contents to log
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o", "yaml").Execute()
exutil.AssertWaitPollNoErr(err, "ReleaseAccepted condition is not false in 8m")
}
exutil.By("Change strategy.rollingUpdate.maxUnavailable to be 50%.")
_, err = ocJSONPatch(oc, resourceNamespace, resourceKindName, []JSONp{
{"replace", "/spec/strategy/rollingUpdate/maxUnavailable", "50%"},
})
o.Expect(err).NotTo(o.HaveOccurred())
// Safety net: restore the default in case CVO did not reconcile it back.
defer func() {
out, err := ocJSONPatch(oc, resourceNamespace, resourceKindName, []JSONp{
{"replace", "/spec/strategy/rollingUpdate/maxUnavailable", "25%"},
})
o.Expect(err).NotTo(o.HaveOccurred(), out)
}()
exutil.By("Check the deployment was reconciled back.")
// CVO's reconcile loop is slower in this state; allow up to 20 minutes.
exutil.AssertWaitPollNoErr(wait.Poll(30*time.Second, 20*time.Minute, func() (bool, error) {
valueMaxUnavailable, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args(resourceKindName, "-o=jsonpath={.spec.strategy.rollingUpdate.maxUnavailable}", "-n", resourceNamespace).Output()
if err != nil {
return false, fmt.Errorf("oc get %s -n %s returned error: %v", resourceKindName, resourceNamespace, err)
}
if strings.Compare(valueMaxUnavailable, defaultValueMaxUnavailable) != 0 {
e2e.Logf("valueMaxUnavailable is %v. Waiting for deployment being reconciled...", valueMaxUnavailable)
return false, nil
}
return true, nil
}), "the deployment was not reconciled back in 20min.")
})
| |||||
test case
|
openshift/openshift-tests-private
|
8c0269fa-5833-4800-8fe9-5c01b48ea7ce
|
Longduration-NonPreRelease-Author:jiajliu-Medium-51973-setting cv.overrides should work while ReleaseAccepted=False [Disruptive]
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-51973: even while the target payload is rejected (ReleaseAccepted=False),
// setting spec.overrides on the ClusterVersion must still take effect: the
// overridden deployment is released from CVO management and manual edits to it
// are not reconciled back.
// Disruptive: triggers a fake (unpullable) upgrade payload, sets overrides on
// the network-operator deployment and patches its rollout strategy; all
// mutations are undone via the deferred cleanups below (LIFO order).
g.It("Longduration-NonPreRelease-Author:jiajliu-Medium-51973-setting cv.overrides should work while ReleaseAccepted=False [Disruptive]", func() {
	resourceKind := "deployment"
	resourceName := "network-operator"
	resourceNamespace := "openshift-network-operator"
	exutil.By("Trigger ReleaseAccepted=False condition.")
	// A digest that does not exist in the (fake) repository, so the payload
	// download fails and CVO reports ReleaseAccepted=False.
	fakeReleasePayload := "quay.io/openshift-release-dev-test/ocp-release@sha256:39efe13ef67cb4449f5e6cdd8a26c83c07c6a2ce5d235dfbc3ba58c64418fcf3"
	err := oc.AsAdmin().WithoutNamespace().Run("adm").
		Args("upgrade", "--allow-explicit-upgrade", "--to-image", fakeReleasePayload).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Cancel the fake upgrade on exit; registered first, so it runs last.
	defer func() { o.Expect(recoverReleaseAccepted(oc)).NotTo(o.HaveOccurred()) }()
	// Poll (every 30s, up to 480s) for ReleaseAccepted to become False.
	if err = waitForCondition(oc, 30, 480, "False",
		"get", "clusterversion", "version", "-o", "jsonpath={.status.conditions[?(@.type=='ReleaseAccepted')].status}"); err != nil {
		// dump contents to log
		_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o", "yaml").Execute()
		exutil.AssertWaitPollNoErr(err, "ReleaseAccepted condition is not false in 8m")
	}
	exutil.By("Disable deployment/network-operator's management through setting cv.overrides.")
	err = setCVOverrides(oc, resourceKind, resourceName, resourceNamespace)
	defer unsetCVOverrides(oc)
	exutil.AssertWaitPollNoErr(err, "timeout to set overrides!")
	exutil.By("Check default rollingUpdate strategy.")
	defaultValueMaxUnavailable, err := oc.AsAdmin().WithoutNamespace().Run("get").
		Args(resourceKind, resourceName, "-o=jsonpath={.spec.strategy.rollingUpdate.maxUnavailable}",
			"-n", resourceNamespace).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(defaultValueMaxUnavailable).To(o.Equal("1"))
	exutil.By("Change strategy.rollingUpdate.maxUnavailable to be 50%.")
	_, err = ocJSONPatch(oc, resourceNamespace, fmt.Sprintf("%s/%s", resourceKind, resourceName), []JSONp{
		{"replace", "/spec/strategy/rollingUpdate/maxUnavailable", "50%"},
	})
	o.Expect(err).NotTo(o.HaveOccurred())
	// Restore the default strategy on exit (note: numeric 1, not string "1").
	defer func() {
		out, err := ocJSONPatch(oc, resourceNamespace, fmt.Sprintf("%s/%s", resourceKind, resourceName), []JSONp{
			{"replace", "/spec/strategy/rollingUpdate/maxUnavailable", 1},
		})
		o.Expect(err).NotTo(o.HaveOccurred(), out)
	}()
	exutil.By("Check the deployment will not be reconciled back.")
	// NOTE(review): this poll succeeds as soon as the value differs from the
	// default — usually on the very first probe, since we just patched it —
	// so it does not actually prove the value *stays* un-reconciled for the
	// 8-minute window; an o.Consistently-style check would be stronger.
	// Confirm intent before changing timing behavior of this disruptive test.
	err = wait.Poll(30*time.Second, 8*time.Minute, func() (bool, error) {
		valueMaxUnavailable, err := oc.AsAdmin().WithoutNamespace().Run("get").
			Args(resourceKind, resourceName, "-o=jsonpath={.spec.strategy.rollingUpdate.maxUnavailable}", "-n", resourceNamespace).Output()
		if err != nil {
			return false, fmt.Errorf("oc get %s %s -n %s returned error: %v", resourceKind, resourceName, resourceNamespace, err)
		}
		if strings.Compare(valueMaxUnavailable, defaultValueMaxUnavailable) == 0 {
			e2e.Logf("valueMaxUnavailable is %v. Waiting for deployment being reconciled...", valueMaxUnavailable)
			return false, nil
		}
		return true, nil
	})
	// Only a poll timeout is tolerated here; any other failure is unexpected.
	if err != nil {
		o.Expect(err.Error()).To(o.ContainSubstring("timed out waiting for the condition"))
	}
})
| |||||
test case
|
openshift/openshift-tests-private
|
af9c3e7d-dca0-448b-ac42-d8692a1c4511
|
Author:jiajliu-Medium-53906-The architecture info in clusterversion’s status should be correct
|
['"strings"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-53906: the architecture reported in the ClusterVersion status
// (ReleaseAccepted message) must match reality: "Multi" for heterogeneous
// payloads, otherwise the single architecture shared by every node.
g.It("Author:jiajliu-Medium-53906-The architecture info in clusterversion’s status should be correct", func() {
	const heterogeneousArchKeyword = "multi"
	expectedArchMsg := "architecture=\"Multi\""

	exutil.By("Get release info from current cluster")
	releaseJSON, err := getReleaseInfo(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(releaseJSON).NotTo(o.BeEmpty())

	exutil.By("Check the arch info cv.status is expected")
	acceptedMsg, err := getCVObyJP(oc, ".status.conditions[?(.type=='ReleaseAccepted')].message")
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Release payload info in cv.status: %v", acceptedMsg)

	releaseArch := gjson.Get(releaseJSON, `metadata.metadata.release\.openshift\.io/architecture`).String()
	if releaseArch == heterogeneousArchKeyword {
		e2e.Logf("This current release is a heterogeneous payload")
		// Heterogeneous payload: the message must carry the "Multi" marker.
		e2e.Logf("Expected arch info: %v", expectedArchMsg)
		o.Expect(acceptedMsg).To(o.ContainSubstring(expectedArchMsg))
		return
	}
	e2e.Logf("This current release is a non-heterogeneous payload")
	// Single-arch payload: all nodes must report one architecture and the
	// ReleaseAccepted message must mention exactly that architecture.
	rawArches, err := oc.AsAdmin().WithoutNamespace().
		Run("get").Args("nodes", "-o",
		"jsonpath={.items[*].status.nodeInfo.architecture}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	nodeArches := strings.Split(strings.TrimSpace(rawArches), " ")
	e2e.Logf("Nodes arch list: %v", nodeArches)
	for _, arch := range nodeArches {
		if arch != nodeArches[0] {
			e2e.Failf("unexpected node arch in non-hetero cluster: %s expecting: %s",
				arch, nodeArches[0])
		}
	}
	e2e.Logf("Expected arch info: %v", nodeArches[0])
	o.Expect(acceptedMsg).To(o.ContainSubstring(nodeArches[0]))
})
| |||||
test case
|
openshift/openshift-tests-private
|
90a6424a-0304-46bc-b091-44a3ee8e49d1
|
Longduration-NonPreRelease-Author:jianl-high-68398-CVO reconcile SCC resources which have release.openshift.io/create-only: true [Slow]
|
['"context"', '"encoding/json"', '"fmt"', '"os"', '"path/filepath"', '"time"', '"github.com/tidwall/gjson"', '"k8s.io/apimachinery/pkg/util/wait"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-68398: SCCs shipped with the annotation "release.openshift.io/create-only: true"
// (here: the "restricted" SCC) are created once by CVO but NOT reconciled
// afterwards: in-place edits and field removals must survive, none of it may
// raise an Upgradeable=False "modified SecurityContextConstraints" gate, and
// only deleting the whole object makes CVO recreate it from its manifest.
g.It("Longduration-NonPreRelease-Author:jianl-high-68398-CVO reconcile SCC resources which have release.openshift.io/create-only: true [Slow]", func() {
	exutil.By("Get default SCC spec")
	scc := "restricted"
	sccManifest := "0000_20_kube-apiserver-operator_00_scc-restricted.yaml"
	// Extract the release manifests so the recreated SCC can be compared
	// against the shipped manifest at the end of the test.
	tempDataDir, err := extractManifest(oc)
	defer func() { o.Expect(os.RemoveAll(tempDataDir)).NotTo(o.HaveOccurred()) }()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Snapshot the current SCC so it can be force-applied back on exit.
	goodSCCFile, getSCCFileErr := oc.AsAdmin().WithoutNamespace().
		Run("get").Args("scc", scc, "-ojson").OutputToFile("ocp-68398.json")
	o.Expect(getSCCFileErr).NotTo(o.HaveOccurred())
	defer func() {
		o.Expect(oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", goodSCCFile, "--force").Execute()).NotTo(o.HaveOccurred())
		o.Expect(os.RemoveAll(goodSCCFile)).NotTo(o.HaveOccurred())
	}()
	originalOutputByte, readFileErr := os.ReadFile(goodSCCFile)
	o.Expect(readFileErr).NotTo(o.HaveOccurred())
	originalOutput := string(originalOutputByte)
	o.Expect(originalOutput).Should(o.ContainSubstring("release.openshift.io/create-only"))
	// gjson treats '?' as a single-character wildcard, which matches the dots
	// in "release.openshift.io/create-only" without needing escapes.
	createOnly := gjson.Get(originalOutput, "metadata.annotations.release?openshift?io/create-only").Bool()
	o.Expect(createOnly).Should(o.BeTrue())
	// Updating allowHostIPC should not cause upgradeable=false and will not be reconciled.
	originalAllowHostIPC := gjson.Get(originalOutput, "allowHostIPC").Bool()
	ocJSONPatch(oc, "", fmt.Sprintf("scc/%s", scc), []JSONp{{"replace", "/allowHostIPC", !originalAllowHostIPC}})
	o.Consistently(func() bool {
		hostIPCOutput, _ := oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		// Returning originalAllowHostIPC would mean CVO reconciled the field back.
		return gjson.Get(hostIPCOutput, "allowHostIPC").Bool()
	}, 300*time.Second, 30*time.Second).ShouldNot(o.Equal(originalAllowHostIPC), "Error: allowHostIPC was reconciled back, check point: allowHostIPC")
	upgradeableOutput, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
	o.Expect(upgradeableOutput).ShouldNot(o.ContainSubstring("Detected modified SecurityContextConstraints"), "Error occured in oc adm upgrade")
	// A removed+replaced volumes entry is likewise left alone.
	originalVolumes := gjson.Get(originalOutput, "volumes").Array()
	ocJSONPatch(oc, "", fmt.Sprintf("scc/%s", scc), []JSONp{
		{"remove", "/volumes/0", nil},
		{"add", "/volumes/0", "Test"},
	})
	o.Consistently(func() bool {
		volumesOutput, _ := oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		newVolumes := gjson.Get(volumesOutput, "volumes").Array()
		// NOTE(review): remove+add at index 0 keeps the list length, so
		// comparing newVolumes[5] with originalVolumes[4] looks off-by-one;
		// confirm the intended index relationship.
		return newVolumes[0].String() == "Test" && newVolumes[5].String() != originalVolumes[4].String()
	}, 5*time.Minute, 30*time.Second).Should(o.BeTrue(), fmt.Sprintf("Error: %s was reconciled back, check point: volumes", scc))
	upgradeableOutput, _ = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
	o.Expect(upgradeableOutput).ShouldNot(o.ContainSubstring("Detected modified SecurityContextConstraints"), "Error occured in oc adm upgrade")
	// allowPrivilegeEscalation should be set to true immediately after removing it
	// (the API server defaults the field; CVO is not involved).
	peLog, _ := ocJSONPatch(oc, "", fmt.Sprintf("scc/%s", scc), []JSONp{
		{"remove", "/allowPrivilegeEscalation", nil},
	})
	// BUGFIX: log with an explicit verb; e2e.Logf(string(pe_log)) passed a
	// non-constant format string (go vet printf) and would mangle any '%'.
	e2e.Logf("%s", peLog)
	o.Consistently(func() bool {
		peOutput, _ := oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		return gjson.Get(peOutput, "allowPrivilegeEscalation").Bool()
	}, 30*time.Second, 10*time.Second).Should(o.BeTrue(), "Error: allowPrivilegeEscalation is not true")
	upgradeableOutput, _ = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
	o.Expect(upgradeableOutput).ShouldNot(o.ContainSubstring("Detected modified SecurityContextConstraints"), "Error occured in oc adm upgrade")
	// SCC should be recreated (fresh resourceVersion) after deleting it.
	outputBeforeDelete, _ := oc.AsAdmin().WithoutNamespace().
		Run("get").Args("scc", scc, "-ojson").Output()
	resourceVersion := gjson.Get(outputBeforeDelete, "metadata.resourceVersion").String()
	deleteLog, deleteErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("scc", scc).Output()
	e2e.Logf("Delete scc %s: %s", scc, deleteLog)
	o.Expect(deleteErr).NotTo(o.HaveOccurred())
	err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 10*time.Minute, true, func(context.Context) (bool, error) {
		newOutput, newErr := oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		if newErr != nil {
			// GET fails while the object is still absent; keep polling.
			return false, nil
		}
		newResourceVersion := gjson.Get(newOutput, "metadata.resourceVersion").String()
		return resourceVersion != newResourceVersion, nil
	})
	// BUGFIX: the message used to say "5 minutes" while the poll above waits
	// up to 10 minutes.
	exutil.AssertWaitPollNoErr(err, "Error: SCC have not recreated after 10 minutes")
	// Compare the recreated SCC against the shipped manifest.
	manifest := filepath.Join(tempDataDir, "manifest", sccManifest)
	manifestContent, _ := os.ReadFile(manifest)
	expectedValues, err := exutil.Yaml2Json(string(manifestContent))
	o.Expect(err).NotTo(o.HaveOccurred())
	finalOutput, _ := oc.AsAdmin().WithoutNamespace().
		Run("get").Args("scc", scc, "-ojson").Output()
	o.Expect(finalOutput).Should(o.ContainSubstring("release.openshift.io/create-only"))
	createOnly = gjson.Get(finalOutput, "metadata.annotations.release?openshift?io/create-only").Bool()
	o.Expect(createOnly).Should(o.BeTrue())
	finalAllowHostIPC := gjson.Get(finalOutput, "allowHostIPC").Bool()
	o.Expect(finalAllowHostIPC).Should(o.Equal(gjson.Get(expectedValues, "allowHostIPC").Bool()), "allowHostIPC is not correct")
	finalPEValue := gjson.Get(finalOutput, "allowPrivilegeEscalation").Bool()
	pe := gjson.Get(expectedValues, "allowPrivilegeEscalation").Bool()
	e2e.Logf("pe: %v", pe)
	o.Expect(finalPEValue).Should(o.Equal(pe), "allowPrivilegeEscalation is not correct")
	finalVolumes := gjson.Get(finalOutput, "volumes").Array()
	expectedVolumes := gjson.Get(expectedValues, "volumes").Array()
	o.Expect(len(finalVolumes)).Should(o.Equal(len(expectedVolumes)), "volumes have different number of expected values")
	var finalResult []string
	for _, v := range finalVolumes {
		finalResult = append(finalResult, v.Str)
	}
	var expectedResult []string
	for _, v := range expectedVolumes {
		expectedResult = append(expectedResult, v.Str)
	}
	e2e.Logf("Final volumes are: %v", finalResult)
	e2e.Logf("Expected volumes are: %v", expectedResult)
	o.Expect(finalResult).Should(o.ContainElements(expectedResult), "volumns are not exact equal to manifest")
	upgradeableOutput, _ = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
	o.Expect(upgradeableOutput).ShouldNot(o.ContainSubstring("Detected modified SecurityContextConstraints"), "Error occured in oc adm upgrade")
})
| |||||
test case
|
openshift/openshift-tests-private
|
78e58498-c8c8-40c2-a1b5-eb89ffe1aecc
|
Longduration-NonPreRelease-Author:jianl-high-68397-CVO reconciles SCC resources which do not have release.openshift.io/create-only: true [Disruptive]
|
['"encoding/json"', '"fmt"', '"os"', '"path/filepath"', '"reflect"', '"time"', 'g "github.com/onsi/ginkgo/v2"', '"github.com/tidwall/gjson"', '"k8s.io/apimachinery/pkg/util/wait"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-68397: SCCs WITHOUT the "release.openshift.io/create-only" annotation
// (here: the "restricted-v2" SCC) are fully managed by CVO: any drift (edited
// fields, removed fields, shuffled volumes, even deletion of the whole object)
// must be reconciled back to the shipped manifest, and none of it may raise an
// Upgradeable=False "modified SecurityContextConstraints" gate.
g.It("Longduration-NonPreRelease-Author:jianl-high-68397-CVO reconciles SCC resources which do not have release.openshift.io/create-only: true [Disruptive]", func() {
	scc := "restricted-v2"
	exutil.By("Get default SCC spec")
	sccManifest := "0000_20_kube-apiserver-operator_00_scc-restricted-v2.yaml"
	tempDataDir, err := extractManifest(oc)
	defer func() { o.Expect(os.RemoveAll(tempDataDir)).NotTo(o.HaveOccurred()) }()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Snapshot the SCC so the deferred force-apply restores the cluster state.
	goodSCCFile, getSCCFileErr := oc.AsAdmin().WithoutNamespace().
		Run("get").Args("scc", scc, "-ojson").OutputToFile("ocp-68397-scc.json")
	o.Expect(getSCCFileErr).NotTo(o.HaveOccurred())
	defer func() {
		o.Expect(oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", goodSCCFile, "--force").Execute()).NotTo(o.HaveOccurred())
		o.Expect(os.RemoveAll(goodSCCFile)).NotTo(o.HaveOccurred())
		output, _ := oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		e2e.Logf("New scc after runing apply command: \n %s", output)
	}()
	originalOutputByte, readFileErr := os.ReadFile(goodSCCFile)
	o.Expect(readFileErr).NotTo(o.HaveOccurred())
	originalOutput := string(originalOutputByte)
	o.Expect(originalOutput).ShouldNot(o.ContainSubstring("release.openshift.io/create-only"))
	// Updating allowHostIPC should not cause upgradeable=false and will be reconciled.
	originalAllowHostIPC := gjson.Get(originalOutput, "allowHostIPC").Bool()
	ocJSONPatch(oc, "", fmt.Sprintf("scc/%s", scc), []JSONp{{"replace", "/allowHostIPC", !originalAllowHostIPC}})
	var observedAllowHostIPC bool
	var output string
	err = wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
		output, err = oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		if err != nil {
			return false, err
		}
		observedAllowHostIPC = gjson.Get(output, "allowHostIPC").Bool()
		// observedAllowHostIPC == originalAllowHostIPC means the resource was reconciled.
		return observedAllowHostIPC == originalAllowHostIPC, nil
	})
	exutil.AssertWaitPollNoErr(err, "AllowHostIPC is not reconciled")
	// There must be no Upgradeable=False guard for the transient drift.
	o.Expect(checkUpdates(oc, false, 10, 30,
		"Detected modified SecurityContextConstraints")).To(o.BeFalse(), "Error occured in oc adm upgrade after updating allowHostIPC")
	// SCC should be recreated (fresh resourceVersion) after deleting it.
	outputBeforeDelete, _ := oc.AsAdmin().WithoutNamespace().
		Run("get").Args("scc", scc, "-ojson").Output()
	resourceVersion := gjson.Get(outputBeforeDelete, "metadata.resourceVersion").String()
	deleteLog, deleteErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("scc", scc).Output()
	e2e.Logf("Delete scc %s: %s", scc, deleteLog)
	o.Expect(deleteErr).NotTo(o.HaveOccurred())
	// Wait some minutes until the SCC is regenerated.
	var newErr error
	err = wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
		output, newErr = oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		if newErr != nil {
			// GET fails while the object is still absent; keep polling.
			return false, nil
		}
		newResourceVersion := gjson.Get(output, "metadata.resourceVersion").String()
		return resourceVersion != newResourceVersion, nil
	})
	exutil.AssertWaitPollNoErr(err, "Error: SCC have not recreated after 5 minutes")
	o.Expect(checkUpdates(oc, false, 30, 60*3,
		"Detected modified SecurityContextConstraints")).To(o.BeFalse(), "Error occured in oc adm upgrade after deleting scc")
	// Compare the recreated SCC against the shipped manifest.
	manifest := filepath.Join(tempDataDir, "manifest", sccManifest)
	manifestContent, _ := os.ReadFile(manifest)
	expectedValues, err := exutil.Yaml2Json(string(manifestContent))
	o.Expect(err).NotTo(o.HaveOccurred())
	observedAllowHostIPC = gjson.Get(output, "allowHostIPC").Bool()
	allowHostIPCManifest := gjson.Get(expectedValues, "allowHostIPC").Bool()
	o.Expect(allowHostIPCManifest).Should(o.Equal(observedAllowHostIPC), "Error: allowHostIPC is not same with its value in manifest")
	finalVolumes := gjson.Get(output, "volumes").Array()
	expectedVolumesManifest := gjson.Get(expectedValues, "volumes").Array()
	o.Expect(len(finalVolumes)).Should(o.Equal(len(expectedVolumesManifest)), "Error: volumes have different number of expected values")
	var finalResult []string
	for _, v := range finalVolumes {
		finalResult = append(finalResult, v.Str)
	}
	var expectedResult []string
	for _, v := range expectedVolumesManifest {
		expectedResult = append(expectedResult, v.Str)
	}
	e2e.Logf("Final volumes are: %v", finalResult)
	e2e.Logf("Expected volumes in menifest are: %v", expectedResult)
	o.Expect(finalResult).Should(o.ContainElements(expectedResult), "Error: volumns are not exactly equal to manifest")
	// allowPrivilegeEscalation should be defaulted to true by the API server
	// immediately after removing it, then reconciled to the manifest value.
	// BUGFIX: read the expected value from the parsed manifest JSON
	// (expectedValues); the old code ran gjson.Get on the manifest file *path*
	// string, which always yielded false regardless of the manifest content.
	allowPrivilegeEscalationManifest := gjson.Get(expectedValues, "allowPrivilegeEscalation").Bool()
	ocJSONPatch(oc, "", fmt.Sprintf("scc/%s", scc), []JSONp{
		{"remove", "/allowPrivilegeEscalation", nil},
	})
	output, _ = oc.AsAdmin().WithoutNamespace().
		Run("get").Args("scc", scc, "-ojson").Output()
	allowPrivilegeEscalation := gjson.Get(output, "allowPrivilegeEscalation").Bool()
	o.Expect(allowPrivilegeEscalation).Should(o.BeTrue(), "Error: allowPrivilegeEscalation is not be set to true immediately")
	err = wait.Poll(30*time.Second, 10*time.Minute, func() (bool, error) {
		output, _ = oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		allowPrivilegeEscalation = gjson.Get(output, "allowPrivilegeEscalation").Bool()
		return allowPrivilegeEscalation == allowPrivilegeEscalationManifest, nil
	})
	exutil.AssertWaitPollNoErr(err, "Error: allowPrivilegeEscalation is not be set to manifest")
	o.Expect(checkUpdates(oc, false, 30, 60*3,
		"Detected modified SecurityContextConstraints")).To(o.BeFalse(), "Error: upgrade guard error occured for SecurityContextConstraints")
	// Shuffle the volumes list; CVO must reconcile it back to the manifest.
	ocJSONPatch(oc, "", fmt.Sprintf("scc/%s", scc), []JSONp{
		{"remove", "/volumes/4", nil},
		{"add", "/volumes/0", "Test"},
	})
	// expectedResult already holds the manifest volume list built above (the
	// old code redundantly rebuilt it here with identical content).
	err = wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
		output, _ = oc.AsAdmin().WithoutNamespace().
			Run("get").Args("scc", scc, "-ojson").Output()
		finalVolumes := gjson.Get(output, "volumes").Array()
		if len(finalVolumes) != len(expectedResult) {
			return false, nil
		}
		finalResult = finalResult[:0]
		for _, v := range finalVolumes {
			finalResult = append(finalResult, v.Str)
		}
		return reflect.DeepEqual(finalResult, expectedResult), nil
	})
	exutil.AssertWaitPollNoErr(err, "volumns are not correct")
	o.Expect(checkUpdates(oc, false, 1, 10,
		"Detected modified SecurityContextConstraints")).To(o.BeFalse(), "Error: There should not be upgradeable=false gate for non-4.13 cluster")
})
| |||||
test case
|
openshift/openshift-tests-private
|
f945fed8-3c95-4f24-9f9f-587b6397921d
|
NonPreRelease-Author:jiajliu-Medium-70931-CVO reconcile metadata on ClusterOperators [Disruptive]
|
['"encoding/json"', '"fmt"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-70931: CVO must reconcile metadata on ClusterOperators: after stripping
// metadata.annotations from every ClusterOperator, the annotations must come
// back on their own. Disruptive: the deferred loop restores any annotations
// that were still missing when the test ended.
g.It("NonPreRelease-Author:jiajliu-Medium-70931-CVO reconcile metadata on ClusterOperators [Disruptive]", func() {
	// Snapshot of each operator's annotations, taken before removal, so the
	// deferred cleanup can restore them if reconciliation did not.
	var annotationCOs []annotationCO
	resourcePath := "/metadata/annotations"
	exutil.By("Remove metadata.annotation")
	operatorName, err := oc.AsAdmin().WithoutNamespace().Run("get").
		Args("co", "-o=jsonpath={.items[*].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	operatorList := strings.Fields(operatorName)
	defer func() {
		// NOTE(review): the loop variable shadows the annotationCO type name;
		// legal Go, but a rename would aid readability.
		for _, annotationCO := range annotationCOs {
			anno, _ := oc.AsAdmin().WithoutNamespace().Run("get").
				Args("co", annotationCO.name, "-o=jsonpath={.metadata.annotations}").Output()
			// Restore only where the annotations are still absent.
			if anno == "" {
				_, err = ocJSONPatch(oc, "", "clusteroperator/"+annotationCO.name, []JSONp{{"add", resourcePath, annotationCO.annotation}})
				o.Expect(err).NotTo(o.HaveOccurred())
			}
		}
	}()
	for _, op := range operatorList {
		var anno map[string]string
		annoOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").
			Args("co", op, "-o=jsonpath={.metadata.annotations}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		// Save the current annotations, then drop the whole annotations map.
		err = json.Unmarshal([]byte(annoOutput), &anno)
		o.Expect(err).NotTo(o.HaveOccurred())
		annotationCOs = append(annotationCOs, annotationCO{op, anno})
		_, err = ocJSONPatch(oc, "", "clusteroperator/"+op, []JSONp{{"remove", resourcePath, nil}})
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	exutil.By("Check metadata.annotation is reconciled back")
	for _, op := range operatorList {
		// Each operator gets up to 5 minutes (probed once per minute) for its
		// annotations to reappear.
		o.Eventually(func() string {
			anno, _ := oc.AsAdmin().WithoutNamespace().Run("get").
				Args("co", op, "-o=jsonpath={.metadata.annotations}").Output()
			return anno
		}, 5*time.Minute, 1*time.Minute).ShouldNot(o.BeEmpty(), fmt.Sprintf("Fail to reconcile metadata of %s", op))
	}
})
| |||||
test case
|
openshift/openshift-tests-private
|
48e7d25d-2db8-4fd2-99df-08e2b1ab80d9
|
Author:jianl-ConnectedOnly-Medium-77520-oc adm upgrade recommend
|
['"context"', '"encoding/json"', '"fmt"', '"os"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"cloud.google.com/go/storage"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/cvo.go
|
// OCP-77520: exercises the tech-preview "oc adm upgrade recommend" subcommand
// (gated by OC_ENABLE_CMD_UPGRADE_RECOMMEND) against a purpose-built update
// graph served from a GCS bucket: plain output, --show-outdated-releases, and
// --version lookups, including a version that is absent from the graph.
g.It("Author:jianl-ConnectedOnly-Medium-77520-oc adm upgrade recommend", func() {
	exutil.By("Check if it's a GCP cluster")
	exutil.SkipIfPlatformTypeNot(oc, "gcp")
	exutil.By("export OC_ENABLE_CMD_UPGRADE_RECOMMEND=true")
	os.Setenv("OC_ENABLE_CMD_UPGRADE_RECOMMEND", "true")
	defer func() { os.Setenv("OC_ENABLE_CMD_UPGRADE_RECOMMEND", "") }()
	exutil.By("oc adm upgrade recommend --help")
	help, err := oc.AsAdmin().WithoutNamespace().Run("adm").
		Args("upgrade", "recommend", "--help").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(help).Should(o.ContainSubstring("This subcommand is read-only and does not affect the state of the cluster. To request an update, use the 'oc adm upgrade' subcommand."))
	o.Expect(help).Should(o.ContainSubstring("--show-outdated-releases=false"))
	o.Expect(help).Should(o.ContainSubstring("--version=''"))
	exutil.By("Update graph data")
	testDataDir := exutil.FixturePath("testdata", "ota/cvo")
	graphFile := filepath.Join(testDataDir, "cincy-77520.json")
	// BUGFIX: every e2e.Logf below used to pass an argument without a format
	// verb (go vet printf failure), so the value never showed up in the log.
	e2e.Logf("Origin graph template file path: %s", graphFile)
	// Work on a copy so the checked-in fixture template stays pristine.
	dest := filepath.Join(testDataDir, "cincy-77520_bak.json")
	err = copy(graphFile, dest)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() { os.Remove(dest) }()
	version, err := getCVObyJP(oc, ".status.history[0].version")
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Current OCP version is: %s", version)
	majorMinor := strings.Split(string(version), ".")
	exutil.By("Update graphFile with real version")
	// Substitute the template placeholders with the live cluster version.
	err = updateFile(dest, "current_version", version)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = updateFile(dest, "major", majorMinor[0])
	o.Expect(err).NotTo(o.HaveOccurred())
	err = updateFile(dest, "minor", majorMinor[1])
	o.Expect(err).NotTo(o.HaveOccurred())
	next, _ := strconv.Atoi(majorMinor[1])
	nextMinor := strconv.Itoa(next + 1)
	err = updateFile(dest, "next", nextMinor)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("New graph template file path: %s", dest)
	exutil.By("Patch upstream and channel")
	projectID := "openshift-qe"
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() { o.Expect(client.Close()).NotTo(o.HaveOccurred()) }()
	// Upload the graph to a GCS bucket and point the cluster's upstream at it.
	graphURL, bucket, object, _, _, err := buildGraph(
		client, oc, projectID, dest)
	defer func() { o.Expect(DeleteBucket(client, bucket)).NotTo(o.HaveOccurred()) }()
	defer func() { o.Expect(DeleteObject(client, bucket, object)).NotTo(o.HaveOccurred()) }()
	o.Expect(err).NotTo(o.HaveOccurred())
	_, err = ocJSONPatch(oc, "", "clusterversion/version", []JSONp{
		{"add", "/spec/upstream", graphURL},
		{"add", "/spec/channel", "channel-b"},
	})
	o.Expect(err).NotTo(o.HaveOccurred())
	// Version strings the crafted graph advertises relative to the cluster.
	zStreamVersion1 := fmt.Sprintf("%s.%s.998", majorMinor[0], majorMinor[1])
	zStreamVersion2 := fmt.Sprintf("%s.%s.999", majorMinor[0], majorMinor[1])
	yStreamVersion1 := fmt.Sprintf("%s.%s.997", majorMinor[0], nextMinor)
	yStreamVersion2 := fmt.Sprintf("%s.%s.998", majorMinor[0], nextMinor)
	yStreamVersion3 := fmt.Sprintf("%s.%s.999", majorMinor[0], nextMinor)
	exutil.By("Check oc adm upgrade recommend")
	// We need to wait some minutes the first time for the recommendations to
	// show up after patching the upstream.
	err = wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
		output, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "recommend").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		if !strings.Contains(output, "Upstream: "+graphURL) {
			return false, nil
		}
		if !strings.Contains(output, "Channel:") {
			return false, nil
		}
		if !strings.Contains(output, zStreamVersion1+" no known issues relevant to this cluster") {
			return false, nil
		}
		if !strings.Contains(output, zStreamVersion2+" no known issues relevant to this cluster") {
			return false, nil
		}
		if !strings.Contains(output, yStreamVersion3+" no known issues relevant to this cluster") {
			return false, nil
		}
		if !strings.Contains(output, "MultipleReasons") {
			return false, nil
		}
		// major.next.997 is older than major.next.998 and major.next.999, so it
		// should not be displayed in the default (non-outdated) output.
		if strings.Contains(output, yStreamVersion1) {
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "oc adm upgrade recommend fail")
	output, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "recommend").Output()
	e2e.Logf("output: \n%s", output)
	exutil.By("Check oc adm upgrade recommend --show-outdated-releases")
	output, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "recommend", "--show-outdated-releases").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("output: \n%s", output)
	o.Expect(output).Should(o.ContainSubstring("Upstream: " + graphURL))
	o.Expect(output).Should(o.ContainSubstring("Channel"))
	o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("%s no known issues relevant to this cluster", zStreamVersion1)))
	o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("%s no known issues relevant to this cluster", zStreamVersion2)))
	o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("%s no known issues relevant to this cluster", yStreamVersion1)))
	o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("%s MultipleReasons", yStreamVersion2)))
	o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("%s no known issues relevant to this cluster", yStreamVersion3)))
	exutil.By("Check oc adm upgrade recommend --version " + yStreamVersion2)
	output, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "recommend", "--version", yStreamVersion2).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("output: \n%s", output)
	o.Expect(output).Should(o.ContainSubstring("Upstream: " + graphURL))
	o.Expect(output).Should(o.ContainSubstring("Channel"))
	o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("Update to %s Recommended=False", yStreamVersion2)))
	o.Expect(output).Should(o.ContainSubstring("Reason: MultipleReasons"))
	o.Expect(output).Should(o.ContainSubstring("On clusters on default invoker user, this imaginary bug can happen"))
	o.Expect(output).Should(o.ContainSubstring("Too many CI failures on this release, so do not update to it"))
	exutil.By("Check oc adm upgrade recommend --version " + yStreamVersion3)
	output, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "recommend", "--version", yStreamVersion3).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("output: \n%s", output)
	o.Expect(output).Should(o.ContainSubstring("Upstream: " + graphURL))
	o.Expect(output).Should(o.ContainSubstring("Channel"))
	expectedMsg := fmt.Sprintf("Update to %s has no known issues relevant to this cluster.", yStreamVersion3)
	o.Expect(output).Should(o.ContainSubstring(expectedMsg))
	o.Expect(output).Should(o.ContainSubstring("Image: quay.io/openshift-release-dev/ocp-release@sha256:d2d34aafe0adda79953dd928b946ecbda34673180ee9a80d2ee37c123a0f510c"))
	o.Expect(output).Should(o.ContainSubstring("Release URL: https://amd64.ocp.releases.ci.openshift.org/releasestream/4-dev-preview/release/4.y+1.0"))
	exutil.By("Check oc adm upgrade recommend --version 4.999.999")
	output, _ = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "recommend", "--version", "4.999.999").Output()
	e2e.Logf("output: \n%s", output)
	o.Expect(output).Should(o.ContainSubstring("Upstream: " + graphURL))
	o.Expect(output).Should(o.ContainSubstring("Channel"))
	o.Expect(output).Should(o.ContainSubstring("error: no updates to 4.999 available, so cannot display context for the requested release 4.999.999"))
})
| |||||
file
|
openshift/openshift-tests-private
|
df54f9b0-c547-4e57-84ac-4ad19d66917a
|
utils
|
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"cloud.google.com/go/storage"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
package cvo
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"cloud.google.com/go/storage"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// JSONp defines a single JSON-Patch (RFC 6902) operation as serialized for
// "oc patch --type=json".
type JSONp struct {
	Oper string      `json:"op"`              // patch operation: "add", "remove", "replace", ...
	Path string      `json:"path"`            // JSON pointer to the target field
	Valu interface{} `json:"value,omitempty"` // operand value; omitted (nil) for "remove"
}
// annotationCO pairs a ClusterOperator name with a snapshot of its
// metadata.annotations, so removed annotations can be restored later.
type annotationCO struct {
	name       string            // ClusterOperator name
	annotation map[string]string // saved metadata.annotations content
}
// copy
// @Description Copy a file from src to target
// @Create jianl Jan 22 2025
// @Param src string the origin file path
// @Param target string the new file path
// @Return nil
func copy(src, target string) error {
bytesRead, err := ioutil.ReadFile(src)
if err != nil {
return err
}
if exit, _ := PathExists(target); exit {
os.Remove(target)
}
err = ioutil.WriteFile(target, bytesRead, 0o755)
if err != nil {
return err
}
return nil
}
// GetDeploymentsYaml dumps out a deployment in YAML format from a specific
// namespace. It returns the YAML text and any error from the underlying
// "oc get" invocation; on error the returned string is empty.
func GetDeploymentsYaml(oc *exutil.CLI, deploymentName string, namespace string) (string, error) {
	e2e.Logf("Dumping deployments %s from namespace %s", deploymentName, namespace)
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", deploymentName, "-n", namespace, "-o", "yaml").Output()
	if err != nil {
		e2e.Logf("Error dumping deployments: %v", err)
		return "", err
	}
	// BUGFIX: log with an explicit verb; e2e.Logf(out) treated the YAML as a
	// format string (go vet: non-constant format string) and would mangle any
	// '%' occurring in the deployment YAML.
	e2e.Logf("%s", out)
	return out, nil
}
// PodExec executes a single command or a bash script in the running pod. It returns the
// command output and error if the command finished with non-zero status code or the
// command took longer than 3 minutes to run.
//
// NOTE(review): the condition function always returns done=true, so the poll
// executes the command exactly once and never retries; the 3-minute window
// only bounds scheduling of that single attempt, not the command's runtime
// (Output() blocks until "oc exec" itself finishes). Confirm whether retry
// semantics were actually intended here before relying on the timeout.
func PodExec(oc *exutil.CLI, script string, namespace string, podName string) (string, error) {
	var out string
	waitErr := wait.PollImmediate(1*time.Second, 3*time.Minute, func() (bool, error) {
		var err error
		// Run the script through bash inside the pod; a non-nil err aborts the
		// poll immediately and is surfaced to the caller via waitErr.
		out, err = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, podName, "--", "/bin/bash", "-c", script).Output()
		return true, err
	})
	return out, waitErr
}
// waitForAlert checks if a prometheus alert appears, optionally requiring a
// specific alert state.
// Param: alertString: the alertname label to look for
// Param: interval, timeout: plain numbers of seconds — they are multiplied by
//        time.Second below, so pass e.g. 30, not 30*time.Second
// Param: state: "", "pending" or "firing"; any other value is rejected
// Return value: bool: indicate if the alert is found
// Return value: map: annotation map which contains reason and message information
// Return value: error: any error
func waitForAlert(oc *exutil.CLI, alertString string, interval time.Duration, timeout time.Duration, state string) (bool, map[string]string, error) {
	// Reject unsupported state filters up front.
	if len(state) > 0 {
		if state != "pending" && state != "firing" {
			return false, nil, fmt.Errorf("state %s is not supported", state)
		}
	}
	e2e.Logf("Waiting for alert %s pending or firing...", alertString)
	// Resolve the external hostname of the in-cluster Prometheus route.
	url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
		"-n", "openshift-monitoring",
		"route", "prometheus-k8s",
		"-o=jsonpath={.spec.host}").Output()
	if err != nil || len(url) == 0 {
		return false, nil, fmt.Errorf("error getting the hostname of route prometheus-k8s %v", err)
	}
	token, err := exutil.GetSAToken(oc)
	if err != nil || len(token) == 0 {
		return false, nil, fmt.Errorf("error getting SA token %v", err)
	}
	// Three curl|jq pipelines against the Prometheus alerts API: the full
	// alert object, just its annotations, and just its state.
	// NOTE(review): the bearer token is interpolated into a shell command line
	// and is therefore visible in the process list; consider passing it via a
	// header file or environment variable instead.
	alertCMD := fmt.Sprintf("curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\")'", token, url, alertString)
	alertAnnoCMD := fmt.Sprintf("curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\").annotations'", token, url, alertString)
	alertStateCMD := fmt.Sprintf("curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\").state'", token, url, alertString)
	// Poll returns timed out waiting for the condition when timeout is reached
	count := 0
	if pollErr := wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		count++
		metrics, err := exec.Command("bash", "-c", alertCMD).Output()
		if err != nil {
			e2e.Logf("Error retrieving prometheus alert metrics: %v, retry %d...", err, count)
			return false, nil
		}
		// Empty output means the alert has not appeared yet; keep polling.
		if len(string(metrics)) == 0 {
			e2e.Logf("Prometheus alert metrics nil, retry %d...", count)
			return false, nil
		}
		if len(state) > 0 {
			alertState, err := exec.Command("bash", "-c", alertStateCMD).Output()
			if err != nil {
				return false, fmt.Errorf("error getting alert state")
			}
			// For "pending": any other observed state is an immediate failure.
			if state == "pending" && string(alertState) != "pending" {
				return false, fmt.Errorf("alert state is not expected, expected pending but actual is %s", string(alertState))
			}
			if state == "firing" {
				// Before the timeout is reached the alert is allowed to sit in
				// "pending"; once the time budget is used up it must be "firing".
				if int(interval)*count < int(timeout) {
					if string(alertState) == "pending" {
						e2e.Logf("Prometheus alert state is pending, waiting for firing, retry %d...", count)
						return false, nil
					}
					return false, fmt.Errorf("alert state is not expected, expected pending in the waiting time window but actual is %s", string(alertState))
				} else if string(alertState) == "firing" {
					return true, nil
				} else {
					return false, fmt.Errorf("alert state is not expected, expected firing when the waiting time is reached but actual is %s", string(alertState))
				}
			}
			return true, nil
		}
		return true, nil
	}); pollErr != nil {
		return false, nil, pollErr
	}
	e2e.Logf("Alert %s found", alertString)
	// The alert exists; fetch and decode its annotations for the caller.
	annotation, err := exec.Command("bash", "-c", alertAnnoCMD).Output()
	if err != nil || len(string(annotation)) == 0 {
		return true, nil, fmt.Errorf("error getting annotation for alert %s", alertString)
	}
	var annoMap map[string]string
	if err := json.Unmarshal(annotation, &annoMap); err != nil {
		return true, nil, fmt.Errorf("error converting annotation to map for alert %s", alertString)
	}
	return true, annoMap, nil
}
// Check if operator's condition is expected until timeout or return true or an error happened.
// args is the full oc invocation (verb first); the command's newline-stripped
// output is compared verbatim against expectedCondition on every poll.
func waitForCondition(oc *exutil.CLI, interval time.Duration, timeout time.Duration, expectedCondition string, args ...string) error {
	e2e.Logf("Checking condition for: oc %v", args)
	return wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		output, err := oc.AsAdmin().WithoutNamespace().Run(args[0]).Args(args[1:]...).Output()
		if err != nil {
			e2e.Logf("Checking condition error:%v", err)
			return false, err
		}
		actual := strings.ReplaceAll(output, "\n", "")
		if actual != expectedCondition {
			e2e.Logf("Current condition is: '%s' Waiting for condition to be '%s'...", actual, expectedCondition)
			return false, nil
		}
		e2e.Logf("Current condition is: %v", actual)
		return true, nil
	})
}
// Get detail alert info by selector
// Queries the in-cluster Prometheus alerts API via the prometheus-k8s route
// and returns the first alert matching the given jq selector expression.
// All failures are logged and collapsed to a nil return (no error surfaced).
func getAlert(oc *exutil.CLI, alertSelector string) map[string]interface{} {
	var alertInfo map[string]interface{}
	// Resolve the externally reachable hostname of the Prometheus route.
	url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
		"-n", "openshift-monitoring",
		"route", "prometheus-k8s",
		"-o=jsonpath={.spec.host}").Output()
	if err != nil || len(url) == 0 {
		e2e.Logf("error getting the hostname of route prometheus-k8s %v", err)
		return nil
	}
	token, err := exutil.GetSAToken(oc)
	if err != nil || len(token) == 0 {
		e2e.Logf("error getting SA token %v", err)
		return nil
	}
	// jq wraps matches in an array and takes [0], i.e. the first matching alert.
	command := fmt.Sprintf("curl -skH \"Authorization: Bearer %s\" https://%s/api/v1/alerts"+
		" | jq -r '[.data.alerts[]|select(%s)][0]'", token, url, alertSelector)
	output, err := exec.Command("bash", "-c", command).Output()
	if err != nil {
		// Mask the bulk of the bearer token before logging the failed command.
		e2e.Logf("Getting alert error:%v for %s", err, strings.ReplaceAll(command, token[5:], "*****"))
		return nil
	}
	if len(output) == 0 {
		e2e.Logf("No alert found for %v", alertSelector)
		return nil
	}
	err = json.Unmarshal(output, &alertInfo)
	if err != nil {
		e2e.Logf("Unmarshal alert error:%v in %s for %s", err, output, strings.ReplaceAll(command, token[5:], "*****"))
		return nil
	}
	e2e.Logf("Alert found: %v", alertInfo)
	return alertInfo
}
// Get detail alert info by alertname and the alert's "name" label.
func getAlertByName(oc *exutil.CLI, alertName string, name string) map[string]interface{} {
	selector := fmt.Sprintf(".labels.alertname == \"%s\" and .labels.name == \"%s\"", alertName, name)
	return getAlert(oc, selector)
}
// CreateBucket creates a new bucket in the gcs
// projectID := "my-project-id"
// bucketName := "bucket-name"
// The create call is bounded by a 10s timeout.
// return value: error: any error
func CreateBucket(client *storage.Client, projectID, bucketName string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	return client.Bucket(bucketName).Create(ctx, projectID, nil)
}
// UploadFile uploads a local file's contents to a gcs object
// bucket := "bucket-name"
// object := "object-name"
// The upload is bounded by a 50s timeout.
// return value: error: any error
func UploadFile(client *storage.Client, bucket, object, file string) error {
	// Open local file
	src, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("os.Open: %v", err)
	}
	defer src.Close()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
	defer cancel()
	// Upload an object with storage.Writer; Close flushes and finalizes it.
	dst := client.Bucket(bucket).Object(object).NewWriter(ctx)
	if _, err = io.Copy(dst, src); err != nil {
		return fmt.Errorf("io.Copy: %v", err)
	}
	if err = dst.Close(); err != nil {
		return fmt.Errorf("Writer.Close: %v", err)
	}
	return nil
}
// MakePublic makes a gcs object world-readable by granting
// storage.AllUsers the reader role on its ACL (10s timeout).
// bucket := "bucket-name"
// object := "object-name"
// return value: error: any error
func MakePublic(client *storage.Client, bucket, object string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	return client.Bucket(bucket).Object(object).ACL().Set(ctx, storage.AllUsers, storage.RoleReader)
}
// DeleteObject deletes the gcs object (10s timeout).
// An empty object name is treated as a no-op.
// return value: error: any error
func DeleteObject(client *storage.Client, bucket, object string) error {
	if object == "" {
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	if err := client.Bucket(bucket).Object(object).Delete(ctx); err != nil {
		return err
	}
	e2e.Logf("Object: %v deleted", object)
	return nil
}
// DeleteBucket deletes the gcs bucket (10s timeout).
// An empty bucket name is treated as a no-op.
// return value: error: any error
func DeleteBucket(client *storage.Client, bucketName string) error {
	if bucketName == "" {
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	if err := client.Bucket(bucketName).Delete(ctx); err != nil {
		return err
	}
	e2e.Logf("Bucket: %v deleted", bucketName)
	return nil
}
// GenerateReleaseVersion generates a fake release version based on the
// cluster's current desired version by bumping the minor number
// (e.g. 4.16.3 -> 4.17.3). Returns "" on any failure.
func GenerateReleaseVersion(oc *exutil.CLI) string {
	sourceVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.version}").Output()
	if err != nil {
		return ""
	}
	parts := strings.Split(sourceVersion, ".")
	if len(parts) < 2 {
		return ""
	}
	minor, convErr := strconv.Atoi(parts[1])
	if convErr != nil {
		return ""
	}
	parts[1] = strconv.Itoa(minor + 1)
	return strings.Join(parts, ".")
}
// GenerateReleasePayload generates a fake release payload pullspec by
// replacing the digest of the cluster's current desired image with the
// sha256 of 10 random bytes. Returns "" on any failure.
func GenerateReleasePayload(oc *exutil.CLI) string {
	sourcePayload, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.image}").Output()
	if err != nil {
		return ""
	}
	random := make([]byte, 10)
	if _, err := rand.Read(random); err != nil {
		return ""
	}
	digest := sha256.Sum256(random)
	parts := strings.Split(sourcePayload, ":")
	if len(parts) < 2 {
		return ""
	}
	parts[1] = hex.EncodeToString(digest[:])
	return strings.Join(parts, ":")
}
// updateGraph updates the cincy.json graph template in place, substituting
// the source/target version and payload placeholders.
// return value: string: graph json filename
// return value: string: target version
// return value: string: target payload
// return value: error: any error
func updateGraph(oc *exutil.CLI, graphTemplate string) (string, string, string, error) {
	e2e.Logf("Graph Template: %v", graphTemplate)
	// Assume the cluster is not being upgraded, then desired version will be the current version
	sourceVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.version}").Output()
	if err != nil {
		return "", "", "", err
	}
	sourcePayload, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.image}").Output()
	if err != nil {
		return "", "", "", err
	}
	targetVersion := GenerateReleaseVersion(oc)
	if targetVersion == "" {
		return "", "", "", fmt.Errorf("error get target version")
	}
	targetPayload := GenerateReleasePayload(oc)
	if targetPayload == "" {
		return "", "", "", fmt.Errorf("error get target payload")
	}
	// Substitute each placeholder in the template file, stopping at the first failure.
	for _, sub := range [][2]string{
		{"sourceversion", sourceVersion},
		{"sourcepayload", sourcePayload},
		{"targetversion", targetVersion},
		{"targetpayload", targetPayload},
	} {
		if err = updateFile(graphTemplate, sub[0], sub[1]); err != nil {
			return "", "", "", err
		}
	}
	return graphTemplate, targetVersion, targetPayload, nil
}
// buildGraph
// @Description creates a gcs bucket, upload the graph file and make it public for CVO to use
// @Create jianl Jan 22 2025
// @Param client storage.Client Google storage client
// @Param oc exutil.CLI Instance of oc CLI
// @Param projectID string Google storage project ID
// @Param graphName string Absolute graph file path or graph file name in fixture folder
// @Return the public url of the object (plus the bucket/object names for
// later cleanup and the generated target version/payload); on error the
// already-set named returns are whatever succeeded before the failure.
func buildGraph(client *storage.Client, oc *exutil.CLI, projectID string, graphName string) (
	url string, bucket string, object string, targetVersion string, targetPayload string, err error) {
	var graphFile string
	var resp *http.Response
	var body []byte
	// If given a full file path, then we use it directly, otherwith find it in fixture folder
	if exit, _ := PathExists(graphName); !exit {
		graphDataDir := exutil.FixturePath("testdata", "ota/cvo")
		graphName = filepath.Join(graphDataDir, graphName)
	}
	// Fill in the version/payload placeholders before uploading.
	if graphFile, targetVersion, targetPayload, err = updateGraph(oc, graphName); err != nil {
		return
	}
	e2e.Logf("Graph file: %v updated", graphFile)
	// Give the bucket a unique name
	bucket = fmt.Sprintf("ocp-ota-%d", time.Now().Unix())
	if err = CreateBucket(client, projectID, bucket); err != nil {
		return
	}
	e2e.Logf("Bucket: %v created", bucket)
	// Give the object a unique name
	object = fmt.Sprintf("graph-%d", time.Now().Unix())
	if err = UploadFile(client, bucket, object, graphFile); err != nil {
		return
	}
	e2e.Logf("Object: %v uploaded", object)
	// Make the object public
	if err = MakePublic(client, bucket, object); err != nil {
		return
	}
	e2e.Logf("Object: %v public", object)
	url = fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object)
	// testing endpoint accessible and logging graph contents
	if resp, err = http.Get(url); err == nil {
		defer resp.Body.Close()
		if body, err = io.ReadAll(resp.Body); err == nil {
			e2e.Logf(string(body))
		}
	}
	return
}
// restoreCVSpec restores upstream and channel of clusterversion
// if no need to restore, pass "nochange" to the argument(s)
// Restoration is best-effort: patch errors are deliberately ignored and
// mismatches after the attempt are only logged.
func restoreCVSpec(upstream string, channel string, oc *exutil.CLI) {
	e2e.Logf("Restoring upgrade graph to '%s' channel to '%s'", upstream, channel)
	if channel != "nochange" {
		_ = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "channel", "--allow-explicit-channel", channel).Execute()
		// Give the change a moment to land before verifying.
		time.Sleep(5 * time.Second)
		currChannel, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[].spec.channel}").Output()
		if currChannel != channel {
			e2e.Logf("Error on channel recovery, expected %s, but got %s", channel, currChannel)
		}
	}
	if upstream != "nochange" {
		switch upstream {
		case "":
			// Empty upstream means the field should be absent entirely.
			_ = oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterversion/version", "--type=json", "-p", "[{\"op\":\"remove\", \"path\":\"/spec/upstream\"}]").Execute()
		default:
			_ = oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterversion/version", "--type=merge", "--patch", fmt.Sprintf("{\"spec\":{\"upstream\":\"%s\"}}", upstream)).Execute()
		}
		currUpstream, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[].spec.upstream}").Output()
		if currUpstream != upstream {
			e2e.Logf("Error on upstream recovery, expected %s, but got %s", upstream, currUpstream)
		}
	}
}
// Run "oc adm release extract" cmd to extract manifests from current live cluster
// into a fresh temp dir (returned as tempDataDir; caller is responsible for
// removing it). If the plain extract fails, two fallbacks are attempted:
//  1. disconnected baremetal/none platforms: retry with --insecure
//  2. C2S/SC2S AWS regions: retry pulling the payload digest from the ICSP mirror
func extractManifest(oc *exutil.CLI) (tempDataDir string, err error) {
	tempDataDir = filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	if err = os.Mkdir(tempDataDir, 0755); err != nil {
		err = fmt.Errorf("failed to create directory: %v", err)
		return
	}
	// The cluster pull secret is needed to authenticate against the release registry.
	if err = oc.AsAdmin().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--confirm", "--to="+tempDataDir).Execute(); err != nil {
		err = fmt.Errorf("failed to extract dockerconfig: %v", err)
		return
	}
	manifestDir := filepath.Join(tempDataDir, "manifest")
	if err = oc.AsAdmin().Run("adm").Args("release", "extract", "--to", manifestDir, "-a", tempDataDir+"/.dockerconfigjson").Execute(); err != nil {
		e2e.Logf("warning: release extract failed once with:\n\"%v\"", err)
		//Workaround disconnected baremental clusters that don't have cert for the registry
		platform := exutil.CheckPlatform(oc)
		if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
			var mirror_registry string
			mirror_registry, err = exutil.GetMirrorRegistry(oc)
			if mirror_registry != "" {
				if err != nil {
					err = fmt.Errorf("error out getting mirror registry: %v", err)
					return
				}
				// --insecure skips TLS verification against the mirror registry.
				if err = oc.AsAdmin().Run("adm").Args("release", "extract", "--insecure", "--to", manifestDir, "-a", tempDataDir+"/.dockerconfigjson").Execute(); err != nil {
					err = fmt.Errorf("warning: insecure release extract for disconnected baremetal failed with:\n\"%v\"", err)
				}
				return
			}
		}
		//Workaround c2s/cs2s clusters that only have token to the mirror in pull secret
		var region, image, mirror string
		if region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure",
			"cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output(); err != nil {
			err = fmt.Errorf("failed to get cluster region: %v", err)
			return
		}
		// region us-iso-* represent C2S, us-isob-* represent SC2S
		if !strings.Contains(region, "us-iso-") && !strings.Contains(region, "us-isob-") {
			err = fmt.Errorf("oc adm release failed, and no retry for non-c2s/cs2s region: %s", region)
			return
		}
		if image, err = exutil.GetReleaseImage(oc); err != nil {
			err = fmt.Errorf("failed to get cluster release image: %v", err)
			return
		}
		if mirror, err = oc.AsAdmin().Run("get").Args("ImageContentSourcePolicy",
			"-o", "jsonpath={.items[0].spec.repositoryDigestMirrors[0].mirrors[0]}").Output(); err != nil {
			err = fmt.Errorf("failed to acquire mirror from ICSP: %v", err)
			return
		}
		// Re-point the extract at the mirror, keeping the original image digest.
		if err = oc.AsAdmin().Run("adm").Args("release", "extract",
			"--from", fmt.Sprintf("%s@%s", mirror, strings.Split(image, "@")[1]),
			"--to", manifestDir, "-a", tempDataDir+"/.dockerconfigjson", "--insecure").Execute(); err != nil {
			err = fmt.Errorf("failed to extract manifests: %v", err)
			return
		}
	}
	return
}
// Run "oc adm release extract --included --install-config" cmd to extract manifests.
// creds toggles --credentials-requests; cloud (only valid together with creds)
// adds --cloud. Returns the temp dir holding the extracted manifests.
func extractIncludedManifestWithInstallcfg(oc *exutil.CLI, creds bool, cfg string, image string, cloud string) (tempDataDir string, err error) {
	tempDataDir = filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	if err = os.Mkdir(tempDataDir, 0755); err != nil {
		err = fmt.Errorf("failed to create directory: %v", err)
		return
	}
	var out string
	baseArgs := []string{"release", "extract", "--install-config", cfg, "--included"}
	switch {
	case creds && cloud != "":
		out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args(append(baseArgs, "--credentials-requests", "--cloud", cloud, "--from", image, "--to", tempDataDir)...).Output()
	case creds:
		out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args(append(baseArgs, "--credentials-requests", "--from", image, "--to", tempDataDir)...).Output()
	case cloud != "":
		err = fmt.Errorf("--cloud only works with --credentials-requests,creds_var: %v,cloud_var: %v", creds, cloud)
	default:
		out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args(append(baseArgs, "--from", image, "--to", tempDataDir)...).Output()
	}
	if err != nil {
		err = fmt.Errorf("failed to extract manifest: %v, command output:%v", err, out)
		return
	}
	return
}
// getDefaultCapsInCR returns the default capability combinations expected in
// the ClusterResource for a given OCP minor version, or nil for unknown versions.
func getDefaultCapsInCR(version string) []string {
	switch version {
	// 4.16 through 4.19 share the same default capability set.
	case "4.16", "4.17", "4.18", "4.19":
		return []string{"CloudCredential", "CloudCredential+CloudControllerManager", "CloudCredential+Ingress", "MachineAPI+CloudCredential", "ImageRegistry+CloudCredential", "Storage+CloudCredential"}
	case "4.15":
		return []string{"CloudCredential", "MachineAPI+CloudCredential", "ImageRegistry+CloudCredential", "Storage+CloudCredential"}
	case "4.14":
		return []string{"Storage", "MachineAPI"}
	default:
		e2e.Logf("Unknown version:%s detected!", version)
		return nil
	}
}
// getRandomPlatform returns one of the supported cloud platform names
// ("aws", "azure", "gcp", "vsphere") chosen uniformly at random.
func getRandomPlatform() string {
	platforms := [...]string{"aws", "azure", "gcp", "vsphere"}
	seed := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Bug fix: Intn(len(platforms)) covers every index; the previous
	// Intn(len(types)-1) could never select the last entry ("vsphere").
	return platforms[seed.Intn(len(platforms))]
}
// getRandomString returns an 8-character random string drawn from
// lowercase letters and digits, seeded from the current time.
func getRandomString() string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]byte, 8)
	for i := range out {
		out[i] = alphabet[rng.Intn(len(alphabet))]
	}
	return string(out)
}
// get clusterversion version object values by jsonpath.
// Returns: object_value(string), error
func getCVObyJP(oc *exutil.CLI, jsonpath string) (string, error) {
	outputFormat := fmt.Sprintf("jsonpath={%s}", jsonpath)
	return oc.AsAdmin().WithoutNamespace().Run("get").
		Args("clusterversion", "version", "-o", outputFormat).Output()
}
// find argument index in CVO container args in deployment (by arg name).
// For "key=value" args the value is returned; otherwise the whole arg.
// Returns: arg_value(string), arg_index(int), error
func getCVOcontArg(oc *exutil.CLI, argQuery string) (string, int, error) {
	rawArgs, err := oc.AsAdmin().WithoutNamespace().Run("get").
		Args("-n", "openshift-cluster-version",
			"deployment", "cluster-version-operator",
			"-o", "jsonpath={.spec.template.spec.containers[0].args}").Output()
	if err != nil {
		e2e.Logf("Error getting cvo deployment args: %v", err)
		return "", -1, err
	}
	var containerArgs []string
	if err = json.Unmarshal([]byte(rawArgs), &containerArgs); err != nil {
		e2e.Logf("Error Unmarshal cvo deployment args: %v", err)
		return "", -1, err
	}
	for idx, arg := range containerArgs {
		if !strings.Contains(arg, argQuery) {
			continue
		}
		e2e.Logf("query '%s' found '%s' at Index: %d", argQuery, arg, idx)
		parts := strings.Split(arg, "=")
		if len(parts) > 1 {
			return parts[1], idx, nil
		}
		return parts[0], idx, nil
	}
	return "", -1, fmt.Errorf("error: cvo deployment arg %s not found", argQuery)
}
// patch resource (namespace - use "" if none, resource_name, patch).
// Returns: result(string), error
func ocJSONPatch(oc *exutil.CLI, namespace string, resource string, patch []JSONp) (patchOutput string, err error) {
	var data []byte
	data, err = json.Marshal(patch)
	if err != nil {
		e2e.Logf("ocJSONPatch Error - json.Marshal: '%v'", err)
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	cmdArgs := []string{resource, "--type=json", "--patch", string(data)}
	if namespace != "" {
		cmdArgs = append([]string{"-n", namespace}, cmdArgs...)
	}
	patchOutput, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(cmdArgs...).Output()
	e2e.Logf("patching '%s'\nwith '%s'\nresult '%s'", resource, string(data), patchOutput)
	return
}
// patch CVO container argument (arg_index, arg_value)
// Replaces the CVO deployment's container arg at the given index.
// Returns: result(string), error
func patchCVOcontArg(oc *exutil.CLI, index int, value string) (string, error) {
	argPath := fmt.Sprintf("/spec/template/spec/containers/0/args/%d", index)
	return ocJSONPatch(oc,
		"openshift-cluster-version",
		"deployment/cluster-version-operator",
		[]JSONp{{"replace", argPath, value}})
}
// Get updates by using "oc adm upgrade ..." command in the given timeout,
// polling until every string in expStrings appears in the output.
// conditional=true adds --include-not-recommended.
// Returns: true - found, false - not found
func checkUpdates(oc *exutil.CLI, conditional bool, interval time.Duration, timeout time.Duration, expStrings ...string) bool {
	var (
		cmdOut string
		err    error
	)
	cmdArgs := []string{"upgrade"}
	if conditional {
		cmdArgs = append(cmdArgs, "--include-not-recommended")
	}
	pollErr := wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		cmdOut, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args(cmdArgs...).Output()
		for _, str := range expStrings {
			if !strings.Contains(cmdOut, str) || err != nil {
				return false, err
			}
		}
		return true, nil
	})
	if pollErr != nil {
		e2e.Logf("last oc adm upgrade returned:\n%s\nstderr: %v\nexpecting:\n%s\n", cmdOut, err, strings.Join(expStrings, "\n\n"))
		return false
	}
	return true
}
// change the spec.capabilities
// if base==true, change the baselineCapabilitySet, otherwise, change the additionalEnabledCapabilities
// Passing cap==nil removes the selected field instead of setting it.
func changeCap(oc *exutil.CLI, base bool, cap interface{}) (string, error) {
	path := "/spec/capabilities/additionalEnabledCapabilities"
	if base {
		path = "/spec/capabilities/baselineCapabilitySet"
	}
	if cap == nil {
		return ocJSONPatch(oc, "", "clusterversion/version", []JSONp{{"remove", path, nil}})
	}
	// if spec.capabilities is not present, patch to add capabilities
	currentCaps, err := getCVObyJP(oc, ".spec.capabilities")
	if err != nil {
		return "", err
	}
	if currentCaps == "" {
		emptyCaps := make(map[string]interface{})
		if _, err = ocJSONPatch(oc, "", "clusterversion/version", []JSONp{{"add", "/spec/capabilities", emptyCaps}}); err != nil {
			return "", err
		}
	}
	return ocJSONPatch(oc, "", "clusterversion/version", []JSONp{{"add", path, cap}})
}
// verifies that the capabilities list passed to this func have resources enabled in a cluster
func verifyCaps(oc *exutil.CLI, caps []string) (err error) {
	// Important! this map should be updated each version with new capabilities, as they added to openshift.
	capability_operators := map[string]string{
		"baremetal":                  "baremetal",
		"Console":                    "console",
		"Insights":                   "insights",
		"marketplace":                "marketplace",
		"Storage":                    "storage",
		"openshift-samples":          "openshift-samples",
		"CSISnapshot":                "csi-snapshot-controller",
		"NodeTuning":                 "node-tuning",
		"MachineAPI":                 "machine-api",
		"Build":                      "build",
		"DeploymentConfig":           "dc",
		"ImageRegistry":              "image-registry",
		"OperatorLifecycleManager":   "operator-lifecycle-manager",
		"CloudCredential":            "cloud-credential",
		"Ingress":                    "ingress",
		"CloudControllerManager":     "cloud-controller-manager",
		"OperatorLifecycleManagerV1": "olm",
	}
	for _, cap := range caps {
		// if there's a new cap missing in capability_operators - return error
		operator, known := capability_operators[cap]
		if !known || operator == "" {
			return fmt.Errorf("new unknown capability '%v'. please update automation: capability_operators in utils.go", cap)
		}
		prefix := "co"
		if cap == "Build" || cap == "DeploymentConfig" {
			prefix = "-A" // special case for caps that isn't co but a resource
		}
		if _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(prefix, operator).Output(); err != nil {
			return
		}
	}
	return
}
// waits for string 'message' to appear in CVO 'jsonpath'.
// or waits for message to disappear if waitingToAppear=false.
// interval/timeout are raw second counts (multiplied by time.Second internally).
// returns error if any.
func waitForCVOStatus(oc *exutil.CLI, interval time.Duration, timeout time.Duration, message string, jsonpath string, waitingToAppear bool) (err error) {
	var prefix, out string
	// prefix only affects log/error wording ("...not to contain...").
	if !waitingToAppear {
		prefix = "not "
	}
	e2e.Logf("Waiting for CVO '%s' %sto contain '%s'", jsonpath, prefix, message)
	err = wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		out, err = getCVObyJP(oc, jsonpath)
		// Done when presence of the message matches the desired polarity.
		return strings.Contains(out, message) == waitingToAppear, err
	})
	if err != nil {
		// Distinguish a plain timeout (dump all conditions for debugging)
		// from an error returned by the oc call itself.
		if strings.Compare(err.Error(), "timed out waiting for the condition") == 0 {
			out, _ = getCVObyJP(oc, ".status.conditions")
			err = fmt.Errorf("reached time limit of %s waiting for CVO %s %sto contain '%s', dumping conditions:\n%s",
				timeout*time.Second, strings.NewReplacer(".status.conditions[?(.type=='", "", "')].", " ").Replace(jsonpath), prefix, message, out)
			return
		}
		err = fmt.Errorf("while waiting for CVO %sto contain '%s', an error was received: %s %s", prefix, message, out, err.Error())
		e2e.Logf(err.Error())
	}
	return
}
// setCVOverrides marks the given resource as unmanaged via clusterversion
// /spec/overrides, then waits for CVO to surface the override in its
// Upgradeable condition, in `oc adm upgrade` output, and for Progressing
// to settle back to False.
// NOTE(review): the override entry hard-codes group "apps", so resourceKind
// is expected to be an apps/v1 kind (e.g. Deployment) — confirm for other kinds.
func setCVOverrides(oc *exutil.CLI, resourceKind string, resourceName string, resourceNamespace string) (err error) {
	// Shape of a single /spec/overrides entry.
	type ovrd struct {
		Ki string `json:"kind"`
		Na string `json:"name"`
		Ns string `json:"namespace"`
		Un bool   `json:"unmanaged"`
		Gr string `json:"group"`
	}
	var ovPatch string
	if ovPatch, err = ocJSONPatch(oc, "", "clusterversion/version", []JSONp{
		{"add", "/spec/overrides", []ovrd{{resourceKind, resourceName, resourceNamespace, true, "apps"}}}}); err != nil {
		return fmt.Errorf("patching /spec/overrides failed with: %s %v", ovPatch, err)
	}
	// upgradeable .reason may be ClusterVersionOverridesSet or MultipleReasons, but .message have to contain "overrides"
	e2e.Logf("Waiting for Upgradeable to contain overrides message...")
	if err = waitForCVOStatus(oc, 30, 8*60,
		"Disabling ownership via cluster version overrides prevents upgrades",
		".status.conditions[?(.type=='Upgradeable')].message", true); err != nil {
		return
	}
	e2e.Logf("Waiting for ClusterVersionOverridesSet in oc adm upgrade...")
	if !checkUpdates(oc, false, 30, 8*60, "ClusterVersionOverridesSet") {
		return fmt.Errorf("no overrides message in oc adm upgrade within 8m")
	}
	e2e.Logf("Waiting for Progressing=false...")
	//to workaround the fake upgrade by cv.overrrides, refer to https://issues.redhat.com/browse/OTA-586
	err = waitForCVOStatus(oc, 30, 8*60, "False",
		".status.conditions[?(.type=='Progressing')].status", true)
	return
}
// unsetCVOverrides removes clusterversion /spec/overrides and asserts the
// override message drains from the Upgradeable condition and from
// `oc adm upgrade` output. Failures abort the test via gomega assertions.
func unsetCVOverrides(oc *exutil.CLI) {
	e2e.Logf("Unset /spec/overrides...")
	_, patchErr := ocJSONPatch(oc, "", "clusterversion/version", []JSONp{{"remove", "/spec/overrides", nil}})
	o.Expect(patchErr).NotTo(o.HaveOccurred())
	e2e.Logf("Waiting overrides to disappear from cluster conditions...")
	waitErr := waitForCVOStatus(oc, 30, 8*60,
		"Disabling ownership via cluster version overrides prevents upgrades",
		".status.conditions[?(.type=='Upgradeable')].message", false)
	o.Expect(waitErr).NotTo(o.HaveOccurred())
	e2e.Logf("Check no ClusterVersionOverridesSet in `oc adm upgrade` msg...")
	upgradeOutput, cmdErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
	o.Expect(cmdErr).NotTo(o.HaveOccurred())
	o.Expect(upgradeOutput).NotTo(o.ContainSubstring("ClusterVersionOverridesSet"))
}
// Check if a non-namespace resource existed
// Asserts (via gomega) that the get itself succeeds; returns false only
// when oc reports "No resources found".
func isGlobalResourceExist(oc *exutil.CLI, resourceType string) bool {
	output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to get resource %s", resourceType)
	if !strings.Contains(output, "No resources found") {
		return true
	}
	e2e.Logf("there is no %s in this cluster!", resourceType)
	return false
}
// Check ICSP or IDMS to get mirror registry info.
// ICSP takes precedence; the returned registry is the host portion
// (everything before the first "/") of the first configured mirror.
func getMirrorRegistry(oc *exutil.CLI) (registry string, err error) {
	switch {
	case isGlobalResourceExist(oc, "ImageContentSourcePolicy"):
		registry, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy",
			"-o", "jsonpath={.items[0].spec.repositoryDigestMirrors[0].mirrors[0]}").Output()
		if err != nil {
			err = fmt.Errorf("failed to acquire mirror registry from ICSP: %v", err)
			return
		}
		registry, _, _ = strings.Cut(registry, "/")
	case isGlobalResourceExist(oc, "ImageDigestMirrorSet"):
		registry, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageDigestMirrorSet",
			"-o", "jsonpath={.items[0].spec.imageDigestMirrors[0].mirrors[0]}").Output()
		if err != nil {
			err = fmt.Errorf("failed to acquire mirror registry from IDMS: %v", err)
			return
		}
		registry, _, _ = strings.Cut(registry, "/")
	default:
		err = fmt.Errorf("no ICSP or IDMS found!")
	}
	return
}
// Run "oc adm release info" cmd to get release info of the current release
// as JSON. Uses the cluster pull secret extracted into a temp dir (cleaned
// up on return). Mirrors extractManifest's two fallbacks when the plain
// call fails:
//  1. disconnected baremetal/none platforms: retry with --insecure
//  2. C2S/SC2S AWS regions: retry against the ICSP mirror with the same digest
func getReleaseInfo(oc *exutil.CLI) (output string, err error) {
	tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	err = os.Mkdir(tempDataDir, 0755)
	defer os.RemoveAll(tempDataDir)
	if err != nil {
		err = fmt.Errorf("failed to create tempdir %s: %v", tempDataDir, err)
		return
	}
	// The cluster pull secret is needed to authenticate against the release registry.
	if err = oc.AsAdmin().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--confirm", "--to="+tempDataDir).Execute(); err != nil {
		err = fmt.Errorf("failed to extract dockerconfig: %v", err)
		return
	}
	if output, err = oc.AsAdmin().Run("adm").Args("release", "info", "-a", tempDataDir+"/.dockerconfigjson", "-ojson").Output(); err != nil {
		e2e.Logf("warning: release info failed once with:\n\"%v\"", err)
		//Workaround disconnected baremental clusters that don't have cert for the registry
		platform := exutil.CheckPlatform(oc)
		if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
			var mirror_registry string
			mirror_registry, err = getMirrorRegistry(oc)
			if mirror_registry != "" {
				if err != nil {
					err = fmt.Errorf("error out getting mirror registry: %v", err)
					return
				}
				// NOTE(review): this retry uses Execute(), so `output` stays
				// empty even on success — confirm whether that is intended.
				if err = oc.AsAdmin().Run("adm").Args("release", "info", "--insecure", "-a", tempDataDir+"/.dockerconfigjson", "-ojson").Execute(); err != nil {
					err = fmt.Errorf("warning: insecure release info for disconnected baremetal failed with:\n\"%v\"", err)
				}
				return
			}
		}
		//Workaround c2s/cs2s clusters that only have token to the mirror in pull secret
		var region, image, mirror string
		if region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure",
			"cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output(); err != nil {
			err = fmt.Errorf("failed to get cluster region: %v", err)
			return
		}
		// region us-iso-* represent C2S, us-isob-* represent SC2S
		if !strings.Contains(region, "us-iso-") && !strings.Contains(region, "us-isob-") {
			err = fmt.Errorf("oc adm release failed, and no retry for non-c2s/cs2s region: %s", region)
			return
		}
		if image, err = exutil.GetReleaseImage(oc); err != nil {
			err = fmt.Errorf("failed to get cluster release image: %v", err)
			return
		}
		if mirror, err = oc.AsAdmin().Run("get").Args("ImageContentSourcePolicy",
			"-o", "jsonpath={.items[0].spec.repositoryDigestMirrors[0].mirrors[0]}").Output(); err != nil {
			err = fmt.Errorf("failed to acquire mirror from ICSP: %v", err)
			return
		}
		// Re-point release info at the mirror, keeping the original image digest.
		if output, err = oc.AsAdmin().Run("adm").Args("release", "info",
			"--insecure", "-a", tempDataDir+"/.dockerconfigjson",
			fmt.Sprintf("%s@%s", mirror, strings.Split(image, "@")[1])).Output(); err != nil {
			err = fmt.Errorf("failed to get release info: %v", err)
			return
		}
	}
	return
}
// Get CVO pod object values by jsonpath
// Looks up the (single) pod in openshift-cluster-version, fetches the
// requested jsonpath and unmarshals it as a JSON object.
// Returns: object_value(map), error
func getCVOPod(oc *exutil.CLI, jsonpath string) (map[string]interface{}, error) {
	podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-cluster-version", "-o=jsonpath={.items[].metadata.name}").Output()
	if err != nil {
		return nil, fmt.Errorf("getting CVO pod name failed: %v", err)
	}
	raw, err := oc.AsAdmin().WithoutNamespace().Run("get").
		Args("pod", podName, "-n", "openshift-cluster-version",
			"-o", fmt.Sprintf("jsonpath={%s}", jsonpath)).Output()
	if err != nil {
		return nil, fmt.Errorf("getting CVO pod object values failed: %v", err)
	}
	var objectValue map[string]interface{}
	if err = json.Unmarshal([]byte(raw), &objectValue); err != nil {
		return nil, fmt.Errorf("unmarshal release info error: %v", err)
	}
	return objectValue, nil
}
// recoverReleaseAccepted clears any fake/forced upgrade and waits up to 8
// minutes for the ReleaseAccepted condition to return to True.
func recoverReleaseAccepted(oc *exutil.CLI) error {
	out, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "--clear").Output()
	if err != nil {
		err = fmt.Errorf("clearing upgrade failed with: %s\n%v", out, err)
		e2e.Logf(err.Error())
		return err
	}
	err = waitForCondition(oc, 30, 480, "True",
		"get", "clusterversion", "version", "-o", "jsonpath={.status.conditions[?(@.type=='ReleaseAccepted')].status}")
	if err == nil {
		return nil
	}
	// Distinguish a plain poll timeout from other errors for a clearer message.
	if strings.Compare(err.Error(), "timed out waiting for the condition") == 0 {
		err = fmt.Errorf("ReleaseAccepted condition is not back to True within 8m")
	} else {
		err = fmt.Errorf("waiting for ReleaseAccepted returned error: %s", err.Error())
	}
	e2e.Logf(err.Error())
	return err
}
// getTargetPayload resolves a by-digest release pullspec for the requested
// image stream.
//   imageType "stable"  -> latest 4.x stable release on quay.io
//   imageType "nightly" -> latest nightly for the cluster's current minor on registry.ci
// Returns the pullspec in repo@digest form, or an error.
func getTargetPayload(oc *exutil.CLI, imageType string) (releasePayload string, err error) {
	switch imageType {
	case "stable":
		latest4StableImage, err := exutil.GetLatest4StableImage()
		if err != nil {
			return "", err
		}
		// Resolve the tag to a digest so the returned pullspec is immutable.
		imageInfo, err := oc.AsAdmin().WithoutNamespace().Run("image").Args("info", latest4StableImage, "-ojson").Output()
		if err != nil {
			return "", err
		}
		imageDigest := gjson.Get(imageInfo, "digest").String()
		return fmt.Sprintf("quay.io/openshift-release-dev/ocp-release@%s", imageDigest), nil
	case "nightly":
		// Nightly images live on the CI registry, which needs the cluster pull
		// secret for authentication, so extract it to a throwaway directory.
		clusterVersion, _, err := exutil.GetClusterVersion(oc)
		if err != nil {
			return "", err
		}
		latest4NightlyImage, err := exutil.GetLatestNightlyImage(clusterVersion)
		if err != nil {
			return "", err
		}
		tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
		err = os.Mkdir(tempDataDir, 0755)
		defer os.RemoveAll(tempDataDir)
		if err != nil {
			return "", err
		}
		err = exutil.GetPullSec(oc, tempDataDir)
		if err != nil {
			return "", err
		}
		authFile := tempDataDir + "/.dockerconfigjson"
		imageInfo, err := oc.AsAdmin().WithoutNamespace().Run("image").Args("info", "-a", authFile, latest4NightlyImage, "-ojson").Output()
		if err != nil {
			return "", err
		}
		imageDigest := gjson.Get(imageInfo, "digest").String()
		return fmt.Sprintf("registry.ci.openshift.org/ocp/release@%s", imageDigest), nil
	default:
		return "", fmt.Errorf("unrecognized imageType")
	}
}
// checkCVOEvents asserts the presence/absence of regex patterns in the
// openshift-cluster-version namespace events.
// included==true, means check expected string should be included in events
// included==false, means check expected string should not be included in events
func checkCVOEvents(oc *exutil.CLI, included bool, expected []string) (err error) {
	output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", "openshift-cluster-version").Output()
	if err != nil {
		return err
	}
	e2e.Logf("the cvo event: %s", output)
	for _, exp := range expected {
		found, _ := regexp.MatchString(exp, output)
		if included && !found {
			return fmt.Errorf("msg: %s is not found in events", exp)
		}
		if !included && found {
			return fmt.Errorf("msg: %s is found in events", exp)
		}
	}
	return nil
}
// PathExists
// @Description Check if a file exists; a stat error other than "not exist" is
//              reported to the caller.
// @Param path string the file path
// @Return (bool, error)
func PathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// updateFile
// @Description Replace oldString in a given file by newString
// @Create jianl Jan 22 2025
// @Param filePath string the file path
// @Param oldString string the old string will be replaced
// @Param newString string new string will used to replace the oldString in file
// @Return nil on success, otherwise the read/write error
func updateFile(filePath string, oldString string, newString string) (err error) {
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		// Log and return instead of panicking: log.Panicf made the original
		// "return err" unreachable and crashed callers that check the error.
		log.Printf("failed reading data from file: %s", err)
		return err
	}
	updatedFileData := strings.Replace(string(data), oldString, newString, -1)
	err = os.WriteFile(filePath, []byte(updatedFileData), 0644)
	if err != nil {
		log.Printf("failed to write file: %s", err)
		return err
	}
	return nil
}
|
package cvo
| ||||
function
|
openshift/openshift-tests-private
|
465a867f-7d96-4c20-a7ae-7b89421412ce
|
copy
|
['"io/ioutil"', '"os"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// copy duplicates src to target (mode 0755), removing any pre-existing target
// first so the write starts from a clean file.
func copy(src, target string) error {
	data, err := ioutil.ReadFile(src)
	if err != nil {
		return err
	}
	if exists, _ := PathExists(target); exists {
		os.Remove(target)
	}
	return ioutil.WriteFile(target, data, 0o755)
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
c86aabee-6d4b-466c-94df-815f1a72aa5c
|
GetDeploymentsYaml
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// GetDeploymentsYaml dumps the named deployment in the given namespace as YAML
// and returns it; the YAML is also logged for test debugging.
func GetDeploymentsYaml(oc *exutil.CLI, deploymentName string, namespace string) (string, error) {
	e2e.Logf("Dumping deployments %s from namespace %s", deploymentName, namespace)
	manifest, dumpErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", deploymentName, "-n", namespace, "-o", "yaml").Output()
	if dumpErr != nil {
		e2e.Logf("Error dumping deployments: %v", dumpErr)
		return "", dumpErr
	}
	e2e.Logf(manifest)
	return manifest, nil
}
|
cvo
| |||||
function
|
openshift/openshift-tests-private
|
e2db60c4-218b-4a64-9a8d-162a66035bf0
|
PodExec
|
['"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// PodExec runs a bash script inside the given pod, retrying the exec for up to
// 3 minutes (the poll callback always returns true, so a successful exec ends
// the loop immediately and a failure is retried by the poller).
func PodExec(oc *exutil.CLI, script string, namespace string, podName string) (string, error) {
	var result string
	pollErr := wait.PollImmediate(1*time.Second, 3*time.Minute, func() (bool, error) {
		output, execErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, podName, "--", "/bin/bash", "-c", script).Output()
		result = output
		return true, execErr
	})
	return result, pollErr
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
568d0ee6-d3e5-49ed-9475-8b85bff136c7
|
waitForAlert
|
['"encoding/json"', '"fmt"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// waitForAlert polls the in-cluster Prometheus API until an alert with the
// given alertname appears, optionally requiring it to reach a specific state.
// Params:
//   alertString - value of the alertname label to look for
//   interval, timeout - poll cadence / deadline; callers pass unit-less values
//     that are multiplied by time.Second below
//   state - "", "pending" or "firing"; "firing" additionally requires the
//     alert to be pending while the timeout window is still open
// Returns: (found, alert annotations, error).
func waitForAlert(oc *exutil.CLI, alertString string, interval time.Duration, timeout time.Duration, state string) (bool, map[string]string, error) {
	if len(state) > 0 {
		if state != "pending" && state != "firing" {
			return false, nil, fmt.Errorf("state %s is not supported", state)
		}
	}
	e2e.Logf("Waiting for alert %s pending or firing...", alertString)
	// Discover the Prometheus route host and a ServiceAccount token so the
	// alerts API can be queried with curl from the test runner.
	url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
		"-n", "openshift-monitoring",
		"route", "prometheus-k8s",
		"-o=jsonpath={.spec.host}").Output()
	if err != nil || len(url) == 0 {
		return false, nil, fmt.Errorf("error getting the hostname of route prometheus-k8s %v", err)
	}
	token, err := exutil.GetSAToken(oc)
	if err != nil || len(token) == 0 {
		return false, nil, fmt.Errorf("error getting SA token %v", err)
	}
	// Three jq variants over the same endpoint: full alert, annotations only,
	// and state only.
	alertCMD := fmt.Sprintf("curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\")'", token, url, alertString)
	alertAnnoCMD := fmt.Sprintf("curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\").annotations'", token, url, alertString)
	alertStateCMD := fmt.Sprintf("curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\").state'", token, url, alertString)
	// Poll returns timed out waiting for the condition when timeout is reached
	count := 0
	if pollErr := wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		count++
		metrics, err := exec.Command("bash", "-c", alertCMD).Output()
		if err != nil {
			e2e.Logf("Error retrieving prometheus alert metrics: %v, retry %d...", err, count)
			return false, nil
		}
		if len(string(metrics)) == 0 {
			e2e.Logf("Prometheus alert metrics nil, retry %d...", count)
			return false, nil
		}
		if len(state) > 0 {
			alertState, err := exec.Command("bash", "-c", alertStateCMD).Output()
			if err != nil {
				return false, fmt.Errorf("error getting alert state")
			}
			if state == "pending" && string(alertState) != "pending" {
				return false, fmt.Errorf("alert state is not expected, expected pending but actual is %s", string(alertState))
			}
			if state == "firing" {
				// While the timeout window is open, "pending" is acceptable
				// and we keep waiting for it to transition to "firing".
				if int(interval)*count < int(timeout) {
					if string(alertState) == "pending" {
						e2e.Logf("Prometheus alert state is pending, waiting for firing, retry %d...", count)
						return false, nil
					}
					return false, fmt.Errorf("alert state is not expected, expected pending in the waiting time window but actual is %s", string(alertState))
				} else if string(alertState) == "firing" {
					return true, nil
				} else {
					return false, fmt.Errorf("alert state is not expected, expected firing when the waiting time is reached but actual is %s", string(alertState))
				}
			}
			return true, nil
		}
		return true, nil
	}); pollErr != nil {
		return false, nil, pollErr
	}
	e2e.Logf("Alert %s found", alertString)
	annotation, err := exec.Command("bash", "-c", alertAnnoCMD).Output()
	if err != nil || len(string(annotation)) == 0 {
		return true, nil, fmt.Errorf("error getting annotation for alert %s", alertString)
	}
	var annoMap map[string]string
	if err := json.Unmarshal(annotation, &annoMap); err != nil {
		return true, nil, fmt.Errorf("error converting annotation to map for alert %s", alertString)
	}
	return true, annoMap, nil
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
7b25e70a-0125-4ed8-b46e-1d2faab46e5a
|
waitForCondition
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// waitForCondition polls an arbitrary `oc <args...>` command until its output
// (with newlines stripped) equals expectedCondition, or the timeout elapses.
// interval and timeout are unit-less values multiplied by time.Second.
func waitForCondition(oc *exutil.CLI, interval time.Duration, timeout time.Duration, expectedCondition string, args ...string) error {
	e2e.Logf("Checking condition for: oc %v", args)
	return wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		output, cmdErr := oc.AsAdmin().WithoutNamespace().Run(args[0]).Args(args[1:]...).Output()
		if cmdErr != nil {
			e2e.Logf("Checking condition error:%v", cmdErr)
			return false, cmdErr
		}
		condition := strings.Replace(string(output), "\n", "", -1)
		if strings.Compare(condition, expectedCondition) != 0 {
			e2e.Logf("Current condition is: '%s' Waiting for condition to be '%s'...", condition, expectedCondition)
			return false, nil
		}
		e2e.Logf("Current condition is: %v", condition)
		return true, nil
	})
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
290836cf-730f-48ad-a050-3b0fa81a192f
|
getAlert
|
['"encoding/json"', '"fmt"', '"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getAlert queries the in-cluster Prometheus alerts API and returns the first
// alert matching the given jq selector expression, unmarshalled into a map.
// Returns nil (after logging) on any lookup, query, or decode failure.
func getAlert(oc *exutil.CLI, alertSelector string) map[string]interface{} {
	var alertInfo map[string]interface{}
	// Resolve the Prometheus route host and a ServiceAccount token for auth.
	url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
		"-n", "openshift-monitoring",
		"route", "prometheus-k8s",
		"-o=jsonpath={.spec.host}").Output()
	if err != nil || len(url) == 0 {
		e2e.Logf("error getting the hostname of route prometheus-k8s %v", err)
		return nil
	}
	token, err := exutil.GetSAToken(oc)
	if err != nil || len(token) == 0 {
		e2e.Logf("error getting SA token %v", err)
		return nil
	}
	command := fmt.Sprintf("curl -skH \"Authorization: Bearer %s\" https://%s/api/v1/alerts"+
		" | jq -r '[.data.alerts[]|select(%s)][0]'", token, url, alertSelector)
	output, err := exec.Command("bash", "-c", command).Output()
	if err != nil {
		// Redact most of the bearer token before logging the failed command.
		e2e.Logf("Getting alert error:%v for %s", err, strings.ReplaceAll(command, token[5:], "*****"))
		return nil
	}
	if len(output) == 0 {
		e2e.Logf("No alert found for %v", alertSelector)
		return nil
	}
	err = json.Unmarshal(output, &alertInfo)
	if err != nil {
		e2e.Logf("Unmarshal alert error:%v in %s for %s", err, output, strings.ReplaceAll(command, token[5:], "*****"))
		return nil
	}
	e2e.Logf("Alert found: %v", alertInfo)
	return alertInfo
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
84190d4d-0ba4-4644-8cbb-4b773354ccf8
|
getAlertByName
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getAlertByName looks up the first alert whose alertname and name labels both
// match the given values.
func getAlertByName(oc *exutil.CLI, alertName string, name string) map[string]interface{} {
	selector := fmt.Sprintf(".labels.alertname == \"%s\" and .labels.name == \"%s\"", alertName, name)
	return getAlert(oc, selector)
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
733b0011-7493-429a-b51a-90bbacb88c63
|
CreateBucket
|
['"context"', '"time"', '"cloud.google.com/go/storage"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// CreateBucket creates a GCS bucket under the given project, bounding the API
// call to 10 seconds so a stuck request cannot stall the test run.
func CreateBucket(client *storage.Client, projectID, bucketName string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	return client.Bucket(bucketName).Create(ctx, projectID, nil)
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
83c802a0-c632-4003-bf29-b43f3db80371
|
UploadFile
|
['"context"', '"fmt"', '"io"', '"os"', '"time"', '"cloud.google.com/go/storage"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// UploadFile streams a local file into the given GCS bucket/object with a
// 50-second deadline on the upload.
func UploadFile(client *storage.Client, bucket, object, file string) error {
	src, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("os.Open: %v", err)
	}
	defer src.Close()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
	defer cancel()
	// Upload via storage.Writer; Close finalizes the object and must be checked.
	dst := client.Bucket(bucket).Object(object).NewWriter(ctx)
	if _, err = io.Copy(dst, src); err != nil {
		return fmt.Errorf("io.Copy: %v", err)
	}
	if err = dst.Close(); err != nil {
		return fmt.Errorf("Writer.Close: %v", err)
	}
	return nil
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
5d9390ed-42df-406a-8cde-e563c0ae97a8
|
MakePublic
|
['"context"', '"time"', '"cloud.google.com/go/storage"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// MakePublic grants AllUsers read access to the given GCS object so it can be
// fetched anonymously over HTTPS; the ACL call is bounded to 10 seconds.
func MakePublic(client *storage.Client, bucket, object string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	return client.Bucket(bucket).Object(object).ACL().Set(ctx, storage.AllUsers, storage.RoleReader)
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
25e481f5-2828-4d04-88eb-a42878b17b5a
|
DeleteObject
|
['"context"', '"time"', '"cloud.google.com/go/storage"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// DeleteObject removes an object from the given GCS bucket; an empty object
// name is treated as a no-op so cleanup paths can call it unconditionally.
func DeleteObject(client *storage.Client, bucket, object string) error {
	if object == "" {
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	if err := client.Bucket(bucket).Object(object).Delete(ctx); err != nil {
		return err
	}
	e2e.Logf("Object: %v deleted", object)
	return nil
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
985d5371-e21f-4f0e-bbc8-e98527e6bb2c
|
DeleteBucket
|
['"context"', '"time"', '"cloud.google.com/go/storage"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// DeleteBucket removes a GCS bucket; an empty bucket name is treated as a
// no-op so cleanup paths can call it unconditionally.
func DeleteBucket(client *storage.Client, bucketName string) error {
	if bucketName == "" {
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	if err := client.Bucket(bucketName).Delete(ctx); err != nil {
		return err
	}
	e2e.Logf("Bucket: %v deleted", bucketName)
	return nil
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
edf2bf7e-3320-4688-a580-1d4e63e3da66
|
GenerateReleaseVersion
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// GenerateReleaseVersion returns the cluster's current desired version with
// the minor version bumped by one (e.g. 4.15.3 -> 4.16.3), or "" on any
// lookup/parse failure.
func GenerateReleaseVersion(oc *exutil.CLI) string {
	sourceVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.version}").Output()
	if err != nil {
		return ""
	}
	parts := strings.Split(sourceVersion, ".")
	if len(parts) < 2 {
		return ""
	}
	minor, convErr := strconv.Atoi(parts[1])
	if convErr != nil {
		return ""
	}
	parts[1] = strconv.Itoa(minor + 1)
	return strings.Join(parts, ".")
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
1734ad36-0671-4a72-bb0e-6e06fdd6119a
|
GenerateReleasePayload
|
['"crypto/sha256"', '"encoding/hex"', '"math/rand"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// GenerateReleasePayload returns the cluster's current desired release image
// pullspec with its digest replaced by a freshly generated random sha256, or
// "" on any failure. Used to fabricate a non-existent target payload.
func GenerateReleasePayload(oc *exutil.CLI) string {
	sourcePayload, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.image}").Output()
	if err != nil {
		return ""
	}
	// Hash 10 random bytes to obtain a syntactically valid sha256 digest.
	data := make([]byte, 10)
	if _, randErr := rand.Read(data); randErr != nil {
		return ""
	}
	sum := sha256.Sum256(data)
	fakeDigest := hex.EncodeToString(sum[:])
	parts := strings.Split(sourcePayload, ":")
	if len(parts) < 2 {
		return ""
	}
	parts[1] = fakeDigest
	return strings.Join(parts, ":")
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
ba353284-d7b0-418b-8a0f-534de8dad340
|
updateGraph
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// updateGraph fills a Cincinnati graph template in place, replacing the
// sourceversion/sourcepayload/targetversion/targetpayload placeholders with
// the cluster's current version/payload and freshly generated target values.
// Returns the (mutated) template path, target version and target payload.
func updateGraph(oc *exutil.CLI, graphTemplate string) (string, string, string, error) {
	e2e.Logf("Graph Template: %v", graphTemplate)
	// Assume the cluster is not being upgraded, then desired version will be the current version
	sourceVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.version}").Output()
	if err != nil {
		return "", "", "", err
	}
	sourcePayload, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.image}").Output()
	if err != nil {
		return "", "", "", err
	}
	targetVersion := GenerateReleaseVersion(oc)
	if targetVersion == "" {
		return "", "", "", fmt.Errorf("error get target version")
	}
	targetPayload := GenerateReleasePayload(oc)
	if targetPayload == "" {
		return "", "", "", fmt.Errorf("error get target payload")
	}
	// Apply all placeholder substitutions with one loop instead of four
	// copy-pasted call/err blocks.
	replacements := []struct{ placeholder, value string }{
		{"sourceversion", sourceVersion},
		{"sourcepayload", sourcePayload},
		{"targetversion", targetVersion},
		{"targetpayload", targetPayload},
	}
	for _, r := range replacements {
		if err = updateFile(graphTemplate, r.placeholder, r.value); err != nil {
			return "", "", "", err
		}
	}
	return graphTemplate, targetVersion, targetPayload, nil
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
9cbef798-a72e-48f1-8134-22c5ff78e1bb
|
buildGraph
|
['"fmt"', '"io"', '"net/http"', '"path/filepath"', '"time"', '"cloud.google.com/go/storage"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// buildGraph renders a Cincinnati graph template, uploads it to a fresh GCS
// bucket, makes it public, and returns the public URL plus the bucket/object
// names (for cleanup) and the generated target version/payload.
// graphName may be an absolute path or a filename under testdata/ota/cvo.
func buildGraph(client *storage.Client, oc *exutil.CLI, projectID string, graphName string) (
	url string, bucket string, object string, targetVersion string, targetPayload string, err error) {
	var graphFile string
	var resp *http.Response
	var body []byte
	// If given a full file path, then we use it directly, otherwith find it in fixture folder
	if exit, _ := PathExists(graphName); !exit {
		graphDataDir := exutil.FixturePath("testdata", "ota/cvo")
		graphName = filepath.Join(graphDataDir, graphName)
	}
	if graphFile, targetVersion, targetPayload, err = updateGraph(oc, graphName); err != nil {
		return
	}
	e2e.Logf("Graph file: %v updated", graphFile)
	// Give the bucket a unique name
	bucket = fmt.Sprintf("ocp-ota-%d", time.Now().Unix())
	if err = CreateBucket(client, projectID, bucket); err != nil {
		return
	}
	e2e.Logf("Bucket: %v created", bucket)
	// Give the object a unique name
	object = fmt.Sprintf("graph-%d", time.Now().Unix())
	if err = UploadFile(client, bucket, object, graphFile); err != nil {
		return
	}
	e2e.Logf("Object: %v uploaded", object)
	// Make the object public
	if err = MakePublic(client, bucket, object); err != nil {
		return
	}
	e2e.Logf("Object: %v public", object)
	url = fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object)
	// testing endpoint accessible and logging graph contents
	// NOTE: fetch failures here are intentionally non-fatal; the URL is
	// returned regardless and err stays nil on the happy path.
	if resp, err = http.Get(url); err == nil {
		defer resp.Body.Close()
		if body, err = io.ReadAll(resp.Body); err == nil {
			e2e.Logf(string(body))
		}
	}
	return
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
07f8055e-11da-433b-b57e-749bca4a4f13
|
restoreCVSpec
|
['"encoding/json"', '"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// restoreCVSpec restores clusterversion spec fields after a test mutated them.
//   upstream - value to restore; "" removes the field; "nochange" skips it
//   channel  - channel to restore; "nochange" skips it
// Failures are logged but not returned: this is best-effort cleanup.
func restoreCVSpec(upstream string, channel string, oc *exutil.CLI) {
	e2e.Logf("Restoring upgrade graph to '%s' channel to '%s'", upstream, channel)
	if channel != "nochange" {
		_ = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "channel", "--allow-explicit-channel", channel).Execute()
		// Give the CVO a moment to reflect the channel change before verifying.
		time.Sleep(5 * time.Second)
		currChannel, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[].spec.channel}").Output()
		if currChannel != channel {
			e2e.Logf("Error on channel recovery, expected %s, but got %s", channel, currChannel)
		}
	}
	if upstream != "nochange" {
		if upstream == "" {
			// Empty upstream means the field was originally unset: remove it.
			_ = oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterversion/version", "--type=json", "-p", "[{\"op\":\"remove\", \"path\":\"/spec/upstream\"}]").Execute()
		} else {
			_ = oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterversion/version", "--type=merge", "--patch", fmt.Sprintf("{\"spec\":{\"upstream\":\"%s\"}}", upstream)).Execute()
		}
		currUpstream, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[].spec.upstream}").Output()
		if currUpstream != upstream {
			e2e.Logf("Error on upstream recovery, expected %s, but got %s", upstream, currUpstream)
		}
	}
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
89cdc5bb-c8c6-4d70-8145-4925f709501f
|
extractManifest
|
['"fmt"', '"os"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// extractManifest extracts the cluster pull secret and the current release's
// manifests into a new temp directory (returned as tempDataDir; the caller is
// responsible for removing it). On extract failure it retries with two
// platform-specific workarounds: insecure extract for disconnected baremetal
// mirrors, and extract-from-mirror for C2S/SC2S AWS regions.
func extractManifest(oc *exutil.CLI) (tempDataDir string, err error) {
	tempDataDir = filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	if err = os.Mkdir(tempDataDir, 0755); err != nil {
		err = fmt.Errorf("failed to create directory: %v", err)
		return
	}
	if err = oc.AsAdmin().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--confirm", "--to="+tempDataDir).Execute(); err != nil {
		err = fmt.Errorf("failed to extract dockerconfig: %v", err)
		return
	}
	manifestDir := filepath.Join(tempDataDir, "manifest")
	if err = oc.AsAdmin().Run("adm").Args("release", "extract", "--to", manifestDir, "-a", tempDataDir+"/.dockerconfigjson").Execute(); err != nil {
		e2e.Logf("warning: release extract failed once with:\n\"%v\"", err)
		//Workaround disconnected baremental clusters that don't have cert for the registry
		platform := exutil.CheckPlatform(oc)
		if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
			var mirror_registry string
			mirror_registry, err = exutil.GetMirrorRegistry(oc)
			// Only take this path when a mirror registry is configured;
			// otherwise fall through to the C2S/SC2S workaround below.
			if mirror_registry != "" {
				if err != nil {
					err = fmt.Errorf("error out getting mirror registry: %v", err)
					return
				}
				// Retry the extract ignoring the registry's (missing) TLS cert.
				if err = oc.AsAdmin().Run("adm").Args("release", "extract", "--insecure", "--to", manifestDir, "-a", tempDataDir+"/.dockerconfigjson").Execute(); err != nil {
					err = fmt.Errorf("warning: insecure release extract for disconnected baremetal failed with:\n\"%v\"", err)
				}
				return
			}
		}
		//Workaround c2s/cs2s clusters that only have token to the mirror in pull secret
		var region, image, mirror string
		if region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure",
			"cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output(); err != nil {
			err = fmt.Errorf("failed to get cluster region: %v", err)
			return
		}
		// region us-iso-* represent C2S, us-isob-* represent SC2S
		if !strings.Contains(region, "us-iso-") && !strings.Contains(region, "us-isob-") {
			err = fmt.Errorf("oc adm release failed, and no retry for non-c2s/cs2s region: %s", region)
			return
		}
		if image, err = exutil.GetReleaseImage(oc); err != nil {
			err = fmt.Errorf("failed to get cluster release image: %v", err)
			return
		}
		if mirror, err = oc.AsAdmin().Run("get").Args("ImageContentSourcePolicy",
			"-o", "jsonpath={.items[0].spec.repositoryDigestMirrors[0].mirrors[0]}").Output(); err != nil {
			err = fmt.Errorf("failed to acquire mirror from ICSP: %v", err)
			return
		}
		// Re-extract from the mirror, keeping the release image's digest.
		if err = oc.AsAdmin().Run("adm").Args("release", "extract",
			"--from", fmt.Sprintf("%s@%s", mirror, strings.Split(image, "@")[1]),
			"--to", manifestDir, "-a", tempDataDir+"/.dockerconfigjson", "--insecure").Execute(); err != nil {
			err = fmt.Errorf("failed to extract manifests: %v", err)
			return
		}
	}
	return
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
13dd85d2-c332-4bcd-ac8b-bedd3c91d2e2
|
extractIncludedManifestWithInstallcfg
|
['"fmt"', '"os"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// extractIncludedManifestWithInstallcfg extracts the manifests included for a
// given install-config from a release image into a new temp directory
// (returned as tempDataDir; caller cleans up). When creds is true only
// credentials-requests are extracted, optionally filtered by cloud; cloud
// without creds is an invalid combination.
func extractIncludedManifestWithInstallcfg(oc *exutil.CLI, creds bool, cfg string, image string, cloud string) (tempDataDir string, err error) {
	tempDataDir = filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	var out string
	if err = os.Mkdir(tempDataDir, 0755); err != nil {
		err = fmt.Errorf("failed to create directory: %v", err)
		return
	}
	switch {
	case creds && cloud != "":
		out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "extract", "--install-config", cfg, "--included", "--credentials-requests", "--cloud", cloud, "--from", image, "--to", tempDataDir).Output()
	case creds:
		out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "extract", "--install-config", cfg, "--included", "--credentials-requests", "--from", image, "--to", tempDataDir).Output()
	case cloud != "":
		err = fmt.Errorf("--cloud only works with --credentials-requests,creds_var: %v,cloud_var: %v", creds, cloud)
	default:
		out, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "extract", "--install-config", cfg, "--included", "--from", image, "--to", tempDataDir).Output()
	}
	if err != nil {
		err = fmt.Errorf("failed to extract manifest: %v, command output:%v", err, out)
		return
	}
	return
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
1637c6a2-f067-448b-abec-4d5946de5f2b
|
getDefaultCapsInCR
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getDefaultCapsInCR returns the default capability combinations expected in
// cluster resources for a given OCP minor version, or nil (with a log) for an
// unknown version. 4.16 through 4.19 share one list, so the previously
// duplicated case bodies are merged into a single multi-value case.
func getDefaultCapsInCR(version string) []string {
	switch version {
	case "4.16", "4.17", "4.18", "4.19":
		return []string{"CloudCredential", "CloudCredential+CloudControllerManager", "CloudCredential+Ingress", "MachineAPI+CloudCredential", "ImageRegistry+CloudCredential", "Storage+CloudCredential"}
	case "4.15":
		return []string{"CloudCredential", "MachineAPI+CloudCredential", "ImageRegistry+CloudCredential", "Storage+CloudCredential"}
	case "4.14":
		return []string{"Storage", "MachineAPI"}
	default:
		e2e.Logf("Unknown version:%s detected!", version)
		return nil
	}
}
|
cvo
| |||||
function
|
openshift/openshift-tests-private
|
bf66d5fa-7906-43e3-8bd6-823002e0691e
|
getRandomPlatform
|
['"math/rand"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getRandomPlatform returns one of the supported platform names chosen
// uniformly at random. Fixes an off-by-one: the original used
// Intn(len(types)-1), which could never select the last entry ("vsphere").
func getRandomPlatform() string {
	types := [...]string{"aws", "azure", "gcp", "vsphere"}
	seed := rand.New(rand.NewSource(time.Now().UnixNano()))
	index := seed.Intn(len(types))
	return types[index]
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
def41d01-9017-481f-9873-65d70ef96ca6
|
getRandomString
|
['"math/rand"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getRandomString returns an 8-character random string drawn from lowercase
// letters and digits, seeded from the current time.
func getRandomString() string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]byte, 8)
	for i := range out {
		out[i] = alphabet[rng.Intn(len(alphabet))]
	}
	return string(out)
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
12993041-6e41-4574-97f7-1d8394d61fe8
|
getCVObyJP
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getCVObyJP reads the given jsonpath from the clusterversion/version object.
func getCVObyJP(oc *exutil.CLI, jsonpath string) (string, error) {
	jp := fmt.Sprintf("jsonpath={%s}", jsonpath)
	return oc.AsAdmin().WithoutNamespace().Run("get").
		Args("clusterversion", "version", "-o", jp).Output()
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
c7259f53-a699-42ae-9dce-3ab2730f841e
|
getCVOcontArg
|
['"encoding/json"', '"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getCVOcontArg searches the CVO deployment's container args for one
// containing argQuery.
// Returns: the arg's value (text after "=", or the whole arg if it has no
// "="), its index in the args array, and an error if not found.
func getCVOcontArg(oc *exutil.CLI, argQuery string) (string, int, error) {
	depArgs, err := oc.AsAdmin().WithoutNamespace().Run("get").
		Args("-n", "openshift-cluster-version",
			"deployment", "cluster-version-operator",
			"-o", "jsonpath={.spec.template.spec.containers[0].args}").Output()
	if err != nil {
		e2e.Logf("Error getting cvo deployment args: %v", err)
		return "", -1, err
	}
	// The jsonpath output is a JSON array of strings; decode it.
	var result []string
	err = json.Unmarshal([]byte(depArgs), &result)
	if err != nil {
		e2e.Logf("Error Unmarshal cvo deployment args: %v", err)
		return "", -1, err
	}
	for index, arg := range result {
		if strings.Contains(arg, argQuery) {
			e2e.Logf("query '%s' found '%s' at Index: %d", argQuery, arg, index)
			// Split "--flag=value" style args; plain flags are returned whole.
			val := strings.Split(arg, "=")
			if len(val) > 1 {
				return val[1], index, nil
			}
			return val[0], index, nil
		}
	}
	return "", -1, fmt.Errorf("error: cvo deployment arg %s not found", argQuery)
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
10a4649c-110e-4d63-bd84-88c9b756d40a
|
ocJSONPatch
|
['"encoding/json"']
|
['JSONp']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// ocJSONPatch applies a JSON-type patch to the given resource.
//   namespace - "" for cluster-scoped resources
//   resource  - e.g. "clusterversion/version" or "deployment/foo"
//   patch     - list of JSON patch operations to marshal and apply
// Returns the `oc patch` output and error. Marshal failures abort the test
// via o.Expect since they indicate a programming error in the caller.
func ocJSONPatch(oc *exutil.CLI, namespace string, resource string, patch []JSONp) (patchOutput string, err error) {
	p, err := json.Marshal(patch)
	if err != nil {
		e2e.Logf("ocJSONPatch Error - json.Marshal: '%v'", err)
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	if namespace != "" {
		patchOutput, err = oc.AsAdmin().WithoutNamespace().Run("patch").
			Args("-n", namespace, resource, "--type=json", "--patch", string(p)).Output()
	} else {
		patchOutput, err = oc.AsAdmin().WithoutNamespace().Run("patch").
			Args(resource, "--type=json", "--patch", string(p)).Output()
	}
	e2e.Logf("patching '%s'\nwith '%s'\nresult '%s'", resource, string(p), patchOutput)
	return
}
|
cvo
| |||
function
|
openshift/openshift-tests-private
|
1ff88f93-097f-4c8c-95e6-d6cf425f2078
|
patchCVOcontArg
|
['"fmt"']
|
['JSONp']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// patchCVOcontArg replaces the CVO deployment container arg at the given
// index with value (index as returned by getCVOcontArg).
func patchCVOcontArg(oc *exutil.CLI, index int, value string) (string, error) {
	argPath := fmt.Sprintf("/spec/template/spec/containers/0/args/%d", index)
	return ocJSONPatch(oc,
		"openshift-cluster-version",
		"deployment/cluster-version-operator",
		[]JSONp{{"replace", argPath, value}})
}
|
cvo
| |||
function
|
openshift/openshift-tests-private
|
c4dd35ef-45af-4398-a779-0efed84cc9c1
|
checkUpdates
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// checkUpdates polls `oc adm upgrade` (with --include-not-recommended when
// conditional is true) every `interval` seconds, up to `timeout` seconds,
// until the output contains every string in expStrings. It returns true on
// success and false on timeout or command error, logging the last output.
//
// Fix: the command error is now checked before the substring loop. Previously,
// when the command failed and expStrings was empty (or a stale cmdOut happened
// to contain all expected strings), the condition returned true despite the
// error, misreporting success.
func checkUpdates(oc *exutil.CLI, conditional bool, interval time.Duration, timeout time.Duration, expStrings ...string) bool {
	var (
		cmdOut string
		err    error
	)
	if pollErr := wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		if conditional {
			cmdOut, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "--include-not-recommended").Output()
		} else {
			cmdOut, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
		}
		if err != nil {
			// Abort polling on command failure (same as the original's
			// abort-on-error behavior, but applied unconditionally).
			return false, err
		}
		for _, str := range expStrings {
			if !strings.Contains(cmdOut, str) {
				return false, nil
			}
		}
		return true, nil
	}); pollErr != nil {
		e2e.Logf("last oc adm upgrade returned:\n%s\nstderr: %v\nexpecting:\n%s\n", cmdOut, err, strings.Join(expStrings, "\n\n"))
		return false
	}
	return true
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
dfd78c6c-c03c-4c98-af1d-89aedbfb57d1
|
changeCap
|
['JSONp']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// changeCap patches the spec.capabilities section of clusterversion/version.
// When base is true it targets baselineCapabilitySet, otherwise
// additionalEnabledCapabilities. A nil cap removes the targeted field; any
// other value is added, creating spec.capabilities first if it is absent.
func changeCap(oc *exutil.CLI, base bool, cap interface{}) (string, error) {
	spec := "/spec/capabilities/additionalEnabledCapabilities"
	if base {
		spec = "/spec/capabilities/baselineCapabilitySet"
	}
	if cap == nil {
		return ocJSONPatch(oc, "", "clusterversion/version", []JSONp{{"remove", spec, nil}})
	}
	// If spec.capabilities is not present yet, add an empty object first so
	// that the nested add below has a parent to attach to.
	orgCap, err := getCVObyJP(oc, ".spec.capabilities")
	if err != nil {
		return "", err
	}
	if orgCap == "" {
		if _, err = ocJSONPatch(oc, "", "clusterversion/version",
			[]JSONp{{"add", "/spec/capabilities", make(map[string]interface{})}}); err != nil {
			return "", err
		}
	}
	return ocJSONPatch(oc, "", "clusterversion/version", []JSONp{{"add", spec, cap}})
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
58b07bd1-a79d-418d-97d1-2191bfb8ca42
|
verifyCaps
|
['"fmt"', '"cloud.google.com/go/storage"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// verifyCaps checks that every capability in caps is actually enabled on the
// cluster by querying the resource it maps to (usually a clusteroperator;
// Build and DeploymentConfig map to plain API resources instead). It returns
// an error for an unknown capability or a failed lookup.
func verifyCaps(oc *exutil.CLI, caps []string) (err error) {
	// Important! this map should be updated each version with new capabilities, as they added to openshift.
	capability_operators := map[string]string{
		"baremetal":                  "baremetal",
		"Console":                    "console",
		"Insights":                   "insights",
		"marketplace":                "marketplace",
		"Storage":                    "storage",
		"openshift-samples":          "openshift-samples",
		"CSISnapshot":                "csi-snapshot-controller",
		"NodeTuning":                 "node-tuning",
		"MachineAPI":                 "machine-api",
		"Build":                      "build",
		"DeploymentConfig":           "dc",
		"ImageRegistry":              "image-registry",
		"OperatorLifecycleManager":   "operator-lifecycle-manager",
		"CloudCredential":            "cloud-credential",
		"Ingress":                    "ingress",
		"CloudControllerManager":     "cloud-controller-manager",
		"OperatorLifecycleManagerV1": "olm",
	}
	for _, capability := range caps {
		// A new capability missing from capability_operators means the table
		// above needs updating - fail loudly rather than silently passing.
		if capability_operators[capability] == "" {
			return fmt.Errorf("new unknown capability '%v'. please update automation: capability_operators in utils.go", capability)
		}
		prefix := "co"
		if capability == "Build" || capability == "DeploymentConfig" {
			prefix = "-A" // special case for caps that isn't co but a resource
		}
		if _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(prefix, capability_operators[capability]).Output(); err != nil {
			return
		}
	}
	return
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
226c3fe8-4e96-4d08-ad0a-1c464192d925
|
waitForCVOStatus
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// waitForCVOStatus polls the ClusterVersion value at `jsonpath` every
// `interval` seconds, up to `timeout` seconds, until `message` appears
// (waitingToAppear=true) or disappears (waitingToAppear=false) in it.
// On timeout the returned error embeds a dump of .status.conditions so the
// failure is self-contained; other poll errors are wrapped and logged.
// Note: interval and timeout are multiplied by time.Second, so callers pass
// plain numbers of seconds despite the time.Duration parameter type.
func waitForCVOStatus(oc *exutil.CLI, interval time.Duration, timeout time.Duration, message string, jsonpath string, waitingToAppear bool) (err error) {
	var prefix, out string
	if !waitingToAppear {
		prefix = "not "
	}
	e2e.Logf("Waiting for CVO '%s' %sto contain '%s'", jsonpath, prefix, message)
	err = wait.Poll(interval*time.Second, timeout*time.Second, func() (bool, error) {
		out, err = getCVObyJP(oc, jsonpath)
		return strings.Contains(out, message) == waitingToAppear, err
	})
	if err != nil {
		if strings.Compare(err.Error(), "timed out waiting for the condition") == 0 {
			// Timed out: fetch all conditions so the error message carries
			// enough context to triage without re-running the test.
			out, _ = getCVObyJP(oc, ".status.conditions")
			// The NewReplacer strips the jsonpath condition-filter syntax to
			// produce a human-readable "<Type> <field>" description.
			err = fmt.Errorf("reached time limit of %s waiting for CVO %s %sto contain '%s', dumping conditions:\n%s",
				timeout*time.Second, strings.NewReplacer(".status.conditions[?(.type=='", "", "')].", " ").Replace(jsonpath), prefix, message, out)
			return
		}
		err = fmt.Errorf("while waiting for CVO %sto contain '%s', an error was received: %s %s", prefix, message, out, err.Error())
		e2e.Logf(err.Error())
	}
	return
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
364b95e2-9ae6-4916-9183-d373b996c77d
|
setCVOverrides
|
['"encoding/json"', '"fmt"']
|
['JSONp']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// setCVOverrides adds a spec.overrides entry (group "apps", unmanaged=true)
// for the given resource to clusterversion/version, then waits for the
// override to be fully reflected: in the Upgradeable condition message, in
// `oc adm upgrade` output, and finally for Progressing to settle to False.
// Each wait uses a 30s interval with an 8-minute timeout.
func setCVOverrides(oc *exutil.CLI, resourceKind string, resourceName string, resourceNamespace string) (err error) {
	// Minimal shape of a ClusterVersion spec.overrides entry.
	type ovrd struct {
		Ki string `json:"kind"`
		Na string `json:"name"`
		Ns string `json:"namespace"`
		Un bool   `json:"unmanaged"`
		Gr string `json:"group"`
	}
	var ovPatch string
	if ovPatch, err = ocJSONPatch(oc, "", "clusterversion/version", []JSONp{
		{"add", "/spec/overrides", []ovrd{{resourceKind, resourceName, resourceNamespace, true, "apps"}}}}); err != nil {
		return fmt.Errorf("patching /spec/overrides failed with: %s %v", ovPatch, err)
	}
	// upgradeable .reason may be ClusterVersionOverridesSet or MultipleReasons, but .message have to contain "overrides"
	e2e.Logf("Waiting for Upgradeable to contain overrides message...")
	if err = waitForCVOStatus(oc, 30, 8*60,
		"Disabling ownership via cluster version overrides prevents upgrades",
		".status.conditions[?(.type=='Upgradeable')].message", true); err != nil {
		return
	}
	e2e.Logf("Waiting for ClusterVersionOverridesSet in oc adm upgrade...")
	if !checkUpdates(oc, false, 30, 8*60, "ClusterVersionOverridesSet") {
		return fmt.Errorf("no overrides message in oc adm upgrade within 8m")
	}
	e2e.Logf("Waiting for Progressing=false...")
	// to workaround the fake upgrade triggered by cv.overrides, refer to https://issues.redhat.com/browse/OTA-586
	err = waitForCVOStatus(oc, 30, 8*60, "False",
		".status.conditions[?(.type=='Progressing')].status", true)
	return
}
|
cvo
| |||
function
|
openshift/openshift-tests-private
|
3412999c-6de1-4a3b-b64d-9fa3fbb68d32
|
unsetCVOverrides
|
['JSONp']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// unsetCVOverrides removes spec.overrides from clusterversion/version and
// waits until the override message disappears from the Upgradeable condition,
// then asserts `oc adm upgrade` no longer reports ClusterVersionOverridesSet.
// Any failure along the way fails the test via gomega assertions.
func unsetCVOverrides(oc *exutil.CLI) {
	e2e.Logf("Unset /spec/overrides...")
	_, patchErr := ocJSONPatch(oc, "", "clusterversion/version", []JSONp{{"remove", "/spec/overrides", nil}})
	o.Expect(patchErr).NotTo(o.HaveOccurred())

	e2e.Logf("Waiting overrides to disappear from cluster conditions...")
	waitErr := waitForCVOStatus(oc, 30, 8*60,
		"Disabling ownership via cluster version overrides prevents upgrades",
		".status.conditions[?(.type=='Upgradeable')].message", false)
	o.Expect(waitErr).NotTo(o.HaveOccurred())

	e2e.Logf("Check no ClusterVersionOverridesSet in `oc adm upgrade` msg...")
	upgradeOutput, cmdErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade").Output()
	o.Expect(cmdErr).NotTo(o.HaveOccurred())
	o.Expect(upgradeOutput).NotTo(o.ContainSubstring("ClusterVersionOverridesSet"))
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
f35a853a-32ab-4c20-8711-93ea36d62d83
|
isGlobalResourceExist
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// isGlobalResourceExist reports whether at least one instance of the given
// cluster-scoped resource type exists. A failing `oc get` fails the test.
func isGlobalResourceExist(oc *exutil.CLI, resourceType string) bool {
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to get resource %s", resourceType)
	if !strings.Contains(out, "No resources found") {
		return true
	}
	e2e.Logf("there is no %s in this cluster!", resourceType)
	return false
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
ae79a9a2-004d-495c-89eb-ad5e04a7798c
|
getMirrorRegistry
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getMirrorRegistry returns the registry host of the first configured mirror,
// preferring ImageContentSourcePolicy over ImageDigestMirrorSet. It errors
// when neither resource exists or the lookup fails.
func getMirrorRegistry(oc *exutil.CLI) (registry string, err error) {
	switch {
	case isGlobalResourceExist(oc, "ImageContentSourcePolicy"):
		registry, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy",
			"-o", "jsonpath={.items[0].spec.repositoryDigestMirrors[0].mirrors[0]}").Output()
		if err != nil {
			err = fmt.Errorf("failed to acquire mirror registry from ICSP: %v", err)
			return
		}
		// Keep only the host part of the mirror pullspec.
		registry, _, _ = strings.Cut(registry, "/")
		return
	case isGlobalResourceExist(oc, "ImageDigestMirrorSet"):
		registry, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageDigestMirrorSet",
			"-o", "jsonpath={.items[0].spec.imageDigestMirrors[0].mirrors[0]}").Output()
		if err != nil {
			err = fmt.Errorf("failed to acquire mirror registry from IDMS: %v", err)
			return
		}
		registry, _, _ = strings.Cut(registry, "/")
		return
	default:
		err = fmt.Errorf("no ICSP or IDMS found!")
		return
	}
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
2950be7d-b797-43d9-9d9f-493a98f430e8
|
getReleaseInfo
|
['"fmt"', '"os"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getReleaseInfo returns `oc adm release info -ojson` output for the cluster,
// authenticating with the cluster pull secret extracted to a temp dir.
// Two fallbacks cover special environments:
//   - disconnected baremetal/none platforms: retry with --insecure against
//     the mirror registry (no registry cert available);
//   - AWS C2S/SC2S regions: retry against the ICSP mirror, since the pull
//     secret only carries the mirror token there.
//
// Fix: the disconnected-baremetal retry used .Execute(), which discarded the
// command output — on success the function returned an empty `output` with a
// nil error. It now uses .Output() so callers receive the release info.
func getReleaseInfo(oc *exutil.CLI) (output string, err error) {
	tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	err = os.Mkdir(tempDataDir, 0755)
	defer os.RemoveAll(tempDataDir)
	if err != nil {
		err = fmt.Errorf("failed to create tempdir %s: %v", tempDataDir, err)
		return
	}
	if err = oc.AsAdmin().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--confirm", "--to="+tempDataDir).Execute(); err != nil {
		err = fmt.Errorf("failed to extract dockerconfig: %v", err)
		return
	}
	if output, err = oc.AsAdmin().Run("adm").Args("release", "info", "-a", tempDataDir+"/.dockerconfigjson", "-ojson").Output(); err != nil {
		e2e.Logf("warning: release info failed once with:\n\"%v\"", err)
		// Workaround disconnected baremetal clusters that don't have cert for the registry
		platform := exutil.CheckPlatform(oc)
		if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
			var mirrorRegistry string
			mirrorRegistry, err = getMirrorRegistry(oc)
			if mirrorRegistry != "" {
				if err != nil {
					err = fmt.Errorf("error out getting mirror registry: %v", err)
					return
				}
				// Capture the retry's output (previously discarded by Execute()).
				if output, err = oc.AsAdmin().Run("adm").Args("release", "info", "--insecure", "-a", tempDataDir+"/.dockerconfigjson", "-ojson").Output(); err != nil {
					err = fmt.Errorf("warning: insecure release info for disconnected baremetal failed with:\n\"%v\"", err)
				}
				return
			}
		}
		// Workaround c2s/cs2s clusters that only have token to the mirror in pull secret
		var region, image, mirror string
		if region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure",
			"cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output(); err != nil {
			err = fmt.Errorf("failed to get cluster region: %v", err)
			return
		}
		// region us-iso-* represent C2S, us-isob-* represent SC2S
		if !strings.Contains(region, "us-iso-") && !strings.Contains(region, "us-isob-") {
			err = fmt.Errorf("oc adm release failed, and no retry for non-c2s/cs2s region: %s", region)
			return
		}
		if image, err = exutil.GetReleaseImage(oc); err != nil {
			err = fmt.Errorf("failed to get cluster release image: %v", err)
			return
		}
		if mirror, err = oc.AsAdmin().Run("get").Args("ImageContentSourcePolicy",
			"-o", "jsonpath={.items[0].spec.repositoryDigestMirrors[0].mirrors[0]}").Output(); err != nil {
			err = fmt.Errorf("failed to acquire mirror from ICSP: %v", err)
			return
		}
		if output, err = oc.AsAdmin().Run("adm").Args("release", "info",
			"--insecure", "-a", tempDataDir+"/.dockerconfigjson",
			fmt.Sprintf("%s@%s", mirror, strings.Split(image, "@")[1])).Output(); err != nil {
			err = fmt.Errorf("failed to get release info: %v", err)
			return
		}
	}
	return
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
239888f1-86e9-4d68-8ef0-62eebc812f1b
|
getCVOPod
|
['"encoding/json"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getCVOPod resolves the CVO pod in openshift-cluster-version and returns the
// JSON object selected by `jsonpath` on it, unmarshalled into a generic map.
func getCVOPod(oc *exutil.CLI, jsonpath string) (map[string]interface{}, error) {
	podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-cluster-version", "-o=jsonpath={.items[].metadata.name}").Output()
	if err != nil {
		return nil, fmt.Errorf("getting CVO pod name failed: %v", err)
	}
	raw, err := oc.AsAdmin().WithoutNamespace().Run("get").
		Args("pod", podName, "-n", "openshift-cluster-version",
			"-o", fmt.Sprintf("jsonpath={%s}", jsonpath)).Output()
	if err != nil {
		return nil, fmt.Errorf("getting CVO pod object values failed: %v", err)
	}
	values := map[string]interface{}{}
	if err = json.Unmarshal([]byte(raw), &values); err != nil {
		return nil, fmt.Errorf("unmarshal release info error: %v", err)
	}
	return values, nil
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
831ba6d5-b6d5-4f98-a278-bd36a5f68581
|
recoverReleaseAccepted
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// recoverReleaseAccepted clears any pending upgrade (`oc adm upgrade --clear`)
// and waits up to 8 minutes for the ReleaseAccepted condition to return to
// True, logging and returning a descriptive error on failure.
func recoverReleaseAccepted(oc *exutil.CLI) error {
	out, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("upgrade", "--clear").Output()
	if err != nil {
		err = fmt.Errorf("clearing upgrade failed with: %s\n%v", out, err)
		e2e.Logf(err.Error())
		return err
	}
	err = waitForCondition(oc, 30, 480, "True",
		"get", "clusterversion", "version", "-o", "jsonpath={.status.conditions[?(@.type=='ReleaseAccepted')].status}")
	if err == nil {
		return nil
	}
	if err.Error() == "timed out waiting for the condition" {
		err = fmt.Errorf("ReleaseAccepted condition is not back to True within 8m")
	} else {
		err = fmt.Errorf("waiting for ReleaseAccepted returned error: %s", err.Error())
	}
	e2e.Logf(err.Error())
	return err
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
f478c4d5-6c80-4376-83d2-0bf193d64519
|
getTargetPayload
|
['"fmt"', '"io"', '"os"', '"path/filepath"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// getTargetPayload resolves a release payload pullspec (by digest) for either
// the latest 4-stable release ("stable") or the latest nightly matching the
// cluster's minor version ("nightly"). Any other imageType is an error.
func getTargetPayload(oc *exutil.CLI, imageType string) (string, error) {
	switch imageType {
	case "stable":
		stableImage, err := exutil.GetLatest4StableImage()
		if err != nil {
			return "", err
		}
		info, err := oc.AsAdmin().WithoutNamespace().Run("image").Args("info", stableImage, "-ojson").Output()
		if err != nil {
			return "", err
		}
		digest := gjson.Get(info, "digest").String()
		return fmt.Sprintf("quay.io/openshift-release-dev/ocp-release@%s", digest), nil
	case "nightly":
		clusterVersion, _, err := exutil.GetClusterVersion(oc)
		if err != nil {
			return "", err
		}
		nightlyImage, err := exutil.GetLatestNightlyImage(clusterVersion)
		if err != nil {
			return "", err
		}
		// The CI registry needs the cluster pull secret; stash it in a temp dir.
		tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
		err = os.Mkdir(tempDataDir, 0755)
		defer os.RemoveAll(tempDataDir)
		if err != nil {
			return "", err
		}
		if err = exutil.GetPullSec(oc, tempDataDir); err != nil {
			return "", err
		}
		authFile := tempDataDir + "/.dockerconfigjson"
		info, err := oc.AsAdmin().WithoutNamespace().Run("image").Args("info", "-a", authFile, nightlyImage, "-ojson").Output()
		if err != nil {
			return "", err
		}
		digest := gjson.Get(info, "digest").String()
		return fmt.Sprintf("registry.ci.openshift.org/ocp/release@%s", digest), nil
	default:
		return "", fmt.Errorf("unrecognized imageType")
	}
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
4a2f11b4-7def-41a8-ba08-122dec31f36b
|
checkCVOEvents
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// checkCVOEvents fetches the events in openshift-cluster-version and matches
// each regular expression in `expected` against them. included=true requires
// every pattern to match; included=false requires that none match.
//
// Fix: regexp.MatchString errors were silently discarded, so an invalid
// pattern was indistinguishable from "did not match" (a false pass in the
// included=false mode). Invalid patterns are now reported as errors.
func checkCVOEvents(oc *exutil.CLI, included bool, expected []string) (err error) {
	output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", "openshift-cluster-version").Output()
	if err != nil {
		return err
	}
	e2e.Logf("the cvo event: %s", output)
	for _, exp := range expected {
		matched, matchErr := regexp.MatchString(exp, output)
		if matchErr != nil {
			return fmt.Errorf("invalid event pattern %q: %v", exp, matchErr)
		}
		if included && !matched {
			return fmt.Errorf("msg: %s is not found in events", exp)
		}
		if !included && matched {
			return fmt.Errorf("msg: %s is found in events", exp)
		}
	}
	return nil
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
58f878dd-023b-4310-bd8b-e5d6ddfd268e
|
PathExists
|
['"os"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
// PathExists reports whether the given filesystem path exists. A non-existence
// stat result yields (false, nil); any other stat failure is returned as-is.
func PathExists(path string) (bool, error) {
	switch _, err := os.Stat(path); {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
|
cvo
| ||||
function
|
openshift/openshift-tests-private
|
961d37b0-0a12-4686-8cfe-3fee77c4ef64
|
updateFile
|
['"io/ioutil"', '"log"', '"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/cvo/utils.go
|
func updateFile(filePath string, oldString string, newString string) (err error) {
data, err := ioutil.ReadFile(filePath)
if err != nil {
log.Panicf("failed reading data from file: %s", err)
return err
}
updatedFileData := strings.Replace(string(data), oldString, newString, -1)
err = os.WriteFile(filePath, []byte(updatedFileData), 0644)
if err != nil {
log.Panicf("failed to write file: %s", err)
return err
}
return nil
}
|
cvo
| ||||
test
|
openshift/openshift-tests-private
|
3fafedc6-8eb3-48f3-8024-b52f32f33d74
|
osus
|
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
arch "github.com/openshift/openshift-tests-private/test/extended/util/architecture"
container "github.com/openshift/openshift-tests-private/test/extended/util/container"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
package osus
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
arch "github.com/openshift/openshift-tests-private/test/extended/util/architecture"
container "github.com/openshift/openshift-tests-private/test/extended/util/container"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// Spec suite covering install/uninstall and upgrade flows of the OpenShift
// Update Service (OSUS / cincinnati) operator delivered via OperatorHub.
var _ = g.Describe("[sig-updates] OTA osus should", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("osus", exutil.KubeConfigPath())
	g.BeforeEach(func() {
		// OSUS is pulled from the QE catalog source and only ships amd64 images,
		// so skip clusters without the catalog or on non-amd64 architectures.
		exutil.SkipMissingQECatalogsource(oc)
		arch.SkipNonAmd64SingleArch(oc)
	})
	//author: [email protected]
	g.It("Author:jiajliu-High-35869-install/uninstall osus operator from OperatorHub through CLI [Serial]", func() {
		testDataDir := exutil.FixturePath("testdata", "ota/osus")
		ogTemp := filepath.Join(testDataDir, "operatorgroup.yaml")
		subTemp := filepath.Join(testDataDir, "subscription.yaml")
		oc.SetupProject()
		og := operatorGroup{
			name:      "osus-og",
			namespace: oc.Namespace(),
			template:  ogTemp,
		}
		sub := subscription{
			name:            "osus-sub",
			namespace:       oc.Namespace(),
			channel:         "v1",
			approval:        "Automatic",
			operatorName:    "cincinnati-operator",
			sourceName:      "qe-app-registry",
			sourceNamespace: "openshift-marketplace",
			template:        subTemp,
		}
		exutil.By("Create OperatorGroup...")
		og.create(oc)
		exutil.By("Create Subscription...")
		sub.create(oc)
		exutil.By("Check updateservice operator installed successully!")
		e2e.Logf("Waiting for osus operator pod creating...")
		// First wait for the operator pod object to appear at all...
		err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
			output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=name=updateservice-operator", "-n", oc.Namespace()).Output()
			if err != nil || strings.Contains(output, "No resources found") {
				e2e.Logf("error: %v; output: %s", err, output)
				return false, nil
			}
			return true, nil
		})
		exutil.AssertWaitPollNoErr(err, "pod with name=updateservice-operator is not found")
		e2e.Logf("Waiting for osus operator pod running...")
		// ...then wait for it to reach the Running phase.
		err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
			status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=name=updateservice-operator", "-n", oc.Namespace(), "-o=jsonpath={.items[0].status.phase}").Output()
			if err != nil || strings.Compare(status, "Running") != 0 {
				e2e.Logf("error: %v; status: %s", err, status)
				return false, nil
			}
			return true, nil
		})
		exutil.AssertWaitPollNoErr(err, "pod with name=updateservice-operator is not Running")
		exutil.By("Delete OperatorGroup...")
		og.delete(oc)
		exutil.By("Delete Subscription...")
		sub.delete(oc)
		exutil.By("Delete CSV...")
		installedCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", sub.namespace, "-o=jsonpath={.items[?(@.spec.displayName==\"OpenShift Update Service\")].metadata.name}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(installedCSV).NotTo(o.BeEmpty())
		removeResource(oc, "-n", sub.namespace, "csv", installedCSV)
		exutil.By("Check updateservice operator uninstalled successully!")
		// The namespace should be empty ("No resources found") once cleanup completes.
		err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
			output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("all", "-n", oc.Namespace()).Output()
			if err != nil || !strings.Contains(output, "No resources found") {
				e2e.Logf("error: %v; output: %s", err, output)
				return false, nil
			}
			return true, nil
		})
		exutil.AssertWaitPollNoErr(err, "updateservice operator is not uninstalled")
	})
	//author: [email protected]
	g.It("NonPreRelease-Longduration-DisconnectedOnly-Author:jiajliu-High-44958-z version upgrade OSUS operator and operand for disconnected cluster [Disruptive]", func() {
		// z-stream upgrade path for the operator: 5.0.2 -> 5.0.3.
		updatePath := map[string]string{
			"srcver": "5.0.2",
			"tgtver": "5.0.3",
		}
		tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
		defer os.RemoveAll(tempDataDir)
		err := os.MkdirAll(tempDataDir, 0755)
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.SetupProject()
		exutil.By("Install osus operator with srcver")
		installOSUSOperator(oc, updatePath["srcver"], "Manual")
		preOPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		// OPERATOR_CONDITION_NAME carries the installed CSV name, i.e. the operator version.
		csvInPrePod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", preOPName, "-o=jsonpath={.spec.containers[].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}", "-n", oc.Namespace()).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(csvInPrePod).To(o.ContainSubstring(updatePath["srcver"]), "Unexpected operator version installed: %s.", csvInPrePod)
		exutil.By("Install OSUS instance")
		e2e.Logf("Mirror OCP release and graph data image by oc-mirror...")
		registry, err := exutil.GetMirrorRegistry(oc)
		o.Expect(err).NotTo(o.HaveOccurred())
		credDir, err := locatePodmanCred(oc, tempDataDir)
		defer os.RemoveAll(credDir)
		o.Expect(err).NotTo(o.HaveOccurred())
		outdir, err := ocmirror(oc, registry+"/oc-mirror", tempDataDir, "")
		e2e.Logf("oc mirror output dir is %s", outdir)
		o.Expect(err).NotTo(o.HaveOccurred())
		e2e.Logf("Configure the Registry Certificate as trusted for cincinnati...")
		certFile := tempDataDir + "/cert"
		err = exutil.GetUserCAToFile(oc, certFile)
		o.Expect(err).NotTo(o.HaveOccurred())
		// Remember the current additionalTrustedCA so it can be restored afterwards.
		addCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config.openshift.io/cluster", "-o=jsonpath={.spec.additionalTrustedCA}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		defer restoreAddCA(oc, addCA)
		err = trustCert(oc, registry, certFile)
		o.Expect(err).NotTo(o.HaveOccurred())
		e2e.Logf("Create updateservice...")
		defer uninstallOSUSApp(oc)
		err = installOSUSAppOCMirror(oc, outdir)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = verifyOSUS(oc)
		o.Expect(err).NotTo(o.HaveOccurred())
		exutil.By("z-version upgrade against operator and operand")
		err = upgradeOSUS(oc, "update-service-oc-mirror", updatePath["tgtver"])
		o.Expect(err).NotTo(o.HaveOccurred())
	})
	//author: [email protected]
	g.It("NonPreRelease-ConnectedOnly-Author:jiajliu-High-69204-y version upgrade OSUS operator and operand for connected cluster", func() {
		// y-stream upgrade path for the operator: 4.9.1 -> 5.0.1.
		updatePath := map[string]string{
			"srcver": "4.9.1",
			"tgtver": "5.0.1",
		}
		oc.SetupProject()
		skipUnsupportedOCPVer(oc, updatePath["srcver"])
		exutil.By("Install osus operator with srcver")
		installOSUSOperator(oc, updatePath["srcver"], "Manual")
		preOPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		csvInPrePod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", preOPName, "-o=jsonpath={.spec.containers[].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}", "-n", oc.Namespace()).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(csvInPrePod).To(o.ContainSubstring(updatePath["srcver"]), "Unexpected operator version installed: %s.", csvInPrePod)
		exutil.By("Install OSUS instance")
		usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
		us := updateService{
			name:      "us69204",
			namespace: oc.Namespace(),
			template:  usTemp,
			graphdata: "quay.io/openshift-qe-optional-operators/graph-data:latest",
			releases:  "quay.io/openshifttest/ocp-release",
			replicas:  1,
		}
		defer uninstallOSUSApp(oc)
		err = installOSUSAppOC(oc, us)
		o.Expect(err).NotTo(o.HaveOccurred())
		exutil.By("Verify OSUS instance works")
		err = verifyOSUS(oc)
		o.Expect(err).NotTo(o.HaveOccurred())
		exutil.By("Y-version upgrade against operator and operand")
		err = upgradeOSUS(oc, us.name, updatePath["tgtver"])
		o.Expect(err).NotTo(o.HaveOccurred())
	})
})
var _ = g.Describe("[sig-updates] OTA osus instance should", func() {
defer g.GinkgoRecover()
oc := exutil.NewCLI("osusinstace", exutil.KubeConfigPath())
g.BeforeEach(func() {
exutil.SkipMissingQECatalogsource(oc)
arch.SkipNonAmd64SingleArch(oc)
oc.SetupProject()
installOSUSOperator(oc, "", "Automatic")
})
//author: [email protected]
g.It("NonPreRelease-Longduration-DisconnectedOnly-Author:jianl-High-62641-install/uninstall updateservice instance using oc-mirror [Disruptive]", func() {
exutil.By("Mirror OCP release and graph data image by oc-mirror")
registry, err := exutil.GetMirrorRegistry(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Registry is %s", registry)
dirname := "/tmp/case62641"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
credDir, err := locatePodmanCred(oc, dirname)
defer os.RemoveAll(credDir)
o.Expect(err).NotTo(o.HaveOccurred())
outdir, err := ocmirror(oc, registry+"/oc-mirror", dirname, "")
e2e.Logf("oc mirror output dir is %s", outdir)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure the Registry Certificate as trusted for cincinnati")
certFile := dirname + "/cert"
err = exutil.GetUserCAToFile(oc, certFile)
o.Expect(err).NotTo(o.HaveOccurred())
addCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config.openshift.io/cluster", "-o=jsonpath={.spec.additionalTrustedCA}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer restoreAddCA(oc, addCA)
err = trustCert(oc, registry, certFile)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Install OSUS instance")
defer uninstallOSUSApp(oc)
err = installOSUSAppOCMirror(oc, outdir)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify OSUS instance works")
err = verifyOSUS(oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
//author: [email protected]
g.It("DisconnectedOnly-VMonly-Author:jianl-High-35944-install/uninstall updateservice instance and build graph image as non root [Disruptive]", func() {
exutil.By("Check if it's a AWS/GCP/Azure cluster")
exutil.SkipIfPlatformTypeNot(oc, "gcp, aws, azure")
dirname := "/tmp/case35944"
registry, err := exutil.GetMirrorRegistry(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Registry is %s", registry)
defer os.RemoveAll(dirname)
err = exutil.GetPullSec(oc, dirname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Build and push graph data image by podman as non root user")
graphdataTag := registry + "/ota-35944/graph-data:latest"
err = buildPushGraphImage(oc, graphdataTag, dirname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Mirror OCP images using oc adm release mirror")
err = mirror(oc, registry, "quay.io/openshift-release-dev/ocp-release:4.13.0-x86_64", dirname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure the Registry Certificate as trusted for cincinnati")
certFile := dirname + "/cert"
err = exutil.GetUserCAToFile(oc, certFile)
o.Expect(err).NotTo(o.HaveOccurred())
addCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config.openshift.io/cluster", "-o=jsonpath={.spec.additionalTrustedCA}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer restoreAddCA(oc, addCA)
err = trustCert(oc, registry, certFile)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Install OSUS instance")
usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
us := updateService{
name: "update-service-35944",
namespace: oc.Namespace(),
template: usTemp,
graphdata: graphdataTag,
releases: registry + "/ocp-release",
replicas: 2,
}
defer uninstallOSUSApp(oc)
err = installOSUSAppOC(oc, us)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify OSUS instance works")
err = verifyOSUS(oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
//author: [email protected]
g.It("ConnectedOnly-Author:jianl-High-52596-High-59687-install/uninstall updateservice instance on a connected/http/https proxy cluster", func() {
dirname := "/tmp/" + oc.Namespace() + "-osus"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Install OSUS instance")
//We need to build and push the latest graph-data if there is new feature to the container
usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
us := updateService{
name: "update-service-52596",
namespace: oc.Namespace(),
template: usTemp,
graphdata: "quay.io/openshift-qe-optional-operators/graph-data:latest",
releases: "quay.io/openshift-qe-optional-operators/osus-ocp-release",
replicas: 2,
}
defer uninstallOSUSApp(oc)
err = installOSUSAppOC(oc, us)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify OSUS instance works")
err = verifyOSUS(oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
//author: [email protected]
g.It("Longduration-NonPreRelease-ConnectedOnly-Author:jiajliu-High-48621-Updateservice pod should be re-deployed when update graphDataImage of updateservice", func() {
exutil.By("Install OSUS instance with graph-data:1.0")
usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
us := updateService{
name: "us48621",
namespace: oc.Namespace(),
template: usTemp,
graphdata: "quay.io/openshift-qe-optional-operators/graph-data:1.0",
releases: "quay.io/openshift-qe-optional-operators/osus-ocp-release",
replicas: 1,
}
defer uninstallOSUSApp(oc)
err := installOSUSAppOC(oc, us)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Waiting for osus instance pod rolling to expected replicas...")
err = wait.Poll(1*time.Minute, 10*time.Minute, func() (bool, error) {
runningPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
if err != nil || len(strings.Fields(runningPodName)) != us.replicas {
e2e.Logf("error: %v; running pod: %s", err, runningPodName)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "pod is not rolling to expected replicas")
runningPodNamePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
graphDataImagePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePre, "-n", us.namespace, "-o=jsonpath={.spec.initContainers[].image}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(graphDataImagePre).To(o.ContainSubstring("1.0"))
exutil.By("Update OSUS instance with graph-data:1.1")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", us.namespace, "updateservice/"+us.name, "-p", `{"spec":{"graphDataImage":"quay.io/openshift-qe-optional-operators/graph-data:1.1"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Waiting for osus instance pod rolling...")
err = wait.Poll(1*time.Minute, 10*time.Minute, func() (bool, error) {
runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
if err != nil || len(strings.Fields(runningPodNamePost)) != us.replicas || strings.Contains(runningPodNamePost, runningPodNamePre) {
e2e.Logf("error: %v; running pod after update image: %s; while running pod before update image: %s", err, runningPodNamePost, runningPodNamePre)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "pod is not rolling successfully after update image")
runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
graphDataImagePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePost, "-n", us.namespace, "-o=jsonpath={.spec.initContainers[].image}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(graphDataImagePost).To(o.ContainSubstring("1.1"))
})
//author: [email protected]
// Verify that with imagePullPolicy=Always the updateservice pod re-pulls a
// re-tagged graph-data:latest image when it rolls, without being rescheduled
// to another node (all other workers are cordoned for the duration).
g.It("Longduration-NonPreRelease-ConnectedOnly-VMonly-Author:jiajliu-High-52586-Updateservice pod should pull the latest graphDataImage instead of existed old one [Disruptive]", func() {
	tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	defer os.RemoveAll(tempDataDir)
	err := os.MkdirAll(tempDataDir, 0755)
	o.Expect(err).NotTo(o.HaveOccurred())
	// The cluster pull secret serves as the podman authfile for image pull/push.
	err = exutil.GetPullSec(oc, tempDataDir)
	o.Expect(err).NotTo(o.HaveOccurred())
	authFile := tempDataDir + "/.dockerconfigjson"
	podmanCLI := container.NewPodmanCLI()
	graphdataRepo := "quay.io/openshift-qe-optional-operators/graph-data"
	graphdataOld := graphdataRepo + ":1.0"
	graphdataNew := graphdataRepo + ":1.1"
	usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
	us := updateService{
		name:      "us52586",
		namespace: oc.Namespace(),
		template:  usTemp,
		graphdata: graphdataRepo + ":latest",
		releases:  "quay.io/openshift-qe-optional-operators/osus-ocp-release",
		replicas:  1,
	}
	exutil.By("Tag image graph-data:1.0 with latest and push the image")
	output, err := podmanCLI.Run("pull").Args(graphdataOld, "--tls-verify=false", "--authfile", authFile).Output()
	defer podmanCLI.RemoveImage(graphdataOld)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to pull image: %s", output)
	output, err = podmanCLI.Run("tag").Args(graphdataOld, us.graphdata).Output()
	defer podmanCLI.RemoveImage(us.graphdata)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to tag image: %s", output)
	output, err = podmanCLI.Run("push").Args(us.graphdata, "--tls-verify=false", "--authfile", authFile).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to push image: %s", output)
	exutil.By("Install OSUS instance with graph-data:latest")
	defer uninstallOSUSApp(oc)
	err = installOSUSAppOC(oc, us)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Waiting for osus instance pod rolling to expected replicas...")
	err = wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) {
		runningPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
		if err != nil || len(strings.Fields(runningPodName)) != us.replicas {
			e2e.Logf("error: %v; running pod: %s", err, runningPodName)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod is not rolling to expected replicas")
	runningPodNamePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Check imagePullPolicy...")
	// imagePullPolicy must be Always; otherwise a stale node-local image could
	// be reused and the re-tagged :latest would never be pulled.
	graphDataImagePolicy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePre, "-n", us.namespace, "-o=jsonpath={.spec.initContainers[].imagePullPolicy}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(graphDataImagePolicy).To(o.Equal("Always"), "Unexpected imagePullPolicy: %v", graphDataImagePolicy)
	nodeNamePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", us.namespace, runningPodNamePre, "-o=jsonpath={.spec.nodeName}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	graphDataImageIDPre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePre, "-n", us.namespace, "-o=jsonpath={.status.initContainerStatuses[].imageID}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Cordon worker nodes without osus instance pod scheduled")
	// Cordon every other worker so the replacement pod must land on the same
	// node and cannot sidestep a stale local image by rescheduling.
	nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() {
		for _, node := range strings.Fields(nodes) {
			if node == nodeNamePre {
				continue
			}
			// Surface uncordon failures instead of discarding them silently;
			// the readiness poll below would otherwise fail with a misleading
			// message.
			if uncordonErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("uncordon", node).Execute(); uncordonErr != nil {
				e2e.Logf("error uncordoning node %s: %v", node, uncordonErr)
			}
			err = wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) {
				nodeReady, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
				if err != nil || nodeReady != "True" {
					e2e.Logf("error: %v; node %s status: %s", err, node, nodeReady)
					return false, nil
				}
				return true, nil
			})
			exutil.AssertWaitPollNoErr(err, "fail to uncordon node!")
		}
	}()
	for _, node := range strings.Fields(nodes) {
		if node == nodeNamePre {
			continue
		}
		err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("cordon", node).Execute()
		o.Expect(err).NotTo(o.HaveOccurred(), "fail to cordon node %s: %v", node, err)
	}
	exutil.By("Tag image graph-data:1.1 with latest and push the image")
	output, err = podmanCLI.Run("pull").Args(graphdataNew, "--tls-verify=false", "--authfile", authFile).Output()
	defer podmanCLI.RemoveImage(graphdataNew)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to pull image: %s", output)
	output, err = podmanCLI.Run("tag").Args(graphdataNew, us.graphdata).Output()
	defer podmanCLI.RemoveImage(us.graphdata)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to tag image: %s", output)
	output, err = podmanCLI.Run("push").Args(us.graphdata, "--tls-verify=false", "--authfile", authFile).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to push image: %s", output)
	e2e.Logf("Waiting for osus instance pod rolling...")
	err = wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
		runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
		if err != nil || strings.Contains(runningPodNamePost, runningPodNamePre) {
			e2e.Logf("error: %v; running pod after update image: %s; while running pod before retag image: %s", err, runningPodNamePost, runningPodNamePre)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod is not rolling successfully after retag image")
	runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Check osus instance pod is not rescheduled...")
	nodeNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", us.namespace, runningPodNamePost, "-o=jsonpath={.spec.nodeName}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(nodeNamePost).To(o.Equal(nodeNamePre), "osus instance pod rescheduled from node %v to node %s unexpectedly", nodeNamePre, nodeNamePost)
	e2e.Logf("Check osus instance pod image updated...")
	graphDataImageIDPost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePost, "-n", us.namespace, "-o=jsonpath={.status.initContainerStatuses[].imageID}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(graphDataImageIDPost).NotTo(o.Equal(graphDataImageIDPre), "fail to update osus instance pod image")
})
})
|
package osus
| ||||
test case
|
openshift/openshift-tests-private
|
2b839e5f-8e3c-4bee-9d8d-c33c6ba06c25
|
Author:jiajliu-High-35869-install/uninstall osus operator from OperatorHub through CLI [Serial]
|
['"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Install the OSUS operator from OperatorHub via CLI (OperatorGroup +
// Subscription), verify the operator pod comes up, then uninstall
// (OperatorGroup, Subscription, CSV) and verify all resources are gone.
g.It("Author:jiajliu-High-35869-install/uninstall osus operator from OperatorHub through CLI [Serial]", func() {
	testDataDir := exutil.FixturePath("testdata", "ota/osus")
	ogTemp := filepath.Join(testDataDir, "operatorgroup.yaml")
	subTemp := filepath.Join(testDataDir, "subscription.yaml")
	oc.SetupProject()
	og := operatorGroup{
		name:      "osus-og",
		namespace: oc.Namespace(),
		template:  ogTemp,
	}
	sub := subscription{
		name:            "osus-sub",
		namespace:       oc.Namespace(),
		channel:         "v1",
		approval:        "Automatic",
		operatorName:    "cincinnati-operator",
		sourceName:      "qe-app-registry",
		sourceNamespace: "openshift-marketplace",
		template:        subTemp,
	}
	exutil.By("Create OperatorGroup...")
	og.create(oc)
	exutil.By("Create Subscription...")
	sub.create(oc)
	exutil.By("Check updateservice operator installed successully!")
	e2e.Logf("Waiting for osus operator pod creating...")
	err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=name=updateservice-operator", "-n", oc.Namespace()).Output()
		if err != nil || strings.Contains(output, "No resources found") {
			e2e.Logf("error: %v; output: %s", err, output)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod with name=updateservice-operator is not found")
	e2e.Logf("Waiting for osus operator pod running...")
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=name=updateservice-operator", "-n", oc.Namespace(), "-o=jsonpath={.items[0].status.phase}").Output()
		// Built-in string comparison is clearer (and faster) than
		// strings.Compare, which the Go docs reserve for three-way results.
		if err != nil || status != "Running" {
			e2e.Logf("error: %v; status: %s", err, status)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod with name=updateservice-operator is not Running")
	exutil.By("Delete OperatorGroup...")
	og.delete(oc)
	exutil.By("Delete Subscription...")
	sub.delete(oc)
	exutil.By("Delete CSV...")
	installedCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", sub.namespace, "-o=jsonpath={.items[?(@.spec.displayName==\"OpenShift Update Service\")].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(installedCSV).NotTo(o.BeEmpty())
	removeResource(oc, "-n", sub.namespace, "csv", installedCSV)
	exutil.By("Check updateservice operator uninstalled successully!")
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("all", "-n", oc.Namespace()).Output()
		if err != nil || !strings.Contains(output, "No resources found") {
			e2e.Logf("error: %v; output: %s", err, output)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "updateservice operator is not uninstalled")
})
| |||||
test case
|
openshift/openshift-tests-private
|
5fc25a31-7e31-4bc0-abca-2f076be8d78d
|
NonPreRelease-Longduration-DisconnectedOnly-Author:jiajliu-High-44958-z version upgrade OSUS operator and operand for disconnected cluster [Disruptive]
|
['"fmt"', '"os"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Verify a z-stream (patch-level) upgrade of both the OSUS operator and its
// operand on a disconnected cluster: install the source version, mirror the
// release/graph-data images with oc-mirror, trust the mirror registry
// certificate, create an updateservice instance, then upgrade to the target
// version.
g.It("NonPreRelease-Longduration-DisconnectedOnly-Author:jiajliu-High-44958-z version upgrade OSUS operator and operand for disconnected cluster [Disruptive]", func() {
	// Source and target operator versions for the z-stream update path.
	updatePath := map[string]string{
		"srcver": "5.0.2",
		"tgtver": "5.0.3",
	}
	tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	defer os.RemoveAll(tempDataDir)
	err := os.MkdirAll(tempDataDir, 0755)
	o.Expect(err).NotTo(o.HaveOccurred())
	oc.SetupProject()
	exutil.By("Install osus operator with srcver")
	installOSUSOperator(oc, updatePath["srcver"], "Manual")
	// Confirm the installed operator version via the OPERATOR_CONDITION_NAME
	// env var injected into the operator pod.
	preOPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	csvInPrePod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", preOPName, "-o=jsonpath={.spec.containers[].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}", "-n", oc.Namespace()).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(csvInPrePod).To(o.ContainSubstring(updatePath["srcver"]), "Unexpected operator version installed: %s.", csvInPrePod)
	exutil.By("Install OSUS instance")
	e2e.Logf("Mirror OCP release and graph data image by oc-mirror...")
	registry, err := exutil.GetMirrorRegistry(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Podman credentials are required by oc-mirror to access the registry.
	credDir, err := locatePodmanCred(oc, tempDataDir)
	defer os.RemoveAll(credDir)
	o.Expect(err).NotTo(o.HaveOccurred())
	outdir, err := ocmirror(oc, registry+"/oc-mirror", tempDataDir, "")
	e2e.Logf("oc mirror output dir is %s", outdir)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Configure the Registry Certificate as trusted for cincinnati...")
	certFile := tempDataDir + "/cert"
	err = exutil.GetUserCAToFile(oc, certFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Remember the current additionalTrustedCA so it can be restored on exit.
	addCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config.openshift.io/cluster", "-o=jsonpath={.spec.additionalTrustedCA}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer restoreAddCA(oc, addCA)
	err = trustCert(oc, registry, certFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Create updateservice...")
	defer uninstallOSUSApp(oc)
	err = installOSUSAppOCMirror(oc, outdir)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = verifyOSUS(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("z-version upgrade against operator and operand")
	err = upgradeOSUS(oc, "update-service-oc-mirror", updatePath["tgtver"])
	o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
9a2ae29d-51d6-4890-9938-5027b31ea007
|
NonPreRelease-ConnectedOnly-Author:jiajliu-High-69204-y version upgrade OSUS operator and operand for connected cluster
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Verify a y-stream (minor-level) upgrade of both the OSUS operator and its
// operand on a connected cluster: install the source version, create an
// updateservice instance backed by public quay images, then upgrade to the
// target version.
g.It("NonPreRelease-ConnectedOnly-Author:jiajliu-High-69204-y version upgrade OSUS operator and operand for connected cluster", func() {
	// Source and target operator versions for the y-stream update path.
	updatePath := map[string]string{
		"srcver": "4.9.1",
		"tgtver": "5.0.1",
	}
	oc.SetupProject()
	// Skip when the cluster's OCP version does not support the source
	// operator version.
	skipUnsupportedOCPVer(oc, updatePath["srcver"])
	exutil.By("Install osus operator with srcver")
	installOSUSOperator(oc, updatePath["srcver"], "Manual")
	// Confirm the installed operator version via the OPERATOR_CONDITION_NAME
	// env var injected into the operator pod.
	preOPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	csvInPrePod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", preOPName, "-o=jsonpath={.spec.containers[].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}", "-n", oc.Namespace()).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(csvInPrePod).To(o.ContainSubstring(updatePath["srcver"]), "Unexpected operator version installed: %s.", csvInPrePod)
	exutil.By("Install OSUS instance")
	usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
	us := updateService{
		name:      "us69204",
		namespace: oc.Namespace(),
		template:  usTemp,
		graphdata: "quay.io/openshift-qe-optional-operators/graph-data:latest",
		releases:  "quay.io/openshifttest/ocp-release",
		replicas:  1,
	}
	defer uninstallOSUSApp(oc)
	err = installOSUSAppOC(oc, us)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Verify OSUS instance works")
	err = verifyOSUS(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Y-version upgrade against operator and operand")
	err = upgradeOSUS(oc, us.name, updatePath["tgtver"])
	o.Expect(err).NotTo(o.HaveOccurred())
})
| ||||||
test case
|
openshift/openshift-tests-private
|
69ec0533-27f2-4a01-8693-b434319cd7f5
|
NonPreRelease-Longduration-DisconnectedOnly-Author:jianl-High-62641-install/uninstall updateservice instance using oc-mirror [Disruptive]
|
['"os"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Install and uninstall an updateservice instance on a disconnected cluster
// using content mirrored by oc-mirror, trusting the mirror registry
// certificate for cincinnati.
g.It("NonPreRelease-Longduration-DisconnectedOnly-Author:jianl-High-62641-install/uninstall updateservice instance using oc-mirror [Disruptive]", func() {
	exutil.By("Mirror OCP release and graph data image by oc-mirror")
	registry, err := exutil.GetMirrorRegistry(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Registry is %s", registry)
	dirname := "/tmp/case62641"
	defer os.RemoveAll(dirname)
	err = os.MkdirAll(dirname, 0755)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Podman credentials are required by oc-mirror to access the registry.
	credDir, err := locatePodmanCred(oc, dirname)
	defer os.RemoveAll(credDir)
	o.Expect(err).NotTo(o.HaveOccurred())
	outdir, err := ocmirror(oc, registry+"/oc-mirror", dirname, "")
	e2e.Logf("oc mirror output dir is %s", outdir)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Configure the Registry Certificate as trusted for cincinnati")
	certFile := dirname + "/cert"
	err = exutil.GetUserCAToFile(oc, certFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Remember the current additionalTrustedCA so it can be restored on exit.
	addCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config.openshift.io/cluster", "-o=jsonpath={.spec.additionalTrustedCA}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer restoreAddCA(oc, addCA)
	err = trustCert(oc, registry, certFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Install OSUS instance")
	defer uninstallOSUSApp(oc)
	err = installOSUSAppOCMirror(oc, outdir)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Verify OSUS instance works")
	err = verifyOSUS(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
435c9878-99f4-4bfa-b74e-963527391ec1
|
DisconnectedOnly-VMonly-Author:jianl-High-35944-install/uninstall updateservice instance and build graph image as non root [Disruptive]
|
['"os"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Build and push the graph-data image as a non-root user, mirror OCP release
// images, trust the mirror registry certificate, then install/uninstall an
// updateservice instance on a disconnected AWS/GCP/Azure cluster.
g.It("DisconnectedOnly-VMonly-Author:jianl-High-35944-install/uninstall updateservice instance and build graph image as non root [Disruptive]", func() {
	exutil.By("Check if it's a AWS/GCP/Azure cluster")
	exutil.SkipIfPlatformTypeNot(oc, "gcp, aws, azure")
	dirname := "/tmp/case35944"
	registry, err := exutil.GetMirrorRegistry(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Registry is %s", registry)
	// NOTE(review): dirname is never created via MkdirAll here, unlike the
	// sibling tests — presumably exutil.GetPullSec creates it; confirm.
	defer os.RemoveAll(dirname)
	err = exutil.GetPullSec(oc, dirname)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Build and push graph data image by podman as non root user")
	graphdataTag := registry + "/ota-35944/graph-data:latest"
	err = buildPushGraphImage(oc, graphdataTag, dirname)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Mirror OCP images using oc adm release mirror")
	err = mirror(oc, registry, "quay.io/openshift-release-dev/ocp-release:4.13.0-x86_64", dirname)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Configure the Registry Certificate as trusted for cincinnati")
	certFile := dirname + "/cert"
	err = exutil.GetUserCAToFile(oc, certFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Remember the current additionalTrustedCA so it can be restored on exit.
	addCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config.openshift.io/cluster", "-o=jsonpath={.spec.additionalTrustedCA}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer restoreAddCA(oc, addCA)
	err = trustCert(oc, registry, certFile)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Install OSUS instance")
	usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
	us := updateService{
		name:      "update-service-35944",
		namespace: oc.Namespace(),
		template:  usTemp,
		graphdata: graphdataTag,
		releases:  registry + "/ocp-release",
		replicas:  2,
	}
	defer uninstallOSUSApp(oc)
	err = installOSUSAppOC(oc, us)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Verify OSUS instance works")
	err = verifyOSUS(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
2ec185c1-ec45-47ed-af66-f8a89ddd0078
|
ConnectedOnly-Author:jianl-High-52596-High-59687-install/uninstall updateservice instance on a connected/http/https proxy cluster
|
['"os"', 'container "github.com/openshift/openshift-tests-private/test/extended/util/container"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Install and uninstall an updateservice instance on a connected (or
// http/https proxy) cluster using public quay images.
g.It("ConnectedOnly-Author:jianl-High-52596-High-59687-install/uninstall updateservice instance on a connected/http/https proxy cluster", func() {
	// The temp directory previously created here was never referenced by any
	// step of this test, so the dead setup/cleanup has been removed.
	exutil.By("Install OSUS instance")
	//We need to build and push the latest graph-data if there is new feature to the container
	usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
	us := updateService{
		name:      "update-service-52596",
		namespace: oc.Namespace(),
		template:  usTemp,
		graphdata: "quay.io/openshift-qe-optional-operators/graph-data:latest",
		releases:  "quay.io/openshift-qe-optional-operators/osus-ocp-release",
		replicas:  2,
	}
	defer uninstallOSUSApp(oc)
	err := installOSUSAppOC(oc, us)
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Verify OSUS instance works")
	err = verifyOSUS(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
89ac8241-c2d7-4a0a-97f4-45eaeefa7c4c
|
Longduration-NonPreRelease-ConnectedOnly-Author:jiajliu-High-48621-Updateservice pod should be re-deployed when update graphDataImage of updateservice
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Verify that patching spec.graphDataImage on an updateservice triggers a
// redeploy: install with graph-data:1.0, patch to graph-data:1.1, then check
// the pod rolls and the new init-container image is in use.
g.It("Longduration-NonPreRelease-ConnectedOnly-Author:jiajliu-High-48621-Updateservice pod should be re-deployed when update graphDataImage of updateservice", func() {
	exutil.By("Install OSUS instance with graph-data:1.0")
	usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
	us := updateService{
		name:      "us48621",
		namespace: oc.Namespace(),
		template:  usTemp,
		graphdata: "quay.io/openshift-qe-optional-operators/graph-data:1.0",
		releases:  "quay.io/openshift-qe-optional-operators/osus-ocp-release",
		replicas:  1,
	}
	defer uninstallOSUSApp(oc)
	err := installOSUSAppOC(oc, us)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Waiting for osus instance pod rolling to expected replicas...")
	err = wait.Poll(1*time.Minute, 10*time.Minute, func() (bool, error) {
		runningPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
		if err != nil || len(strings.Fields(runningPodName)) != us.replicas {
			e2e.Logf("error: %v; running pod: %s", err, runningPodName)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod is not rolling to expected replicas")
	runningPodNamePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	// The graph-data image runs as an init container of the instance pod.
	graphDataImagePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePre, "-n", us.namespace, "-o=jsonpath={.spec.initContainers[].image}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(graphDataImagePre).To(o.ContainSubstring("1.0"))
	exutil.By("Update OSUS instance with graph-data:1.1")
	err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", us.namespace, "updateservice/"+us.name, "-p", `{"spec":{"graphDataImage":"quay.io/openshift-qe-optional-operators/graph-data:1.1"}}`, "--type=merge").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Waiting for osus instance pod rolling...")
	// A successful roll means the expected replica count is Running and none
	// of the old pod names survive.
	err = wait.Poll(1*time.Minute, 10*time.Minute, func() (bool, error) {
		runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
		if err != nil || len(strings.Fields(runningPodNamePost)) != us.replicas || strings.Contains(runningPodNamePost, runningPodNamePre) {
			e2e.Logf("error: %v; running pod after update image: %s; while running pod before update image: %s", err, runningPodNamePost, runningPodNamePre)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod is not rolling successfully after update image")
	runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	graphDataImagePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePost, "-n", us.namespace, "-o=jsonpath={.spec.initContainers[].image}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(graphDataImagePost).To(o.ContainSubstring("1.1"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
dd454640-cb36-4968-b363-81a0dd22190a
|
Longduration-NonPreRelease-ConnectedOnly-VMonly-Author:jiajliu-High-52586-Updateservice pod should pull the latest graphDataImage instead of existed old one [Disruptive]
|
['"fmt"', '"os"', '"path/filepath"', '"strings"', '"time"', 'container "github.com/openshift/openshift-tests-private/test/extended/util/container"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/osus.go
|
// Verify that with imagePullPolicy=Always the updateservice pod re-pulls a
// re-tagged graph-data:latest image when it rolls, without being rescheduled
// to another node (all other workers are cordoned for the duration).
g.It("Longduration-NonPreRelease-ConnectedOnly-VMonly-Author:jiajliu-High-52586-Updateservice pod should pull the latest graphDataImage instead of existed old one [Disruptive]", func() {
	tempDataDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
	defer os.RemoveAll(tempDataDir)
	err := os.MkdirAll(tempDataDir, 0755)
	o.Expect(err).NotTo(o.HaveOccurred())
	// The cluster pull secret serves as the podman authfile for image pull/push.
	err = exutil.GetPullSec(oc, tempDataDir)
	o.Expect(err).NotTo(o.HaveOccurred())
	authFile := tempDataDir + "/.dockerconfigjson"
	podmanCLI := container.NewPodmanCLI()
	graphdataRepo := "quay.io/openshift-qe-optional-operators/graph-data"
	graphdataOld := graphdataRepo + ":1.0"
	graphdataNew := graphdataRepo + ":1.1"
	usTemp := exutil.FixturePath("testdata", "ota", "osus", "updateservice.yaml")
	us := updateService{
		name:      "us52586",
		namespace: oc.Namespace(),
		template:  usTemp,
		graphdata: graphdataRepo + ":latest",
		releases:  "quay.io/openshift-qe-optional-operators/osus-ocp-release",
		replicas:  1,
	}
	exutil.By("Tag image graph-data:1.0 with latest and push the image")
	output, err := podmanCLI.Run("pull").Args(graphdataOld, "--tls-verify=false", "--authfile", authFile).Output()
	defer podmanCLI.RemoveImage(graphdataOld)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to pull image: %s", output)
	output, err = podmanCLI.Run("tag").Args(graphdataOld, us.graphdata).Output()
	defer podmanCLI.RemoveImage(us.graphdata)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to tag image: %s", output)
	output, err = podmanCLI.Run("push").Args(us.graphdata, "--tls-verify=false", "--authfile", authFile).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to push image: %s", output)
	exutil.By("Install OSUS instance with graph-data:latest")
	defer uninstallOSUSApp(oc)
	err = installOSUSAppOC(oc, us)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Waiting for osus instance pod rolling to expected replicas...")
	err = wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) {
		runningPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
		if err != nil || len(strings.Fields(runningPodName)) != us.replicas {
			e2e.Logf("error: %v; running pod: %s", err, runningPodName)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod is not rolling to expected replicas")
	runningPodNamePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Check imagePullPolicy...")
	// imagePullPolicy must be Always; otherwise a stale node-local image could
	// be reused and the re-tagged :latest would never be pulled.
	graphDataImagePolicy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePre, "-n", us.namespace, "-o=jsonpath={.spec.initContainers[].imagePullPolicy}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(graphDataImagePolicy).To(o.Equal("Always"), "Unexpected imagePullPolicy: %v", graphDataImagePolicy)
	nodeNamePre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", us.namespace, runningPodNamePre, "-o=jsonpath={.spec.nodeName}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	graphDataImageIDPre, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePre, "-n", us.namespace, "-o=jsonpath={.status.initContainerStatuses[].imageID}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	exutil.By("Cordon worker nodes without osus instance pod scheduled")
	// Cordon every other worker so the replacement pod must land on the same
	// node and cannot sidestep a stale local image by rescheduling.
	nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	defer func() {
		for _, node := range strings.Fields(nodes) {
			if node == nodeNamePre {
				continue
			}
			// Surface uncordon failures instead of discarding them silently;
			// the readiness poll below would otherwise fail with a misleading
			// message.
			if uncordonErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("uncordon", node).Execute(); uncordonErr != nil {
				e2e.Logf("error uncordoning node %s: %v", node, uncordonErr)
			}
			err = wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) {
				nodeReady, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
				if err != nil || nodeReady != "True" {
					e2e.Logf("error: %v; node %s status: %s", err, node, nodeReady)
					return false, nil
				}
				return true, nil
			})
			exutil.AssertWaitPollNoErr(err, "fail to uncordon node!")
		}
	}()
	for _, node := range strings.Fields(nodes) {
		if node == nodeNamePre {
			continue
		}
		err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("cordon", node).Execute()
		o.Expect(err).NotTo(o.HaveOccurred(), "fail to cordon node %s: %v", node, err)
	}
	exutil.By("Tag image graph-data:1.1 with latest and push the image")
	output, err = podmanCLI.Run("pull").Args(graphdataNew, "--tls-verify=false", "--authfile", authFile).Output()
	defer podmanCLI.RemoveImage(graphdataNew)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to pull image: %s", output)
	output, err = podmanCLI.Run("tag").Args(graphdataNew, us.graphdata).Output()
	defer podmanCLI.RemoveImage(us.graphdata)
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to tag image: %s", output)
	output, err = podmanCLI.Run("push").Args(us.graphdata, "--tls-verify=false", "--authfile", authFile).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), "fail to push image: %s", output)
	e2e.Logf("Waiting for osus instance pod rolling...")
	err = wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
		runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
		if err != nil || strings.Contains(runningPodNamePost, runningPodNamePre) {
			e2e.Logf("error: %v; running pod after update image: %s; while running pod before retag image: %s", err, runningPodNamePost, runningPodNamePre)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "pod is not rolling successfully after retag image")
	runningPodNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector", "app="+us.name, "-n", us.namespace, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Check osus instance pod is not rescheduled...")
	nodeNamePost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", us.namespace, runningPodNamePost, "-o=jsonpath={.spec.nodeName}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(nodeNamePost).To(o.Equal(nodeNamePre), "osus instance pod rescheduled from node %v to node %s unexpectedly", nodeNamePre, nodeNamePost)
	e2e.Logf("Check osus instance pod image updated...")
	graphDataImageIDPost, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", runningPodNamePost, "-n", us.namespace, "-o=jsonpath={.status.initContainerStatuses[].imageID}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(graphDataImageIDPost).NotTo(o.Equal(graphDataImageIDPre), "fail to update osus instance pod image")
})
| |||||
test
|
openshift/openshift-tests-private
|
25cde54b-da59-40b4-bf54-fa53f03bfc03
|
utils
|
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
package osus
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// operatorGroup holds the parameters used to render an OLM OperatorGroup
// from a template.
type operatorGroup struct {
name string
namespace string
template string // path to the OperatorGroup template file
}
// subscription holds the parameters used to render an OLM Subscription
// from a template.
type subscription struct {
name string
namespace string
channel string
approval string // installplan approval mode; "Manual" triggers manual approval
operatorName string
sourceName string
sourceNamespace string
startingCSV string // empty means no pinned starting CSV
template string // path to the Subscription template file
}
// resource identifies a cluster resource together with the oc client
// options used to operate on it.
type resource struct {
oc *exutil.CLI
asAdmin bool
withoutNamespace bool
kind string
name string
requireNS bool
namespace string
}
// updateService holds the parameters used to render an UpdateService
// (OSUS) custom resource from a template.
type updateService struct {
name string
namespace string
graphdata string // graph-data container image reference
releases string // release payload image repository
template string // path to the UpdateService template file
replicas int
}
// supportedMap maps one OSUS operator version to the OCP versions it
// is supported on.
type supportedMap struct {
osusver string
ocpver []string
}
// getProxyURL returns the proxy URL parsed from the https_proxy or
// http_proxy environment variable (https_proxy wins), or nil when
// neither is set. A malformed proxy URL fails the test.
func getProxyURL() *url.URL {
	rawProxy := os.Getenv("https_proxy")
	if rawProxy == "" {
		rawProxy = os.Getenv("http_proxy")
	}
	if rawProxy == "" {
		return nil
	}
	parsed, err := url.Parse(rawProxy)
	if err != nil {
		e2e.Failf("error parsing proxy URL: %v", err)
	}
	return parsed
}
// applyResourceFromTemplate renders an OpenShift template via `oc process`
// (parameters are passed straight through) and applies the resulting JSON
// with `oc apply`. Rendering is retried for up to 15s to ride out transient
// failures; the test fails if rendering never succeeds. Returns the error
// from `oc apply`.
func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var cfgFileJson string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
// Render into a uniquely named temp file so parallel tests do not collide.
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "osus-resource-cfg.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
cfgFileJson = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
e2e.Logf("the file of resource is %s", cfgFileJson)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", cfgFileJson).Execute()
}
// getRandomString returns an 8-character random string drawn from
// lowercase letters and digits, seeded from the current time.
func getRandomString() string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]byte, 8)
	for i := range out {
		out[i] = alphabet[rng.Intn(len(alphabet))]
	}
	return string(out)
}
// create renders the OperatorGroup template with this instance's name and
// namespace and applies it to the cluster, failing the test on error.
func (og *operatorGroup) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", og.template, "-p", "NAME="+og.name, "NAMESPACE="+og.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
// create renders the Subscription template with this instance's fields and
// applies it to the cluster, failing the test on error.
func (sub *subscription) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "NAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.approval, "OPERATORNAME="+sub.operatorName, "SOURCENAME="+sub.sourceName, "SOURCENAMESPACE="+sub.sourceNamespace, "STARTINGCSV="+sub.startingCSV)
o.Expect(err).NotTo(o.HaveOccurred())
}
// removeResource deletes a resource via `oc delete` with the given
// arguments. A delete that fails because the resource is already absent
// ("NotFound" / "No resources found") is tolerated; any other failure
// fails the test.
func removeResource(oc *exutil.CLI, parameters ...string) {
	out, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(parameters...).Output()
	if err != nil {
		if strings.Contains(out, "NotFound") || strings.Contains(out, "No resources found") {
			e2e.Logf("No resource found!")
			return
		}
	}
	o.Expect(err).NotTo(o.HaveOccurred())
}
// delete removes this OperatorGroup from the cluster; a missing resource is tolerated.
func (og *operatorGroup) delete(oc *exutil.CLI) {
removeResource(oc, "-n", og.namespace, "operatorgroup", og.name)
}
// delete removes this Subscription from the cluster; a missing resource is tolerated.
func (sub *subscription) delete(oc *exutil.CLI) {
removeResource(oc, "-n", sub.namespace, "subscription", sub.name)
}
// create renders the UpdateService template with this instance's fields
// (including the replica count) and applies it to the cluster.
func (us *updateService) create(oc *exutil.CLI) (err error) {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", us.template, "-p", "NAME="+us.name, "NAMESPACE="+us.namespace, "GRAPHDATA="+us.graphdata, "RELEASES="+us.releases, "REPLICAS="+strconv.Itoa(us.replicas))
return
}
// waitForPodReady waits for pods matching the given label selector in
// namespace ns to exist (up to 60s), then to all reach phase Running with
// condition Ready=True (up to 10 minutes). Fails the test on timeout.
//
// Note: the "pod" argument is a label selector such as "app=foo", not a
// pod name.
func waitForPodReady(oc *exutil.CLI, pod string, ns string) {
e2e.Logf("Waiting for %s pod creating...", pod)
pollErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
cmdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector="+pod, "-n", ns).Output()
if err != nil || strings.Contains(cmdOut, "No resources found") {
e2e.Logf("No pod found, keep trying! error: %v, cmdOut: %s", err, cmdOut)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("pod with name=%s is not found", pod))
e2e.Logf("Waiting for %s pod ready and running...", pod)
pollErr = wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
stateOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector="+pod, "-n", ns, "-o=jsonpath={.items[*].status.phase}").Output()
if err != nil {
e2e.Logf("pod phase status: %s with error %v, try again", stateOut, err)
return false, nil
}
// Every matched pod must be in phase Running.
state := strings.Split(stateOut, " ")
for _, s := range state {
if strings.Compare(s, "Running") != 0 {
e2e.Logf("pod status: %s, try again", s)
return false, nil
}
}
readyOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector="+pod, "-n", ns, "-o=jsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output()
if err != nil {
e2e.Logf("pod ready condition: %s with error %v, try again", readyOut, err)
return false, nil
}
// Every matched pod must report the Ready condition as True.
ready := strings.Split(readyOut, " ")
for _, s := range ready {
if strings.Compare(s, "True") != 0 {
e2e.Logf("pod ready condition: %s, try again", s)
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("pod %s is not running", pod))
}
func copyFile(source string, dest string) {
bytesRead, err := ioutil.ReadFile(source)
o.Expect(err).NotTo(o.HaveOccurred())
err = ioutil.WriteFile(dest, bytesRead, 0644)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Set ENV for oc-mirror credential.
//
// locatePodmanCred extracts the cluster pull secret into dst, merges it
// with the CI profile pull secret when CLUSTER_PROFILE_DIR is set (a plain
// copy otherwise), and installs the resulting auth.json under
// $XDG_RUNTIME_DIR/containers/, where podman and oc-mirror look for
// credentials. When XDG_RUNTIME_DIR is unset it is pointed at a fresh temp
// directory. Returns the containers directory that holds auth.json.
func locatePodmanCred(oc *exutil.CLI, dst string) (dirname string, err error) {
e2e.Logf("Setting env for oc-mirror credential")
if err = exutil.GetPullSec(oc, dst); err != nil {
return "", fmt.Errorf("extract pull-secret failed: %v", err)
}
if os.Getenv("CLUSTER_PROFILE_DIR") != "" {
// Merge the cluster pull secret with the CI profile's pull secret via jq.
cmd := fmt.Sprintf("jq -s '.[0]*.[1]' %s %s > %s", dst+"/.dockerconfigjson", os.Getenv("CLUSTER_PROFILE_DIR")+"/pull-secret", dst+"/auth.json")
if _, err = exec.Command("bash", "-c", cmd).CombinedOutput(); err != nil {
return "", fmt.Errorf("%s failed: %v", cmd, err)
}
} else {
copyFile(dst+"/.dockerconfigjson", dst+"/auth.json")
}
envDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
containerDir := envDir + "/containers/"
key := "XDG_RUNTIME_DIR"
currentRuntime, ex := os.LookupEnv(key)
if !ex {
// No runtime dir configured: create a fresh one and export it.
if err = os.MkdirAll(containerDir, 0700); err != nil {
return "", fmt.Errorf("make dir failed: %v", err)
}
os.Setenv(key, envDir)
copyFile(dst+"/auth.json", containerDir+"auth.json")
return containerDir, nil
}
// Runtime dir already set: only install auth.json if not already present.
runtimeContainerDir := currentRuntime + "/containers/"
_, err = os.Stat(runtimeContainerDir + "auth.json")
if os.IsNotExist(err) {
if err = os.MkdirAll(runtimeContainerDir, 0700); err != nil {
return "", fmt.Errorf("make dir failed: %v", err)
}
copyFile(dst+"/auth.json", runtimeContainerDir+"auth.json")
}
return runtimeContainerDir, nil
}
// Mirror OCP release and graph data image to local registry.
// Return the output directory which contains the manifests.
//
// ocmirror rewrites the REGISTRY placeholder in the imageset config (the
// bundled fixture is used when imageset is empty; note that sed edits the
// file in place), changes the working directory to dirname, and runs
// `oc mirror` against the target registry. The last whitespace-separated
// token of the command output is treated as the generated results
// directory.
func ocmirror(oc *exutil.CLI, registry string, dirname string, imageset string) (string, error) {
var imagesetTemplate string
if imageset == "" {
imagesetTemplate = exutil.FixturePath("testdata", "ota", "osus", "imageset-config.yaml")
} else {
imagesetTemplate = imageset
}
sedCmd := fmt.Sprintf("sed -i 's|REGISTRY|%s|g' %s", registry, imagesetTemplate)
if err := exec.Command("bash", "-c", sedCmd).Run(); err != nil {
e2e.Logf("Update the imageset template failed: %v", err.Error())
return "", err
}
if err := os.Chdir(dirname); err != nil {
e2e.Logf("Failed to cd %s: %v", dirname, err.Error())
return "", err
}
output, err := oc.WithoutNamespace().WithoutKubeconf().Run("mirror").Args("-c", imagesetTemplate, "--ignore-history", "docker://"+registry, "--dest-skip-tls").Output()
if err != nil {
e2e.Logf("Mirror images failed: %v", err.Error())
return "", err
}
e2e.Logf("output of oc-mirror is %s", output)
// oc-mirror prints the results directory as the last token of its output.
substrings := strings.Split(output, " ")
outdir := dirname + "/" + substrings[len(substrings)-1]
return outdir, nil
}
// checkCOHealth reports whether the named cluster operator is healthy,
// i.e. its conditions read Available=True, Progressing=False and
// Degraded=False. Any error fetching the status counts as unhealthy.
func checkCOHealth(oc *exutil.CLI, co string) bool {
	e2e.Logf("Checking CO %s is healthy...", co)
	const healthy = "TrueFalseFalse"
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", co, "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
	if err != nil {
		e2e.Logf("Get co status failed: %v", err.Error())
		return false
	}
	return strings.Contains(out, healthy)
}
// Configure the Registry Certificate as trusted for cincinnati.
//
// trustCert creates the openshift-config/trusted-ca configmap holding cert
// under two keys (one derived from the registry host, plus the fixed
// "updateservice-registry" key), points image.config.openshift.io/cluster
// at it, and waits up to 10 minutes for the image-registry cluster
// operator to settle. Fails the test if the operator never recovers.
func trustCert(oc *exutil.CLI, registry string, cert string) (err error) {
var output string
certRegistry := registry
// Configmap keys may not contain ":", so "host:port" is rewritten to "host..port".
before, after, found := strings.Cut(registry, ":")
if found {
certRegistry = before + ".." + after
}
if err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", "openshift-config", "configmap", "trusted-ca", "--from-file="+certRegistry+"="+cert, "--from-file=updateservice-registry="+cert).Execute(); err != nil {
err = fmt.Errorf("create trust-ca configmap failed: %v", err)
return
}
if err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("image.config.openshift.io/cluster", "-p", `{"spec": {"additionalTrustedCA": {"name": "trusted-ca"}}}`, "--type=merge").Execute(); err != nil {
err = fmt.Errorf("patch image.config.openshift.io/cluster failed: %v", err)
return
}
waitErr := wait.Poll(30*time.Second, 10*time.Minute, func() (bool, error) {
registryHealth := checkCOHealth(oc, "image-registry")
if registryHealth {
return true, nil
}
// Keep the latest Available message for the failure report.
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("co/image-registry", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].message}").Output()
e2e.Logf("Waiting for image-registry coming ready...")
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("Image registry is not ready with info %s\n", output))
return nil
}
// installOSUSAppOCMirror applies the manifests generated by oc-mirror in
// outdir to install an OSUS instance, then waits for its pod to be ready.
func installOSUSAppOCMirror(oc *exutil.CLI, outdir string) error {
	e2e.Logf("Install OSUS instance")
	if err := oc.AsAdmin().Run("apply").Args("-f", outdir).Execute(); err != nil {
		return fmt.Errorf("install osus instance failed: %v", err)
	}
	waitForPodReady(oc, "app=update-service-oc-mirror", oc.Namespace())
	return nil
}
// getOSUSApp returns the name of the UpdateService (OSUS) instance in the
// current namespace, wrapping any lookup failure.
func getOSUSApp(oc *exutil.CLI) (string, error) {
	e2e.Logf("Get OSUS instance")
	instance, err := oc.AsAdmin().Run("get").Args("updateservice", "-o=jsonpath={.items[].metadata.name}").Output()
	if err != nil {
		return instance, fmt.Errorf("get OSUS instance failed: %v", err)
	}
	return instance, nil
}
// uninstallOSUSApp looks up the UpdateService instance in the current
// namespace and deletes it, wrapping any deletion failure.
func uninstallOSUSApp(oc *exutil.CLI) error {
	e2e.Logf("Uninstall OSUS instance")
	instance, err := getOSUSApp(oc)
	if err != nil {
		return err
	}
	if _, err = oc.AsAdmin().Run("delete").Args("updateservice", instance).Output(); err != nil {
		return fmt.Errorf("uninstall OSUS instance failed: %v", err)
	}
	return nil
}
// Verify the OSUS application works.
//
// verifyOSUS resolves the UpdateService instance's policy engine URI and
// polls its graph endpoint (for up to 5 minutes, through the cluster proxy
// when one is configured) until it answers HTTP 200. On timeout it dumps
// pod listings, filtered pod logs and a pod description to aid debugging,
// then fails the test.
//
// Fixes over the previous revision:
//   - e2e.Logf("All OSUS pods: \n", allOSUSPods) was missing a %s verb, so
//     the pod listing was silently dropped (go vet printf failure).
//   - the HTTP response body is now closed on every poll iteration so the
//     transport can reuse connections instead of leaking them.
func verifyOSUS(oc *exutil.CLI) (err error) {
	e2e.Logf("Verify the OSUS works")
	instance, err := getOSUSApp(oc)
	if err != nil {
		return fmt.Errorf("get OSUS app failed: %v", err)
	}
	PEURI, err := oc.AsAdmin().Run("get").Args("-o", "jsonpath={.status.policyEngineURI}", "updateservice", instance).Output()
	if err != nil {
		return fmt.Errorf("get policy engine URI failed: %v", err)
	}
	proxyURL := getProxyURL()
	// The policy engine serves a cert we do not trust here, so skip verification.
	transCfg := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		Proxy:           http.ProxyURL(proxyURL),
	}
	client := &http.Client{Transport: transCfg}
	// Any channel is available, we just check whether osus working properly and don't care about the channel itself.
	uri := PEURI + "/api/upgrades_info/v1/graph?channel=stable-4.13"
	e2e.Logf("check if osus update service available or not through graph URI: " + uri)
	waitErr := wait.Poll(20*time.Second, 5*time.Minute, func() (bool, error) {
		response, err := client.Get(uri)
		if err != nil {
			msg := fmt.Sprintf("reach graph URI failed: %v", err)
			e2e.Logf(msg)
			return false, nil
		}
		// Close the body each iteration so connections can be reused.
		defer response.Body.Close()
		if response.StatusCode != 200 {
			msg := fmt.Sprintf("graph URI is not active, response code is %v", response.StatusCode)
			e2e.Logf(msg)
			return false, nil
		}
		return true, nil
	})
	if waitErr != nil {
		// Get OSUS pod logs when OSUS returns non-200
		allOSUSPods, _ := oc.AsAdmin().Run("get").Args("pod").Output()
		e2e.Logf("All OSUS pods: \n%s", allOSUSPods)
		osusPod, _ := oc.AsAdmin().Run("get").Args("pod", "--selector=app="+instance, "-o=jsonpath={.items[].metadata.name}").Output()
		podLogs, _ := oc.AsAdmin().Run("logs").Args(osusPod, "-c", "graph-builder").Output()
		failLogs, _ := exec.Command("bash", "-c", "echo \""+podLogs+"\" | grep -Ei 'error|fail'").Output()
		// Hardcode the max log lines
		if len(failLogs) <= 1024 {
			e2e.Logf("OSUS Pod Logs with error or fail info are: \n %s", failLogs)
		} else {
			e2e.Logf("OSUS Pod Logs with error or fail info are: \n %s", failLogs[len(failLogs)-1024:])
		}
		describePod, _ := oc.AsAdmin().Run("describe").Args("pod", osusPod).Output()
		e2e.Logf("Describe the OSUS pod: \n %s", describePod)
	}
	exutil.AssertWaitPollNoErr(waitErr, "graph URI is not active")
	return
}
// restoreAddCA undoes trustCert: it deletes the temporary trusted-ca
// configmap and restores the cluster image config's additionalTrustedCA to
// addCA (the field is removed entirely when addCA is empty), then waits up
// to 3 minutes for the image-registry cluster operator to report healthy.
func restoreAddCA(oc *exutil.CLI, addCA string) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-config", "configmap", "trusted-ca").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
var message string
if addCA == "" {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("image.config.openshift.io/cluster", "--type=json", "-p", "[{\"op\":\"remove\", \"path\":\"/spec/additionalTrustedCA\"}]").Execute()
} else {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("image.config.openshift.io/cluster", "--type=merge", "--patch", fmt.Sprintf("{\"spec\":{\"additionalTrustedCA\":%s}}", addCA)).Execute()
}
o.Expect(err).NotTo(o.HaveOccurred())
waitErr := wait.Poll(30*time.Second, 3*time.Minute, func() (bool, error) {
registryHealth := checkCOHealth(oc, "image-registry")
if registryHealth {
return true, nil
}
// Keep the latest Available message for the failure report.
message, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("co/image-registry", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].message}").Output()
e2e.Logf("Wait for image-registry coming ready")
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("Image registry is not ready with info %s\n", message))
}
// buildPushGraphImage builds the graph-data image with podman from the
// bundled Dockerfile fixture and pushes it to the registry referenced by
// tag, authenticating with the docker config stored in dirname.
func buildPushGraphImage(oc *exutil.CLI, tag string, dirname string) error {
	e2e.Logf("Build graph-data image")
	dockerFile := exutil.FixturePath("testdata", "ota", "osus", "Dockerfile")
	buildCmd := fmt.Sprintf("podman build -f %s -t %s", dockerFile, tag)
	if out, err := exec.Command("bash", "-c", buildCmd).CombinedOutput(); err != nil {
		return fmt.Errorf("%s failed: %v\n%s", buildCmd, err, string(out))
	}
	pushCmd := fmt.Sprintf("podman push --trace --authfile %s --tls-verify=false %s", dirname+"/.dockerconfigjson", tag)
	if out, err := exec.Command("bash", "-c", pushCmd).CombinedOutput(); err != nil {
		return fmt.Errorf("%s failed: %v\n%s", pushCmd, err, string(out))
	}
	return nil
}
// mirror mirrors an OCP release payload into the given registry with
// `oc adm release mirror`, using the docker config in dirname for auth.
// The payload must be of the form "repo:tag"; its tag names the mirrored
// release image under <registry>/ocp-release.
func mirror(oc *exutil.CLI, registry string, payload string, dirname string) error {
	e2e.Logf("Mirror OCP images by oc adm release mirror")
	_, tag, found := strings.Cut(payload, ":")
	if !found {
		return fmt.Errorf("the payload is invalid")
	}
	mirrorCmd := fmt.Sprintf("oc adm release mirror -a %s --insecure=true --from %s --to=%s --to-release-image=%s", dirname+"/.dockerconfigjson", payload, registry+"/ocp-image", registry+"/ocp-release:"+tag)
	if out, err := exec.Command("bash", "-c", mirrorCmd).CombinedOutput(); err != nil {
		return fmt.Errorf("%s failed: %v\n%s", mirrorCmd, err, string(out))
	}
	return nil
}
// installOSUSAppOC installs an OSUS instance by creating the given
// UpdateService resource and waiting until its pod is ready.
func installOSUSAppOC(oc *exutil.CLI, us updateService) error {
	e2e.Logf("Install OSUS instance")
	if err := us.create(oc); err != nil {
		return fmt.Errorf("install osus instance failed: %v", err)
	}
	waitForPodReady(oc, "app="+us.name, oc.Namespace())
	return nil
}
// installOSUSOperator installs the OSUS (cincinnati) operator into the
// test namespace: it creates an OperatorGroup and a Subscription pinned to
// the given startingCSV version (empty means no pin, so OLM installs the
// latest), approves the installplan by hand when mode is "Manual" and a
// version is pinned, and finally waits for the updateservice-operator pod
// to be ready.
func installOSUSOperator(oc *exutil.CLI, version string, mode string) {
e2e.Logf("Install OSUS operator")
testDataDir := exutil.FixturePath("testdata", "ota/osus")
ogTemp := filepath.Join(testDataDir, "operatorgroup.yaml")
subTemp := filepath.Join(testDataDir, "subscription.yaml")
var csv string
// Empty version leaves startingCSV empty (install latest CSV).
if version == "" {
csv = version
} else {
csv = fmt.Sprintf("update-service-operator.v%s", version)
}
og := operatorGroup{
name: "osus-og",
namespace: oc.Namespace(),
template: ogTemp,
}
sub := subscription{
name: "osus-sub",
namespace: oc.Namespace(),
channel: "v1",
approval: mode,
operatorName: "cincinnati-operator",
sourceName: "qe-app-registry",
sourceNamespace: "openshift-marketplace",
startingCSV: csv,
template: subTemp,
}
e2e.Logf("Create OperatorGroup...")
og.create(oc)
e2e.Logf("Create Subscription...")
sub.create(oc)
if mode == "Manual" && version != "" {
e2e.Logf("Approve installplan manually...")
// Find the installplan whose CSV list contains the pinned version,
// waiting up to 3 minutes for OLM to generate it.
jsonpath := fmt.Sprintf("-o=jsonpath={.items[?(@.spec.clusterServiceVersionNames[]=='%s')].metadata.name}", csv)
o.Eventually(func() string {
osusIP, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", jsonpath, "-n", oc.Namespace()).Output()
e2e.Logf("waiting for ip: %s", osusIP)
return osusIP
}, 3*time.Minute, 1*time.Minute).ShouldNot(o.BeEmpty(), "Fail to generate installplan!")
osusIP, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", jsonpath, "-n", oc.Namespace()).Output()
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("installplan", osusIP, "--type=json", "-p", "[{\"op\": \"replace\", \"path\": \"/spec/approved\", \"value\": true}]", "-n", oc.Namespace()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
waitForPodReady(oc, "name=updateservice-operator", oc.Namespace())
}
// upgradeOSUS upgrades the OSUS operator to the given version by approving
// the pending installplan, waits (polling up to ~5 minutes) for both the
// operator pod and the operand (updateservice) pods to be replaced, and
// finally verifies that the new operator pod advertises the expected CSV
// version via its OPERATOR_CONDITION_NAME environment variable.
func upgradeOSUS(oc *exutil.CLI, usname string, version string) error {
e2e.Logf("Check installplan available...")
ips, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
if err != nil {
return err
}
// Exactly two installplans are expected: the installed one and the pending upgrade.
if len(strings.Fields(ips)) != 2 {
return fmt.Errorf("unexpected installplan found: %s", ips)
}
// Record pre-upgrade operator and operand pod names to detect the rollout.
preOPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
if err != nil {
return err
}
preAPPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=app="+usname, "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
if err != nil {
return err
}
e2e.Logf("Manually approve new installplan for update...")
// Select the installplan whose CSV list contains the target version.
jsonpath := fmt.Sprintf("-o=jsonpath={.items[?(@.spec.clusterServiceVersionNames[]=='update-service-operator.v%s')].metadata.name}", version)
osusIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", jsonpath, "-n", oc.Namespace()).Output()
if err != nil {
return err
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("installplan", osusIP, "--type=json", "-p", "[{\"op\": \"replace\", \"path\": \"/spec/approved\", \"value\": true}]", "-n", oc.Namespace()).Execute()
if err != nil {
return err
}
e2e.Logf("Waiting for operator and operand pods rolling...")
var (
postOPName string
errOP error
)
preAppList := strings.Fields(preAPPName)
err = wait.PollUntilContextTimeout(context.Background(), 1*time.Minute, 5*time.Minute, true, func(context.Context) (bool, error) {
postOPName, errOP = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
postAPPName, errAPP := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=app="+usname, "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
if errOP != nil || errAPP != nil {
return false, nil
}
// The operator pod must have been replaced (new name).
if strings.Compare(postOPName, preOPName) == 0 {
e2e.Logf("waiting: operator pods after upgrade: %s; while operator pods before upgrade: %s", postOPName, preOPName)
return false, nil
}
// None of the pre-upgrade operand pods may remain.
for _, pre := range preAppList {
if strings.Contains(postAPPName, pre) {
e2e.Logf("waiting: app pods after upgrade: %s; while app pods before upgrade: %s", postAPPName, preAPPName)
return false, nil
}
}
// The operand pod count must match the pre-upgrade replica count.
if len(strings.Fields(postAPPName)) != len(preAppList) {
e2e.Logf("waiting for pods [%s] to expected number %d", postAPPName, len(preAppList))
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("pod is not rolling successfully after upgrade: %v", err)
}
// Confirm the new operator pod carries the target CSV version.
csvInPostPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", postOPName, "-o=jsonpath={.spec.containers[].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}", "-n", oc.Namespace()).Output()
if err != nil {
return err
}
if !strings.Contains(csvInPostPod, version) {
return fmt.Errorf("unexpected operator version upgraded: %s", csvInPostPod)
}
return nil
}
// skipUnsupportedOCPVer skips the test unless the given old OSUS operator
// version is the known-supported one (4.9.1) and the cluster's current OCP
// minor version appears in that version's support matrix.
func skipUnsupportedOCPVer(oc *exutil.CLI, version string) {
	supported := supportedMap{
		osusver: "4.9.1",
		ocpver:  []string{"4.8", "4.9", "4.10", "4.11"},
	}
	clusterVersion, _, err := exutil.GetClusterVersion(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	if version != supported.osusver {
		g.Skip(fmt.Sprintf("Skip test for cluster with unrecoginzed old osus version %s!", version))
	}
	matched := false
	for _, ver := range supported.ocpver {
		if clusterVersion == ver {
			matched = true
			break
		}
	}
	if !matched {
		g.Skip("Skip test for cluster with old osus on unsupported ocp version!")
	}
}
// check if osus instance re-deployed successfully.
//
// verifyAppRolling waits (polling up to 5 minutes) for the updateservice
// pods to be rolled: every pod name in prelist must be gone and the number
// of new Running pods must equal len(prelist). Returns the new pod names,
// or an error on timeout.
func verifyAppRolling(oc *exutil.CLI, usname string, prelist []string) (postlist []string, err error) {
e2e.Logf("Waiting for operand pods rolling...")
err = wait.PollUntilContextTimeout(context.Background(), 1*time.Minute, 5*time.Minute, true, func(context.Context) (bool, error) {
postAPPName, err := oc.AsAdmin().Run("get").Args("pods", "--selector=app="+usname, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
if err != nil {
return false, nil
}
// Any surviving pre-rolling pod means the rollout has not finished yet.
for _, pre := range prelist {
if strings.Contains(postAPPName, pre) {
e2e.Logf("waiting: current app pods: %s; while app pods before rolling: %s", postAPPName, prelist)
return false, nil
}
}
postlist = strings.Fields(postAPPName)
if len(postlist) != len(prelist) {
e2e.Logf("waiting for pods [%s] to expected number %d", postlist, len(prelist))
return false, nil
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("pod is not rolling successfully: %v", err)
}
return
}
|
package osus
| ||||
function
|
openshift/openshift-tests-private
|
166a393d-a837-4e63-8e16-04f321ac4747
|
getProxyURL
|
['"net/url"', '"os"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func getProxyURL() *url.URL {
// Prefer https_proxy, fallback to http_proxy
proxyURLString := os.Getenv("https_proxy")
if proxyURLString == "" {
proxyURLString = os.Getenv("http_proxy")
}
if proxyURLString == "" {
return nil
}
proxyURL, err := url.Parse(proxyURLString)
if err != nil {
e2e.Failf("error parsing proxy URL: %v", err)
}
return proxyURL
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
a3073e37-bb96-4d28-87b5-5d9e1b8fe26c
|
applyResourceFromTemplate
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['resource']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var cfgFileJson string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "osus-resource-cfg.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
cfgFileJson = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
e2e.Logf("the file of resource is %s", cfgFileJson)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", cfgFileJson).Execute()
}
|
osus
| |||
function
|
openshift/openshift-tests-private
|
4cd9906b-902e-467f-ad35-9849aabba069
|
getRandomString
|
['"math/rand"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
89e96f31-aab2-4bcb-b45c-c659f8f27660
|
create
|
['operatorGroup']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func (og *operatorGroup) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", og.template, "-p", "NAME="+og.name, "NAMESPACE="+og.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
6252fb77-a2e3-4331-9b85-8dcd55ed6dd4
|
create
|
['subscription']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func (sub *subscription) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "NAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.approval, "OPERATORNAME="+sub.operatorName, "SOURCENAME="+sub.sourceName, "SOURCENAMESPACE="+sub.sourceNamespace, "STARTINGCSV="+sub.startingCSV)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
a00a27f5-a870-4957-b6be-3d5da4415089
|
removeResource
|
['"strings"']
|
['resource']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func removeResource(oc *exutil.CLI, parameters ...string) {
output, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(parameters...).Output()
if err != nil && (strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found")) {
e2e.Logf("No resource found!")
return
}
o.Expect(err).NotTo(o.HaveOccurred())
}
|
osus
| |||
function
|
openshift/openshift-tests-private
|
20ba54ba-3a2b-4af1-8048-98db8bd202c9
|
delete
|
['operatorGroup']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func (og *operatorGroup) delete(oc *exutil.CLI) {
removeResource(oc, "-n", og.namespace, "operatorgroup", og.name)
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
a81a6720-d10e-4ff5-a8c3-1e457cfc7b20
|
delete
|
['subscription']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func (sub *subscription) delete(oc *exutil.CLI) {
removeResource(oc, "-n", sub.namespace, "subscription", sub.name)
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
6d78e5a8-cf5a-436a-b362-9cb278a182f3
|
create
|
['"strconv"']
|
['updateService']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func (us *updateService) create(oc *exutil.CLI) (err error) {
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", us.template, "-p", "NAME="+us.name, "NAMESPACE="+us.namespace, "GRAPHDATA="+us.graphdata, "RELEASES="+us.releases, "REPLICAS="+strconv.Itoa(us.replicas))
return
}
|
osus
| |||
function
|
openshift/openshift-tests-private
|
3817f0c2-87d6-41ba-826d-106ca0b5b07f
|
waitForPodReady
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func waitForPodReady(oc *exutil.CLI, pod string, ns string) {
e2e.Logf("Waiting for %s pod creating...", pod)
pollErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
cmdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector="+pod, "-n", ns).Output()
if err != nil || strings.Contains(cmdOut, "No resources found") {
e2e.Logf("No pod found, keep trying! error: %v, cmdOut: %s", err, cmdOut)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("pod with name=%s is not found", pod))
e2e.Logf("Waiting for %s pod ready and running...", pod)
pollErr = wait.Poll(30*time.Second, 600*time.Second, func() (bool, error) {
stateOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector="+pod, "-n", ns, "-o=jsonpath={.items[*].status.phase}").Output()
if err != nil {
e2e.Logf("pod phase status: %s with error %v, try again", stateOut, err)
return false, nil
}
state := strings.Split(stateOut, " ")
for _, s := range state {
if strings.Compare(s, "Running") != 0 {
e2e.Logf("pod status: %s, try again", s)
return false, nil
}
}
readyOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector="+pod, "-n", ns, "-o=jsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output()
if err != nil {
e2e.Logf("pod ready condition: %s with error %v, try again", readyOut, err)
return false, nil
}
ready := strings.Split(readyOut, " ")
for _, s := range ready {
if strings.Compare(s, "True") != 0 {
e2e.Logf("pod ready condition: %s, try again", s)
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("pod %s is not running", pod))
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
73b76a22-427b-4c25-b1f2-a81236e40857
|
copyFile
|
['"io/ioutil"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func copyFile(source string, dest string) {
bytesRead, err := ioutil.ReadFile(source)
o.Expect(err).NotTo(o.HaveOccurred())
err = ioutil.WriteFile(dest, bytesRead, 0644)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
9e301cac-15c2-4519-83f2-cc85ce840b86
|
locatePodmanCred
|
['"fmt"', '"os"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
func locatePodmanCred(oc *exutil.CLI, dst string) (dirname string, err error) {
e2e.Logf("Setting env for oc-mirror credential")
if err = exutil.GetPullSec(oc, dst); err != nil {
return "", fmt.Errorf("extract pull-secret failed: %v", err)
}
if os.Getenv("CLUSTER_PROFILE_DIR") != "" {
cmd := fmt.Sprintf("jq -s '.[0]*.[1]' %s %s > %s", dst+"/.dockerconfigjson", os.Getenv("CLUSTER_PROFILE_DIR")+"/pull-secret", dst+"/auth.json")
if _, err = exec.Command("bash", "-c", cmd).CombinedOutput(); err != nil {
return "", fmt.Errorf("%s failed: %v", cmd, err)
}
} else {
copyFile(dst+"/.dockerconfigjson", dst+"/auth.json")
}
envDir := filepath.Join("/tmp/", fmt.Sprintf("ota-%s", getRandomString()))
containerDir := envDir + "/containers/"
key := "XDG_RUNTIME_DIR"
currentRuntime, ex := os.LookupEnv(key)
if !ex {
if err = os.MkdirAll(containerDir, 0700); err != nil {
return "", fmt.Errorf("make dir failed: %v", err)
}
os.Setenv(key, envDir)
copyFile(dst+"/auth.json", containerDir+"auth.json")
return containerDir, nil
}
runtimeContainerDir := currentRuntime + "/containers/"
_, err = os.Stat(runtimeContainerDir + "auth.json")
if os.IsNotExist(err) {
if err = os.MkdirAll(runtimeContainerDir, 0700); err != nil {
return "", fmt.Errorf("make dir failed: %v", err)
}
copyFile(dst+"/auth.json", runtimeContainerDir+"auth.json")
}
return runtimeContainerDir, nil
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
d1b61811-6c5d-414f-9cc3-4f3a958734c7
|
ocmirror
|
['"crypto/tls"', '"fmt"', '"io/ioutil"', '"os"', '"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// ocmirror mirrors the images described by an imageset config to registry
// using `oc mirror` and returns the directory holding the generated results.
// When imageset is empty the default fixture imageset-config.yaml is used.
// NOTE(review): the template is edited in place by sed (REGISTRY placeholder
// substitution), so a given fixture copy is effectively single-use.
func ocmirror(oc *exutil.CLI, registry string, dirname string, imageset string) (string, error) {
	var imagesetTemplate string
	if imageset == "" {
		imagesetTemplate = exutil.FixturePath("testdata", "ota", "osus", "imageset-config.yaml")
	} else {
		imagesetTemplate = imageset
	}
	// Substitute the REGISTRY placeholder in the imageset config.
	sedCmd := fmt.Sprintf("sed -i 's|REGISTRY|%s|g' %s", registry, imagesetTemplate)
	// e2e.Logf(sedCmd)
	if err := exec.Command("bash", "-c", sedCmd).Run(); err != nil {
		e2e.Logf("Update the imageset template failed: %v", err.Error())
		return "", err
	}
	// file, _ := os.Open(imagesetTemplate)
	// b, _ := ioutil.ReadAll(file)
	// e2e.Logf(string(b))
	// oc-mirror writes its results relative to the working directory.
	if err := os.Chdir(dirname); err != nil {
		e2e.Logf("Failed to cd %s: %v", dirname, err.Error())
		return "", err
	}
	output, err := oc.WithoutNamespace().WithoutKubeconf().Run("mirror").Args("-c", imagesetTemplate, "--ignore-history", "docker://"+registry, "--dest-skip-tls").Output()
	if err != nil {
		e2e.Logf("Mirror images failed: %v", err.Error())
		return "", err
	}
	e2e.Logf("output of oc-mirror is %s", output)
	// The last whitespace-separated token of the output names the results dir.
	substrings := strings.Split(output, " ")
	outdir := dirname + "/" + substrings[len(substrings)-1]
	return outdir, nil
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
78970110-99cd-43a0-80cb-7b4a5584684d
|
checkCOHealth
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// checkCOHealth reports whether the named cluster operator is healthy, i.e.
// its conditions read Available=True, Progressing=False and Degraded=False.
func checkCOHealth(oc *exutil.CLI, co string) bool {
	e2e.Logf("Checking CO %s is healthy...", co)
	// The jsonpath concatenates the three condition statuses in order, so a
	// healthy operator yields exactly this string.
	const healthy = "TrueFalseFalse"
	jsonpath := "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}"
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", co, jsonpath).Output()
	if err != nil {
		e2e.Logf("Get co status failed: %v", err.Error())
		return false
	}
	return strings.Contains(out, healthy)
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
9c56d81e-fad4-4416-b884-14dbe376d938
|
trustCert
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// trustCert configures the cluster to trust the given registry certificate:
// it creates the openshift-config/trusted-ca configmap holding cert and wires
// it into image.config.openshift.io/cluster as additionalTrustedCA, then
// waits up to 10 minutes for the image-registry cluster operator to settle.
func trustCert(oc *exutil.CLI, registry string, cert string) (err error) {
	var output string
	certRegistry := registry
	// For a "host:port" registry the configmap key must use "host..port".
	before, after, found := strings.Cut(registry, ":")
	if found {
		certRegistry = before + ".." + after
	}
	if err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", "openshift-config", "configmap", "trusted-ca", "--from-file="+certRegistry+"="+cert, "--from-file=updateservice-registry="+cert).Execute(); err != nil {
		err = fmt.Errorf("create trust-ca configmap failed: %v", err)
		return
	}
	if err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("image.config.openshift.io/cluster", "-p", `{"spec": {"additionalTrustedCA": {"name": "trusted-ca"}}}`, "--type=merge").Execute(); err != nil {
		err = fmt.Errorf("patch image.config.openshift.io/cluster failed: %v", err)
		return
	}
	// The CA change rolls the image-registry; wait for it to come back.
	waitErr := wait.Poll(30*time.Second, 10*time.Minute, func() (bool, error) {
		registryHealth := checkCOHealth(oc, "image-registry")
		if registryHealth {
			return true, nil
		}
		output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("co/image-registry", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].message}").Output()
		e2e.Logf("Waiting for image-registry coming ready...")
		return false, nil
	})
	exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("Image registry is not ready with info %s\n", output))
	return nil
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
ccc7b8b1-5a20-4e71-a07c-e8f4b305301a
|
installOSUSAppOCMirror
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// installOSUSAppOCMirror applies the oc-mirror generated manifests in outdir
// to create an OSUS instance, then waits for its pod to become ready.
func installOSUSAppOCMirror(oc *exutil.CLI, outdir string) error {
	e2e.Logf("Install OSUS instance")
	if applyErr := oc.AsAdmin().Run("apply").Args("-f", outdir).Execute(); applyErr != nil {
		return fmt.Errorf("install osus instance failed: %v", applyErr)
	}
	waitForPodReady(oc, "app=update-service-oc-mirror", oc.Namespace())
	return nil
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
1596a77e-a978-4af8-b8a7-c67688ec5628
|
getOSUSApp
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// getOSUSApp returns the name of the first UpdateService instance found in
// the current namespace.
func getOSUSApp(oc *exutil.CLI) (instance string, err error) {
	e2e.Logf("Get OSUS instance")
	if instance, err = oc.AsAdmin().Run("get").Args("updateservice", "-o=jsonpath={.items[].metadata.name}").Output(); err != nil {
		err = fmt.Errorf("get OSUS instance failed: %v", err)
	}
	return
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
1f4bba64-7081-4ba8-8830-fa763cef7a23
|
uninstallOSUSApp
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// uninstallOSUSApp deletes the UpdateService instance found in the current
// namespace.
func uninstallOSUSApp(oc *exutil.CLI) error {
	e2e.Logf("Uninstall OSUS instance")
	instance, err := getOSUSApp(oc)
	if err != nil {
		return err
	}
	if _, err = oc.AsAdmin().Run("delete").Args("updateservice", instance).Output(); err != nil {
		return fmt.Errorf("uninstall OSUS instance failed: %v", err)
	}
	return nil
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
ac7cccbd-8cc5-4c3c-b890-5120a203c20b
|
verifyOSUS
|
['"crypto/tls"', '"fmt"', '"net/http"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// verifyOSUS verifies that the deployed OSUS instance serves an update graph:
// it resolves the instance's policy engine URI and polls the graph endpoint
// (through the cluster proxy, if any) until it returns HTTP 200.
// On timeout it dumps pod listings, filtered pod logs, and a pod description
// to aid debugging before failing the test.
func verifyOSUS(oc *exutil.CLI) (err error) {
	e2e.Logf("Verify the OSUS works")
	instance, err := getOSUSApp(oc)
	if err != nil {
		return fmt.Errorf("get OSUS app failed: %v", err)
	}
	PEURI, err := oc.AsAdmin().Run("get").Args("-o", "jsonpath={.status.policyEngineURI}", "updateservice", instance).Output()
	if err != nil {
		return fmt.Errorf("get policy engine URI failed: %v", err)
	}
	proxyURL := getProxyURL()
	transCfg := &http.Transport{
		// Test routes use self-signed certificates.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		Proxy:           http.ProxyURL(proxyURL),
	}
	client := &http.Client{Transport: transCfg}
	// Any channel is available, we just check whether osus working properly and don't care about the channel itself.
	uri := PEURI + "/api/upgrades_info/v1/graph?channel=stable-4.13"
	e2e.Logf("check if osus update service available or not through graph URI: " + uri)
	waitErr := wait.Poll(20*time.Second, 5*time.Minute, func() (bool, error) {
		response, err := client.Get(uri)
		if err != nil {
			e2e.Logf("reach graph URI failed: %v", err)
			return false, nil
		}
		// Fix: close the body on every attempt so connections are not
		// leaked across poll iterations.
		defer response.Body.Close()
		if response.StatusCode != 200 {
			e2e.Logf("graph URI is not active, response code is %v", response.StatusCode)
			return false, nil
		}
		return true, nil
	})
	if waitErr != nil {
		// Get OSUS pod logs when OSUS returns non-200
		allOSUSPods, _ := oc.AsAdmin().Run("get").Args("pod").Output()
		// Fix: the original Logf passed an argument with no format verb.
		e2e.Logf("All OSUS pods: \n%s", allOSUSPods)
		osusPod, _ := oc.AsAdmin().Run("get").Args("pod", "--selector=app="+instance, "-o=jsonpath={.items[].metadata.name}").Output()
		podLogs, _ := oc.AsAdmin().Run("logs").Args(osusPod, "-c", "graph-builder").Output()
		failLogs, _ := exec.Command("bash", "-c", "echo \""+podLogs+"\" | grep -Ei 'error|fail'").Output()
		// Hardcode the max log lines
		if len(failLogs) <= 1024 {
			e2e.Logf("OSUS Pod Logs with error or fail info are: \n %s", failLogs)
		} else {
			e2e.Logf("OSUS Pod Logs with error or fail info are: \n %s", failLogs[len(failLogs)-1024:])
		}
		describePod, _ := oc.AsAdmin().Run("describe").Args("pod", osusPod).Output()
		e2e.Logf("Describe the OSUS pod: \n %s", describePod)
	}
	exutil.AssertWaitPollNoErr(waitErr, "graph URI is not active")
	return
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
372fb258-de35-45db-83eb-3d25f70225bd
|
restoreAddCA
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// restoreAddCA reverts the changes made by trustCert: it deletes the
// trusted-ca configmap and restores spec.additionalTrustedCA on
// image.config.openshift.io/cluster to addCA (removing the field entirely
// when addCA is empty), then waits for the image-registry operator to settle.
func restoreAddCA(oc *exutil.CLI, addCA string) {
	err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-config", "configmap", "trusted-ca").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	var message string
	if addCA == "" {
		// No previous value: drop the field instead of setting it to null.
		err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("image.config.openshift.io/cluster", "--type=json", "-p", "[{\"op\":\"remove\", \"path\":\"/spec/additionalTrustedCA\"}]").Execute()
	} else {
		err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("image.config.openshift.io/cluster", "--type=merge", "--patch", fmt.Sprintf("{\"spec\":{\"additionalTrustedCA\":%s}}", addCA)).Execute()
	}
	o.Expect(err).NotTo(o.HaveOccurred())
	waitErr := wait.Poll(30*time.Second, 3*time.Minute, func() (bool, error) {
		registryHealth := checkCOHealth(oc, "image-registry")
		if registryHealth {
			return true, nil
		}
		message, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("co/image-registry", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].message}").Output()
		e2e.Logf("Wait for image-registry coming ready")
		return false, nil
	})
	exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("Image registry is not ready with info %s\n", message))
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
efbf562c-160f-451c-b250-e451465f5dd8
|
buildPushGraphImage
|
['"crypto/tls"', '"fmt"', '"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// buildPushGraphImage builds the OSUS graph-data image from the test fixture
// Dockerfile and pushes it to tag, authenticating with the pull secret stored
// under dirname (as .dockerconfigjson). TLS verification is disabled for the
// push since the target is a test registry.
func buildPushGraphImage(oc *exutil.CLI, tag string, dirname string) (err error) {
	e2e.Logf("Build graph-data image")
	dockerFile := exutil.FixturePath("testdata", "ota", "osus", "Dockerfile")
	cmd := fmt.Sprintf("podman build -f %s -t %s", dockerFile, tag)
	var out []byte
	if out, err = exec.Command("bash", "-c", cmd).CombinedOutput(); err != nil {
		err = fmt.Errorf("%s failed: %v\n%s", cmd, err, string(out))
		return
	}
	cmd = fmt.Sprintf("podman push --trace --authfile %s --tls-verify=false %s", dirname+"/.dockerconfigjson", tag)
	if out, err = exec.Command("bash", "-c", cmd).CombinedOutput(); err != nil {
		err = fmt.Errorf("%s failed: %v\n%s", cmd, err, string(out))
		return
	}
	return
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
ec6d2dfc-3d2b-4bce-9a65-a2009386ccf0
|
mirror
|
['"fmt"', '"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// mirror mirrors the given OCP release payload into registry using
// `oc adm release mirror`, placing images under <registry>/ocp-image and the
// release image at <registry>/ocp-release:<tag>, where <tag> is taken from
// the payload reference after the colon.
func mirror(oc *exutil.CLI, registry string, payload string, dirname string) (err error) {
	e2e.Logf("Mirror OCP images by oc adm release mirror")
	// The payload must be of the form "<repo>:<tag>".
	_, tag, found := strings.Cut(payload, ":")
	if !found {
		err = fmt.Errorf("the payload is invalid")
		return
	}
	cmd := fmt.Sprintf("oc adm release mirror -a %s --insecure=true --from %s --to=%s --to-release-image=%s", dirname+"/.dockerconfigjson", payload, registry+"/ocp-image", registry+"/ocp-release:"+tag)
	var out []byte
	if out, err = exec.Command("bash", "-c", cmd).CombinedOutput(); err != nil {
		err = fmt.Errorf("%s failed: %v\n%s", cmd, err, string(out))
		return
	}
	return
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
84d95caf-3e2f-47c5-9396-1abcbcd6cd9b
|
installOSUSAppOC
|
['"fmt"']
|
['updateService']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// installOSUSAppOC creates the given UpdateService resource and waits for its
// application pod to become ready.
func installOSUSAppOC(oc *exutil.CLI, us updateService) error {
	e2e.Logf("Install OSUS instance")
	if createErr := us.create(oc); createErr != nil {
		return fmt.Errorf("install osus instance failed: %v", createErr)
	}
	waitForPodReady(oc, "app="+us.name, oc.Namespace())
	return nil
}
|
osus
| |||
function
|
openshift/openshift-tests-private
|
31cfbdbe-6686-44be-9849-83490a345ac7
|
installOSUSOperator
|
['"fmt"', '"path/filepath"', '"time"']
|
['operatorGroup', 'subscription']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// installOSUSOperator installs the OSUS (cincinnati) operator into the
// current namespace by creating an OperatorGroup and a Subscription from the
// test fixtures.
// version pins the startingCSV ("" leaves it unset so the latest in the
// channel installs); mode is the installplan approval mode ("Automatic" or
// "Manual"). For Manual mode with a pinned version, the generated installplan
// is approved here before waiting for the operator pod.
func installOSUSOperator(oc *exutil.CLI, version string, mode string) {
	e2e.Logf("Install OSUS operator")
	testDataDir := exutil.FixturePath("testdata", "ota/osus")
	ogTemp := filepath.Join(testDataDir, "operatorgroup.yaml")
	subTemp := filepath.Join(testDataDir, "subscription.yaml")
	var csv string
	if version == "" {
		// Empty version -> empty startingCSV (subscription tracks latest).
		csv = version
	} else {
		csv = fmt.Sprintf("update-service-operator.v%s", version)
	}
	og := operatorGroup{
		name:      "osus-og",
		namespace: oc.Namespace(),
		template:  ogTemp,
	}
	sub := subscription{
		name:            "osus-sub",
		namespace:       oc.Namespace(),
		channel:         "v1",
		approval:        mode,
		operatorName:    "cincinnati-operator",
		sourceName:      "qe-app-registry",
		sourceNamespace: "openshift-marketplace",
		startingCSV:     csv,
		template:        subTemp,
	}
	e2e.Logf("Create OperatorGroup...")
	og.create(oc)
	e2e.Logf("Create Subscription...")
	sub.create(oc)
	if mode == "Manual" && version != "" {
		e2e.Logf("Approve installplan manually...")
		// Find the installplan carrying exactly the pinned CSV.
		jsonpath := fmt.Sprintf("-o=jsonpath={.items[?(@.spec.clusterServiceVersionNames[]=='%s')].metadata.name}", csv)
		o.Eventually(func() string {
			osusIP, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", jsonpath, "-n", oc.Namespace()).Output()
			e2e.Logf("waiting for ip: %s", osusIP)
			return osusIP
		}, 3*time.Minute, 1*time.Minute).ShouldNot(o.BeEmpty(), "Fail to generate installplan!")
		osusIP, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", jsonpath, "-n", oc.Namespace()).Output()
		err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("installplan", osusIP, "--type=json", "-p", "[{\"op\": \"replace\", \"path\": \"/spec/approved\", \"value\": true}]", "-n", oc.Namespace()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}
	waitForPodReady(oc, "name=updateservice-operator", oc.Namespace())
}
|
osus
| |||
function
|
openshift/openshift-tests-private
|
e45914d2-8a01-4c3e-8cde-9d774e90a626
|
upgradeOSUS
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// upgradeOSUS upgrades the OSUS operator to the given version by manually
// approving the second installplan, then waits for both the operator pod and
// the operand pods (app=usname) to be replaced, and finally verifies the new
// operator pod runs the expected CSV version.
// It expects exactly two installplans to exist (initial install + pending
// upgrade) before approving.
func upgradeOSUS(oc *exutil.CLI, usname string, version string) error {
	e2e.Logf("Check installplan available...")
	ips, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
	if err != nil {
		return err
	}
	if len(strings.Fields(ips)) != 2 {
		return fmt.Errorf("unexpected installplan found: %s", ips)
	}
	// Record pre-upgrade pod names so the rollout can be detected below.
	preOPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
	if err != nil {
		return err
	}
	preAPPName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=app="+usname, "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
	if err != nil {
		return err
	}
	e2e.Logf("Manually approve new installplan for update...")
	jsonpath := fmt.Sprintf("-o=jsonpath={.items[?(@.spec.clusterServiceVersionNames[]=='update-service-operator.v%s')].metadata.name}", version)
	osusIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("installplan", jsonpath, "-n", oc.Namespace()).Output()
	if err != nil {
		return err
	}
	err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("installplan", osusIP, "--type=json", "-p", "[{\"op\": \"replace\", \"path\": \"/spec/approved\", \"value\": true}]", "-n", oc.Namespace()).Execute()
	if err != nil {
		return err
	}
	e2e.Logf("Waiting for operator and operand pods rolling...")
	var (
		postOPName string
		errOP      error
	)
	preAppList := strings.Fields(preAPPName)
	// A successful rollout means: the operator pod name changed, no old
	// operand pod name survives, and the operand pod count is unchanged.
	err = wait.PollUntilContextTimeout(context.Background(), 1*time.Minute, 5*time.Minute, true, func(context.Context) (bool, error) {
		postOPName, errOP = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=name=updateservice-operator", "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
		postAPPName, errAPP := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector=app="+usname, "-o=jsonpath={.items[*].metadata.name}", "-n", oc.Namespace()).Output()
		if errOP != nil || errAPP != nil {
			return false, nil
		}
		if strings.Compare(postOPName, preOPName) == 0 {
			e2e.Logf("waiting: operator pods after upgrade: %s; while operator pods before upgrade: %s", postOPName, preOPName)
			return false, nil
		}
		for _, pre := range preAppList {
			if strings.Contains(postAPPName, pre) {
				e2e.Logf("waiting: app pods after upgrade: %s; while app pods before upgrade: %s", postAPPName, preAPPName)
				return false, nil
			}
		}
		if len(strings.Fields(postAPPName)) != len(preAppList) {
			e2e.Logf("waiting for pods [%s] to expected number %d", postAPPName, len(preAppList))
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("pod is not rolling successfully after upgrade: %v", err)
	}
	// OPERATOR_CONDITION_NAME carries the CSV name of the running operator.
	csvInPostPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", postOPName, "-o=jsonpath={.spec.containers[].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}", "-n", oc.Namespace()).Output()
	if err != nil {
		return err
	}
	if !strings.Contains(csvInPostPod, version) {
		return fmt.Errorf("unexpected operator version upgraded: %s", csvInPostPod)
	}
	return nil
}
|
osus
| ||||
function
|
openshift/openshift-tests-private
|
83463a14-d1c4-48b0-a8f9-11a282285c47
|
skipUnsupportedOCPVer
|
['"fmt"']
|
['supportedMap']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// skipUnsupportedOCPVer skips the current test when the given (old) OSUS
// version is not a known version, or when the cluster's OCP version is not in
// that OSUS version's support matrix.
func skipUnsupportedOCPVer(oc *exutil.CLI, version string) {
	// Support matrix for the only old OSUS version exercised by these tests.
	mapTest := supportedMap{
		osusver: "4.9.1",
		ocpver:  []string{"4.8", "4.9", "4.10", "4.11"},
	}
	clusterVersion, _, err := exutil.GetClusterVersion(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	if version != mapTest.osusver {
		// Fix: corrected typo "unrecoginzed" -> "unrecognized".
		g.Skip(fmt.Sprintf("Skip test for cluster with unrecognized old osus version %s!", version))
	}
	supported := false
	for _, ver := range mapTest.ocpver {
		if clusterVersion == ver {
			supported = true
			break
		}
	}
	if !supported {
		g.Skip("Skip test for cluster with old osus on unsupported ocp version!")
	}
}
|
osus
| |||
function
|
openshift/openshift-tests-private
|
afbb5b32-dc8f-48b7-91ac-e530edfdd850
|
verifyAppRolling
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/ota/osus/utils.go
|
// verifyAppRolling waits until the operand pods of the given UpdateService
// have fully rolled over: no pod name from prelist survives and the number of
// Running pods equals len(prelist). It returns the post-roll pod names, or an
// error on timeout.
func verifyAppRolling(oc *exutil.CLI, usname string, prelist []string) (postlist []string, err error) {
	e2e.Logf("Waiting for operand pods rolling...")
	err = wait.PollUntilContextTimeout(context.Background(), 1*time.Minute, 5*time.Minute, true, func(context.Context) (bool, error) {
		// Only Running pods count toward the new generation.
		postAPPName, err := oc.AsAdmin().Run("get").Args("pods", "--selector=app="+usname, "-o=jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}").Output()
		if err != nil {
			return false, nil
		}
		for _, pre := range prelist {
			if strings.Contains(postAPPName, pre) {
				e2e.Logf("waiting: current app pods: %s; while app pods before rolling: %s", postAPPName, prelist)
				return false, nil
			}
		}
		postlist = strings.Fields(postAPPName)
		if len(postlist) != len(prelist) {
			e2e.Logf("waiting for pods [%s] to expected number %d", postlist, len(prelist))
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, fmt.Errorf("pod is not rolling successfully: %v", err)
	}
	return
}
|
osus
| ||||
file
|
openshift/openshift-tests-private
|
3ffd395e-be3f-4011-b58f-ea64789ba723
|
ocperf-util
|
import (
"fmt"
"regexp"
"strings"
"sync"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// Package perfscale provides helpers for perf/scale oc CLI tests.
package perfscale

import (
	"fmt"
	"regexp"
	"strings"
	"sync"
	"time"

	o "github.com/onsi/gomega"
	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
	e2e "k8s.io/kubernetes/test/e2e/framework"
)

// getImagestreamImageName Return an imagestream's image repository name
// as "<repo>:<tag>" for the first non-broken tag in the "openshift"
// namespace, or "" when the imagestream or a usable tag does not exist.
func getImagestreamImageName(oc *exutil.CLI, imagestreamName string) string {
	var imageName string
	imageName = ""
	// Ignore NotFound error, it will return a empty string, then use another image in ocperf.go if the image doesn't exit
	imageRepos, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", "-ojsonpath={.status.dockerImageRepository}").Output()
	if !strings.Contains(imageRepos, "NotFound") {
		imageTags, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", "-ojsonpath={.status.tags[*].tag}").Output()
		imageTagList := strings.Split(imageTags, " ")
		// Because some image stream tag is broken, we need to find which image is available in disconnected cluster.
		for i := 0; i < len(imageTagList); i++ {
			jsonathStr := fmt.Sprintf(`-ojsonpath='{.status.tags[%v].conditions[?(@.status=="False")]}{.status.tags[%v].tag}'`, i, i)
			stdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", jsonathStr).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(stdOut).NotTo(o.BeEmpty())
			e2e.Logf("stdOut is: %v", stdOut)
			if !strings.Contains(stdOut, "NotFound") {
				imageTag := strings.ReplaceAll(stdOut, "'", "")
				imageName = imageRepos + ":" + imageTag
				break
			}
		}
	}
	return imageName
}

// createNSUsingOCCLI creates the given namespace via `oc create ns` and
// signals wg when finished. Intended to run as a goroutine.
func createNSUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
	err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	wg.Done()
}

// checkIfNSIsInExpectedState polls (up to 120s) until the number of
// namespaces matching nsPattern equals expectedNum.
func checkIfNSIsInExpectedState(oc *exutil.CLI, expectedNum int, nsPattern string) {
	o.Eventually(func() bool {
		stdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		nsReg := regexp.MustCompile(nsPattern + ".*")
		perfCliNsList := nsReg.FindAllString(stdOut, -1)
		nsNum := len(perfCliNsList)
		e2e.Logf("current ns is: %v", nsNum)
		return nsNum == expectedNum
	}, 120*time.Second, 5*time.Second).Should(o.BeTrue())
}

// createDeploymentServiceUsingOCCLI instantiates the deployment template
// (substituting IMAGENAME) and applies the service manifest in namespace,
// then signals wg. Intended to run as a goroutine.
func createDeploymentServiceUsingOCCLI(oc *exutil.CLI, namespace string, ocpPerfAppService string, ocpPerfAppDeployment string, ocPerfAppImageName string, wg *sync.WaitGroup) {
	exutil.ApplyNsResourceFromTemplate(oc, namespace, "--ignore-unknown-parameters=true", "-f", ocpPerfAppDeployment, "-p", "IMAGENAME="+ocPerfAppImageName)
	err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", ocpPerfAppService, "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	wg.Done()
}

// checkIfDeploymentIsInExpectedState polls (up to 120s) until the named
// deployment exists and its ready replica count equals the desired count.
func checkIfDeploymentIsInExpectedState(oc *exutil.CLI, namespace string, resName string) {
	var (
		isCreated  bool
		desiredNum string
		readyNum   string
	)
	o.Eventually(func() bool {
		kindNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", resName, "-n", namespace, "-oname").Output()
		if strings.Contains(kindNames, "NotFound") || strings.Contains(kindNames, "No resources") || len(kindNames) == 0 || err != nil {
			isCreated = false
		} else {
			//deployment/statefulset has been created, but not running, need to compare .status.readyReplicas and in .status.replicas
			isCreated = true
			readyNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.readyReplicas}").Output()
			desiredNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.replicas}").Output()
		}
		return isCreated && desiredNum == readyNum
	}, 120*time.Second, time.Second).Should(o.BeTrue())
}

// getResourceUsingOCCLI lists deployments, serviceaccounts and secrets in
// namespace, then signals wg. Intended to run as a goroutine.
func getResourceUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
	err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment,sa,secret", "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	wg.Done()
}

// scaleDownDeploymentUsingOCCLI scales all deployments in namespace to zero
// replicas, then signals wg. Intended to run as a goroutine.
func scaleDownDeploymentUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
	err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("deployment", "-n", namespace, "--replicas=0", "--all").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	wg.Done()
}

// deleteNSUsingOCCLI deletes the given namespace, then signals wg. Intended
// to run as a goroutine.
func deleteNSUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
	err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	wg.Done()
}
|
package perfscale
| ||||
function
|
openshift/openshift-tests-private
|
6322c009-5cca-4c40-9fa0-be603e287923
|
getImagestreamImageName
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// getImagestreamImageName returns an imagestream's pullable image reference
// as "<repo>:<tag>" for the first non-broken tag in the "openshift"
// namespace, or "" when the imagestream (or a usable tag) does not exist.
func getImagestreamImageName(oc *exutil.CLI, imagestreamName string) string {
	// Fix: collapsed the redundant `var imageName string; imageName = ""`
	// into a single declaration; "" is returned when no usable tag is found.
	imageName := ""
	// Ignore NotFound error, it will return a empty string, then use another image in ocperf.go if the image doesn't exit
	imageRepos, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", "-ojsonpath={.status.dockerImageRepository}").Output()
	if !strings.Contains(imageRepos, "NotFound") {
		imageTags, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", "-ojsonpath={.status.tags[*].tag}").Output()
		imageTagList := strings.Split(imageTags, " ")
		// Because some image stream tag is broken, we need to find which image is available in disconnected cluster.
		for i := 0; i < len(imageTagList); i++ {
			jsonathStr := fmt.Sprintf(`-ojsonpath='{.status.tags[%v].conditions[?(@.status=="False")]}{.status.tags[%v].tag}'`, i, i)
			stdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", jsonathStr).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(stdOut).NotTo(o.BeEmpty())
			e2e.Logf("stdOut is: %v", stdOut)
			if !strings.Contains(stdOut, "NotFound") {
				imageTag := strings.ReplaceAll(stdOut, "'", "")
				imageName = imageRepos + ":" + imageTag
				break
			}
		}
	}
	return imageName
}
|
perfscale
| ||||
function
|
openshift/openshift-tests-private
|
c7e2fe34-3c60-4985-901b-4f0768c23f48
|
createNSUsingOCCLI
|
['"sync"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// createNSUsingOCCLI creates the given namespace via `oc create ns` and
// signals wg when finished. Intended to run as a goroutine.
func createNSUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
	// Fix: Done is deferred so the WaitGroup is released even if the
	// assertion below panics (ginkgo failure), avoiding a deadlock in the
	// caller's Wait.
	defer wg.Done()
	err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}
|
perfscale
| ||||
function
|
openshift/openshift-tests-private
|
baca2cfb-f9c6-49b1-9979-212a27c6af20
|
checkIfNSIsInExpectedState
|
['"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// checkIfNSIsInExpectedState polls (up to 120s, every 5s) until the number of
// namespaces matching nsPattern equals expectedNum.
func checkIfNSIsInExpectedState(oc *exutil.CLI, expectedNum int, nsPattern string) {
	// Compile once; the pattern does not change between poll attempts.
	matcher := regexp.MustCompile(nsPattern + ".*")
	o.Eventually(func() bool {
		out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		matched := len(matcher.FindAllString(out, -1))
		e2e.Logf("current ns is: %v", matched)
		return matched == expectedNum
	}, 120*time.Second, 5*time.Second).Should(o.BeTrue())
}
|
perfscale
| ||||
function
|
openshift/openshift-tests-private
|
355aabef-9bab-4ef3-9436-043289142a59
|
createDeploymentServiceUsingOCCLI
|
['"sync"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// createDeploymentServiceUsingOCCLI instantiates the deployment template
// (substituting IMAGENAME) and applies the service manifest in namespace,
// then signals wg. Intended to run as a goroutine.
func createDeploymentServiceUsingOCCLI(oc *exutil.CLI, namespace string, ocpPerfAppService string, ocpPerfAppDeployment string, ocPerfAppImageName string, wg *sync.WaitGroup) {
	// Fix: defer Done so the WaitGroup is released even if an assertion or
	// template helper panics, avoiding a deadlock in the caller's Wait.
	defer wg.Done()
	exutil.ApplyNsResourceFromTemplate(oc, namespace, "--ignore-unknown-parameters=true", "-f", ocpPerfAppDeployment, "-p", "IMAGENAME="+ocPerfAppImageName)
	err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", ocpPerfAppService, "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}
|
perfscale
| ||||
function
|
openshift/openshift-tests-private
|
c65e7cd4-2ecb-48ee-bd2b-592127a64396
|
checkIfDeploymentIsInExpectedState
|
['"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// checkIfDeploymentIsInExpectedState polls (up to 120s) until the named
// deployment exists in namespace and its ready replica count matches the
// desired replica count.
func checkIfDeploymentIsInExpectedState(oc *exutil.CLI, namespace string, resName string) {
	var (
		isCreated  bool
		desiredNum string
		readyNum   string
	)
	o.Eventually(func() bool {
		kindNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", resName, "-n", namespace, "-oname").Output()
		if strings.Contains(kindNames, "NotFound") || strings.Contains(kindNames, "No resources") || len(kindNames) == 0 || err != nil {
			isCreated = false
		} else {
			//deployment/statefulset has been created, but not running, need to compare .status.readyReplicas and in .status.replicas
			isCreated = true
			// Compared as jsonpath strings; both read from the same object.
			readyNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.readyReplicas}").Output()
			desiredNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.replicas}").Output()
		}
		return isCreated && desiredNum == readyNum
	}, 120*time.Second, time.Second).Should(o.BeTrue())
}
|
perfscale
| ||||
function
|
openshift/openshift-tests-private
|
2dcecbad-b6c1-492b-bbd0-df3657860739
|
getResourceUsingOCCLI
|
['"sync"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// getResourceUsingOCCLI lists deployments, serviceaccounts and secrets in
// namespace, then signals wg. Intended to run as a goroutine.
func getResourceUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
	// Fix: defer Done so the WaitGroup is released even if the assertion
	// below panics, avoiding a deadlock in the caller's Wait.
	defer wg.Done()
	err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment,sa,secret", "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}
|
perfscale
| ||||
function
|
openshift/openshift-tests-private
|
365fe389-5f9a-483a-b8a6-f7ed5e79c1d9
|
scaleDownDeploymentUsingOCCLI
|
['"sync"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
// scaleDownDeploymentUsingOCCLI scales all deployments in namespace to zero
// replicas, then signals wg. Intended to run as a goroutine.
func scaleDownDeploymentUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
	// Fix: defer Done so the WaitGroup is released even if the assertion
	// below panics, avoiding a deadlock in the caller's Wait.
	defer wg.Done()
	err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("deployment", "-n", namespace, "--replicas=0", "--all").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}
|
perfscale
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.