element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
test case
|
openshift/openshift-tests-private
|
5bbbd474-dd39-46cf-b103-211f3a50ce3f
|
Author:Vibhu-HyperShiftMGMT-ROSA-High-55352-observability operator self monitoring
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-High-55352-observability operator self monitoring", func() {
exutil.By("Check observability operator monitoring")
checkOperatorMonitoring(oc, oboBaseDir)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
54099a89-907e-414e-9d9b-06980d4715fb
|
Author:Vibhu-HyperShiftMGMT-ROSA-LEVEL0-Critical-55349-verify observability operator
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-LEVEL0-Critical-55349-verify observability operator", func() {
exutil.By("Check the label in namespace")
checkLabel(oc)
exutil.By("Check observability operator pods")
checkOperatorPods(oc)
exutil.By("Check liveliness/readiness probes implemented in observability operator pod")
checkPodHealth(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
7b5d6e7c-c05e-4f2b-ac04-f54b70ad82f5
|
Author:Vibhu-HyperShiftMGMT-ROSA-High-59383-verify OBO discovered and collected metrics of HCP
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-High-59383-verify OBO discovered and collected metrics of HCP", func() {
if exutil.IsROSACluster(oc) {
exutil.By("Check scrape targets")
checkHCPTargets(oc)
exutil.By("Check metric along with value")
checkMetricValue(oc, "rosa_mc")
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
51a07e0d-a52c-4a55-b0c1-5789c678a4e0
|
Author:Vibhu-Critical-59384-High-59674-create monitoringstack to discover any target and verify observability operator discovered target and collected metrics of example APP
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
g.It("Author:Vibhu-Critical-59384-High-59674-create monitoringstack to discover any target and verify observability operator discovered target and collected metrics of example APP", func() {
defer deleteMonitoringStack(oc, monitoringStackDescription{}, monitoringStackSecretDescription{}, "monitor_example_app")
exutil.By("Create monitoring stack")
createCustomMonitoringStack(oc, oboBaseDir)
exutil.By("Create example app")
oc.SetupProject()
ns := oc.Namespace()
createExampleApp(oc, oboBaseDir, ns)
exutil.By("Check scrape target")
checkExampleAppTarget(oc)
exutil.By("Check metric along with value")
checkMetricValue(oc, "monitor_example_app")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0eb9c92c-5d32-4ea6-b6bc-e8a1bc91072d
|
Author:tagao-Critical-78217-COO should pass DAST test [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
g.It("Author:tagao-Critical-78217-COO should pass DAST test [Serial]", func() {
exutil.By("trigger a job to install RapiDAST then scan APIs")
configFile := filepath.Join(oboBaseDir, "rapidastconfig_coo.yaml")
policyFile := filepath.Join(oboBaseDir, "customscan.policy")
_, err := rapidastScan(oc, oc.Namespace(), configFile, policyFile, "coo")
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
file
|
openshift/openshift-tests-private
|
0f571738-e9c6-4020-9e99-3b0d8674b3b2
|
observability_operator_utils
|
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"reflect"
"github.com/tidwall/gjson"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
package monitoring
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"reflect"
"github.com/tidwall/gjson"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type monitoringStackSecretDescription struct {
name string
namespace string
template string
}
type monitoringStackDescription struct {
name string
clusterID string
namespace string
secretName string
tokenURL string
url string
region string
template string
}
const (
subName = "observability-operator"
ogName = "observability-operator-og"
namespace = "openshift-observability-operator"
monSvcName = "hypershift-monitoring-stack-prometheus"
)
var (
csvName string
targets = []string{"catalog-operator", "cluster-version-operator", "etcd", "kube-apiserver", "kube-controller-manager", "monitor-multus-admission-controller", "monitor-ovn-master-metrics", "node-tuning-operator", "olm-operator", "openshift-apiserver", "openshift-controller-manager", "openshift-route-controller-manager"}
)
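// checkSubscription polls the Subscription until it reports AtLatestKnown, waits for the installed CSV to reach the Succeeded phase, and returns the subscription summary line.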
func checkSubscription(oc *exutil.CLI) (out string, err error) {
exutil.By("Check the state of Operator")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("subscription", subName, "-n", namespace, "-o=jsonpath={.status.state}").Output()
if strings.Contains(out, "NotFound") || strings.Contains(out, "No resources") || err != nil {
return false, err
}
if strings.Compare(out, "AtLatestKnown") == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Subscription %v doesnot contain the correct status in namespace %v", subName, namespace))
exutil.By("Get ClusterServiceVersion name")
csvName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("subscription", subName, "-n", namespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check that ClusterServiceVersion " + csvName + " is finished")
errCheck = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterserviceversions", csvName, "-n", namespace, "-o=jsonpath={.status.phase}{.status.reason}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(out, "SucceededInstallSucceeded") == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ClusterServiceVersion %v is not successfully finished in namespace %v with error: %v", csvName, namespace, err))
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("subscription", subName, "-n", namespace, "--no-headers").Output()
return out, err
}
func createOperator(oc *exutil.CLI, ogTemplate string, subTemplate string, nsTemplate string) {
exutil.By("Create Namespace")
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", nsTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
exutil.By("Create Operator Group")
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", ogTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
exutil.By("Create subscription")
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", subTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
out, err := checkSubscription(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Output: %v", out)
}
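// createObservabilityOperator installs the operator from the namespace, operator-group, and subscription templates under oboBaseDir, applies the operator ServiceMonitor, and skips the test when the COO CSV does not reach the Succeeded phase in time.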
func createObservabilityOperator(oc *exutil.CLI, oboBaseDir string) {
ogTemplate := filepath.Join(oboBaseDir, "operator-group.yaml")
subTemplate := filepath.Join(oboBaseDir, "subscription.yaml")
nsTemplate := filepath.Join(oboBaseDir, "namespace.yaml")
exutil.By("Install Observability Operator")
createOperator(oc, ogTemplate, subTemplate, nsTemplate)
exutil.By("create servicemonitor")
smTemplate := filepath.Join(oboBaseDir, "obo-service-monitor.yaml")
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", smTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
cooVersion, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-o=jsonpath={.items[?(@.spec.displayName==\"Cluster Observability Operator\")].metadata.name}", "-n", "openshift-monitoring").Output()
waitErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("csv/"+cooVersion, "--for=jsonpath={.status.phase}=Succeeded", "--timeout=5m", "-n", "openshift-monitoring").Execute()
if waitErr != nil {
g.Skip("COO is not ready or been installed")
}
}
func getClusterDetails(oc *exutil.CLI) (clusterID string, region string) {
clusterID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversions", "version", "-o=jsonpath={.spec.clusterID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus..region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return clusterID, region
}
func createMonitoringStack(oc *exutil.CLI, msD monitoringStackDescription, secD monitoringStackSecretDescription) {
exutil.By("Creating Monitoring Stack")
createStack(oc, msD, secD, "rosa_mc", "")
}
func createStack(oc *exutil.CLI, msD monitoringStackDescription, secD monitoringStackSecretDescription, stack, oboBaseDir string) {
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
exutil.By("Creating Secret")
secFile, err := oc.AsAdmin().Run("process").Args("-f", secD.template, "-p", "NAME="+secD.name, "NAMESPACE="+secD.namespace).OutputToFile(getRandomString() + "ms-secret.json")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", secFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Install Monitoring Stack")
msFile, err := oc.AsAdmin().Run("process").Args("-f", msD.template, "-p", "CLUSTERID="+msD.clusterID, "REGION="+msD.region, "NAME="+msD.name, "NAMESPACE="+msD.namespace, "SECRETNAME="+msD.secretName, "TOKENURL="+msD.tokenURL, "URL="+msD.url).OutputToFile(getRandomString() + "ms.json")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", msFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
if stack == "monitor_example_app" {
exutil.By("Install Monitoring Stack")
var msTemplate string
if exutil.IsSNOCluster(oc) {
msTemplate = filepath.Join(oboBaseDir, "example-app-monitoring-stack-sno.yaml")
} else {
msTemplate = filepath.Join(oboBaseDir, "example-app-monitoring-stack.yaml")
}
err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", msTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Check MonitoringStack status")
checkMonitoringStack(oc, msD, stack)
exutil.By("Check MonitoringStack Prometheus pods status")
checkMonitoringStackPods(oc, stack)
}
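// checkMonitoringStack waits until the MonitoringStack conditions report the MonitoringStackAvailable reason.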
func checkMonitoringStack(oc *exutil.CLI, msD monitoringStackDescription, stack string) {
var name string
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
name = msD.name
}
if stack == "monitor_example_app" {
name = "example-app-monitoring-stack"
}
exutil.By("Check the state of MonitoringStack")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", name, "-n", namespace, "-o=jsonpath={.status.conditions[*].reason}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(out, "MonitoringStackAvailable") {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Monitoring Stack %v doesnot contain the correct status in namespace %v", name, namespace))
}
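// checkMonitoringStackPods waits for the stack's Prometheus pods to reach the Running phase: a single replica on SNO clusters, two replicas otherwise.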
func checkMonitoringStackPods(oc *exutil.CLI, stack string) {
exutil.By("Check " + namespace + " namespace monitoringstack pods liveliness")
var name string
if stack == "rosa_mc" {
name = "hypershift-monitoring-stack"
}
if stack == "monitor_example_app" {
name = "example-app-monitoring-stack"
}
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", "prometheus="+name, "-o=jsonpath={.items[*].status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if exutil.IsSNOCluster(oc) {
if strings.Compare(out, "Running") == 0 {
return true, nil
}
} else {
if strings.Compare(out, "Running Running") == 0 {
return true, nil
}
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v namespace monitoringstack pods are not in healthy state", namespace))
}
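// checkOperatorPods expects all four operator pods in the namespace to reach the Running phase.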
func checkOperatorPods(oc *exutil.CLI) {
exutil.By("Check " + namespace + " namespace pods liveliness")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-o", "jsonpath={.items[*].status.phase}").Output()
if strings.Compare(out, "Running Running Running Running") == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v namespace does not contain pods", namespace))
}
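// checkRemoteWriteConfig compares the stack's remoteWrite spec against the OAuth2 configuration expected from the stack description.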
func checkRemoteWriteConfig(oc *exutil.CLI, msD monitoringStackDescription) {
var (
actual interface{}
expected interface{}
remoteWriteExpected = fmt.Sprintf(`[
{
"oauth2": {
"clientId": {
"secret": {
"key": "client-id",
"name": "%v"
}
},
"clientSecret": {
"key": "client-secret",
"name": "%v"
},
"tokenUrl": "%v"
},
"url": "%v"
}
]`, msD.secretName, msD.secretName, msD.tokenURL, msD.url)
)
exutil.By("Check remote write config")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", msD.name, "-n", msD.namespace, "-o=jsonpath={.spec.prometheusConfig.remoteWrite}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
actual = gjson.Parse(out).Value()
expected = gjson.Parse(remoteWriteExpected).Value()
if reflect.DeepEqual(actual, expected) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Remote write config is not correct in monitoringstack %v in %v namespace", msD.name, msD.namespace))
}
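// checkMonitoringStackDetails verifies that the external labels match the expected clusterID and region (for the ROSA management-cluster stack) and that no status condition reports False.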
func checkMonitoringStackDetails(oc *exutil.CLI, msD monitoringStackDescription, stack string) {
var name string
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
name = msD.name
exutil.By("Get clusterID and region")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", msD.name, "-n", msD.namespace, "-o=jsonpath={.spec.prometheusConfig.externalLabels.hypershift_cluster_id}{.spec.prometheusConfig.externalLabels.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(out, msD.clusterID+msD.region) == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ClusterID and region did not match. Expected: %v %v", msD.clusterID, msD.region))
}
if stack == "custom" {
name = "hypershift-monitoring-stack"
}
exutil.By("Check status of MonitoringStack")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", name, "-n", namespace, "-o=jsonpath={.status.conditions[*].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(out, "False") {
return false, err
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("MonitoringStack %v reports invalid status in namespace %v", name, namespace))
}
func deleteMonitoringStack(oc *exutil.CLI, msD monitoringStackDescription, secD monitoringStackSecretDescription, stack string) {
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
exutil.By("Removing MonitoringStack " + msD.name)
errStack := oc.AsAdmin().WithoutNamespace().Run("delete").Args("monitoringstack", msD.name, "-n", msD.namespace).Execute()
exutil.By("Removing MonitoringStack Secret " + secD.name)
errSecret := oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", secD.name, "-n", secD.namespace).Execute()
o.Expect(errStack).NotTo(o.HaveOccurred())
o.Expect(errSecret).NotTo(o.HaveOccurred())
}
if stack == "monitor_example_app" {
exutil.By("Removing MonitoringStack hypershift-monitoring-stack")
errStack := oc.AsAdmin().WithoutNamespace().Run("delete").Args("monitoringstack", "example-app-monitoring-stack", "-n", "openshift-observability-operator").Execute()
o.Expect(errStack).NotTo(o.HaveOccurred())
}
}
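// deleteOperator removes the ServiceMonitor, CSV, Subscription, OperatorGroup, and namespace, then deletes any CRDs left behind in the monitoring.rhobs API group.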
func deleteOperator(oc *exutil.CLI) {
exutil.By("Removing servicemoitor")
errSm := oc.AsAdmin().WithoutNamespace().Run("delete").Args("servicemonitors.monitoring.coreos.com", "observability-operator", "-n", namespace).Execute()
exutil.By("Removing ClusterServiceVersion " + csvName)
errCsv := oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterserviceversions", csvName, "-n", namespace).Execute()
exutil.By("Removing Subscription " + subName)
errSub := oc.AsAdmin().WithoutNamespace().Run("delete").Args("subscription", subName, "-n", namespace).Execute()
exutil.By("Removing OperatorGroup " + ogName)
errOg := oc.AsAdmin().WithoutNamespace().Run("delete").Args("operatorgroup", ogName, "-n", namespace).Execute()
exutil.By("Removing Namespace " + namespace)
errNs := oc.AsAdmin().WithoutNamespace().Run("delete").Args("namespace", namespace, "--force").Execute()
crds, err := oc.AsAdmin().WithoutNamespace().Run("api-resources").Args("--api-group=monitoring.rhobs", "-o", "name").Output()
if err != nil {
e2e.Logf("err %v, crds %v", err, crds)
} else {
crda := append([]string{"crd"}, strings.Split(crds, "\n")...)
errCRD := oc.AsAdmin().WithoutNamespace().Run("delete").Args(crda...).Execute()
o.Expect(errCRD).NotTo(o.HaveOccurred())
}
o.Expect(errSm).NotTo(o.HaveOccurred())
o.Expect(errCsv).NotTo(o.HaveOccurred())
o.Expect(errSub).NotTo(o.HaveOccurred())
o.Expect(errOg).NotTo(o.HaveOccurred())
o.Expect(errNs).NotTo(o.HaveOccurred())
}
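// checkRuleExists fetches the rules from the given route's /v1/rules endpoint and reports whether any rule group file path contains ruleName.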
func checkRuleExists(oc *exutil.CLI, token, routeName, namespace, ruleName string) bool {
var rules []gjson.Result
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
path, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", routeName, "-n", namespace, "-o=jsonpath={.spec.path}").Output()
if err != nil {
return false, nil
}
host, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", routeName, "-n", namespace, "-o=jsonpath={.spec.host}").Output()
if err != nil {
return false, nil
}
ruleCmd := fmt.Sprintf("curl -G -s -k -H\"Authorization: Bearer %s\" https://%s%s/v1/rules", token, host, path)
out, err := exec.Command("bash", "-c", ruleCmd).Output()
if err != nil {
return false, nil
}
rules = gjson.ParseBytes(out).Get("data.groups.#.file").Array()
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Rules are not loaded")
for _, rule := range rules {
if strings.Contains(rule.String(), ruleName) {
return true
}
}
return false
}
func checkConfigMapExists(oc *exutil.CLI, namespace, configmapName, checkStr string) bool {
searchOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", configmapName, "-n", namespace, "-o=jsonpath={.data.config\\.yaml}").Output()
if err != nil {
return false
}
if strings.Contains(searchOutput, checkStr) {
return true
}
return false
}
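// createConfig applies the monitoring config only when user workload monitoring is not already enabled; an AlreadyExists error from the apply is tolerated.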
func createConfig(oc *exutil.CLI, namespace, cmName, config string) {
if !checkConfigMapExists(oc, namespace, cmName, "enableUserWorkload: true") {
e2e.Logf("Create configmap: user-workload-monitoring-config")
output, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", config).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
}
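// checkOperatorMonitoring enables user workload monitoring, then verifies that the operator's PrometheusRule objects exist, that the rules are loaded into Thanos, and that the operator's reconcile metrics are exposed.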
func checkOperatorMonitoring(oc *exutil.CLI, oboBaseDir string) {
exutil.By("Check if UWM exists")
uwMonitoringConfig := filepath.Join(oboBaseDir, "user-workload-monitoring-cm.yaml")
createConfig(oc, "openshift-monitoring", "cluster-monitoring-config", uwMonitoringConfig)
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check prometheus rules")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrule", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(out, "alertmanager-rules") && strings.Contains(out, "prometheus-operator-rules") && strings.Contains(out, "prometheus-rules") && strings.Contains(out, "observability-operator-rules") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Prometheus rules are not created in %v namespace", namespace))
exutil.By("Check Observability Operator Alertmanager Rules")
errCheck = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
IsAlertManagerRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-alertmanager-rules")
exutil.By("Check Observability Operator Prometheus Operator Rules")
IsPrometheusOperatorRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-prometheus-operator-rules")
exutil.By("Check Observability Operator Prometheus Rules")
IsPrometheusRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-prometheus-rules")
exutil.By("Check Observability Operator Rules")
IsOperatorRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-rules")
if IsAlertManagerRule && IsPrometheusOperatorRule && IsPrometheusRule && IsOperatorRule {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Observability operator rules are not loaded")
exutil.By("Check Observability Operator metrics")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query={__name__=~"controller_runtime_reconcile.*",job="observability-operator",namespace="openshift-observability-operator"}'`, token, "openshift-observability-operator", uwmLoadTime)
}
func checkLabel(oc *exutil.CLI) {
var labelName = "network.openshift.io/policy-group=monitoring"
exutil.By("Check if the label" + labelName + "exists in the namespace" + namespace)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", namespace, "-o=jsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(out, "monitoring")).To(o.BeTrue())
}
func checkPodHealth(oc *exutil.CLI) {
var (
actualLiveness interface{}
actualReadiness interface{}
outputLiveness = `{
"failureThreshold": 3,
"httpGet": {
"path": "/healthz",
"port": 8081,
"scheme": "HTTP"
},
"periodSeconds": 10,
"successThreshold": 1,
"timeoutSeconds": 1
}`
outputReadiness = `{
"failureThreshold": 3,
"httpGet": {
"path": "/healthz",
"port": 8081,
"scheme": "HTTP"
},
"periodSeconds": 10,
"successThreshold": 1,
"timeoutSeconds": 1
}`
expectedLiveness = gjson.Parse(outputLiveness).Value()
expectedReadiness = gjson.Parse(outputReadiness).Value()
)
exutil.By("Check remote write config")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
exutil.By("Get the observability operator pod")
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", "app.kubernetes.io/name=observability-operator", "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the liveliness for " + podName)
livenessOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(podName, "-n", namespace, "-o=jsonpath={.spec.containers[].livenessProbe}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
readinessOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(podName, "-n", namespace, "-o=jsonpath={.spec.containers[].readinessProbe}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Live: %v", livenessOut)
e2e.Logf("Ready: %v", readinessOut)
actualLiveness = gjson.Parse(livenessOut).Value()
actualReadiness = gjson.Parse(readinessOut).Value()
if reflect.DeepEqual(actualLiveness, expectedLiveness) && reflect.DeepEqual(actualReadiness, expectedReadiness) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, "liveness/readiness probe not implemented correctly in observability operator pod")
}
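// checkHCPTargets confirms that each hosted-control-plane component listed in targets appears in Prometheus service discovery.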
func checkHCPTargets(oc *exutil.CLI) {
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check whether the scrape targets are present")
for _, target := range targets {
checkMetric(oc, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=prometheus_sd_discovered_targets{config=~".*%s.*"}' `, monSvcName, namespace, target), token, target, platformLoadTime)
}
}
func checkExampleAppTarget(oc *exutil.CLI) {
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check whether the scrape targets are present")
checkMetric(oc, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=prometheus_sd_discovered_targets{config=~".*%s.*"}' `, "example-app-monitoring-stack-prometheus", namespace, "prometheus-example-monitor"), token, "prometheus-example-monitor", uwmLoadTime)
}
func checkIfMetricValueExists(oc *exutil.CLI, token, url string, timeout time.Duration) {
var (
res string
err error
)
getCmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
res, err = exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", getCmd)
val := gjson.Parse(res).Get("data.result.#.value").Array()
if err != nil || len(val) == 0 {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metric %s does not contain any value", res))
}
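// checkMetricValue asserts that a representative metric returns a value: cluster_version on the ROSA management-cluster stack, the example app's version metric otherwise.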
func checkMetricValue(oc *exutil.CLI, clusterType string) {
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check the metrics exists and contain value")
if clusterType == "rosa_mc" {
checkIfMetricValueExists(oc, token, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=topk(1,cluster_version{type="cluster"})' `, monSvcName, namespace), platformLoadTime)
} else {
checkIfMetricValueExists(oc, token, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=version' `, "example-app-monitoring-stack-prometheus", namespace), platformLoadTime)
}
}
func createCustomMonitoringStack(oc *exutil.CLI, oboBaseDir string) {
exutil.By("Create Clustom Monitoring Stack")
createStack(oc, monitoringStackDescription{}, monitoringStackSecretDescription{}, "monitor_example_app", oboBaseDir)
}
func checkExampleAppStatus(oc *exutil.CLI, ns string) {
exutil.By("Check the status of Example App")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
exutil.By("Get the pod name")
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", ns, "-l", "app=prometheus-example-app", "-oname").Output()
if err != nil {
return false, nil
}
exutil.By("Check the status of pod")
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(podName, "-n", ns, "-o=jsonpath={.status.phase}").Output()
if err != nil {
return false, nil
}
exutil.By("Check service is present")
svcName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, "-l", "app=prometheus-example-app", "-oname").Output()
if err != nil {
return false, nil
}
e2e.Logf("Service: %v", svcName)
exutil.By("Check service monitor is present")
svMonName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("servicemonitor.monitoring.rhobs", "-n", ns, "-l", "k8s-app=prometheus-example-monitor", "-oname").Output()
if err != nil {
return false, nil
}
e2e.Logf("Service Monitor: %v", svMonName)
if status != "Running" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Example app status is not healthy in %s namespace", ns))
}
func createExampleApp(oc *exutil.CLI, oboBaseDir, ns string) {
appTemplate := filepath.Join(oboBaseDir, "example-app.yaml")
exutil.By("Install Example App")
createResourceFromYaml(oc, ns, appTemplate)
checkExampleAppStatus(oc, ns)
}
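// rapidastScan grants cluster-admin to the namespace's default service account, injects its token into the RapiDAST config, runs the scan job, archives the redacted report under $ARTIFACT_DIR, and fails when any High-risk finding is reported.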
func rapidastScan(oc *exutil.CLI, ns, configFile string, scanPolicyFile string, apiGroupName string) (bool, error) {
//update the token and create a new config file
content, err := os.ReadFile(configFile)
if err != nil {
return false, err
}
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
token := getSAToken(oc, "default", ns)
originConfig := string(content)
targetConfig := strings.Replace(originConfig, "Bearer sha256~xxxxxxxx", "Bearer "+token, -1)
newConfigFile := "/tmp/coodast" + getRandomString()
f, err := os.Create(newConfigFile)
defer f.Close()
defer exec.Command("rm", newConfigFile).Output()
if err != nil {
return false, err
}
f.WriteString(targetConfig)
//Create configmap
err = oc.WithoutNamespace().Run("create").Args("-n", ns, "configmap", "rapidast-configmap", "--from-file=rapidastconfig.yaml="+newConfigFile, "--from-file=customscan.policy="+scanPolicyFile).Execute()
if err != nil {
return false, err
}
//Create job
oboBaseDir := exutil.FixturePath("testdata", "monitoring", "observabilityoperator")
jobTemplate := filepath.Join(oboBaseDir, "rapidast-coo-job.yaml")
err = oc.WithoutNamespace().Run("create").Args("-n", ns, "-f", jobTemplate).Execute()
if err != nil {
return false, err
}
//Wait up to 10 minutes until the pod reaches Failed or Succeeded
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 10*time.Minute, true, func(context.Context) (done bool, err error) {
jobStatus, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ns, "pod", "-l", "job-name=rapidast-coo-job", "-ojsonpath={.items[0].status.phase}").Output()
e2e.Logf(" rapidast Job status %s ", jobStatus)
if err1 != nil {
return false, nil
}
if jobStatus == "Pending" || jobStatus == "Running" {
return false, nil
}
if jobStatus == "Failed" {
return true, fmt.Errorf("rapidast-coo-job status failed")
}
return jobStatus == "Succeeded", nil
})
//return if the pod status is not Succeeded
if err != nil {
return false, err
}
// Get the rapidast pod name
jobPods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=rapidast-coo-job"})
if err != nil {
return false, err
}
podLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ns, jobPods.Items[0].Name).Output()
//return if failed to get logs
if err != nil {
return false, err
}
//remove token from pod logs
podLogsNoToken := strings.Replace(podLogs, token, "xxxxxxxx", -1)
podLogsNoBearer := strings.Replace(podLogsNoToken, "Bearer ", "bbbbbb ", -1)
// Copy DAST Report into $ARTIFACT_DIR
artifactAvailable := true
artifactdirPath := os.Getenv("ARTIFACT_DIR")
if artifactdirPath == "" {
artifactAvailable = false
}
info, err := os.Stat(artifactdirPath)
if err != nil {
e2e.Logf("%s doesn't exist", artifactdirPath)
artifactAvailable = false
} else if !info.IsDir() {
e2e.Logf("%s isn't a directory", artifactdirPath)
artifactAvailable = false
}
if artifactAvailable {
rapidastResultsSubDir := artifactdirPath + "/rapiddastresultsCOO"
err = os.MkdirAll(rapidastResultsSubDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
artifactFile := rapidastResultsSubDir + "/" + apiGroupName + "_rapidast.result"
e2e.Logf("Write report into %s", artifactFile)
f1, err := os.Create(artifactFile)
defer f1.Close()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = f1.WriteString(podLogsNoBearer)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
// print pod logs if artifactdirPath is not writable
e2e.Logf("#oc logs -n %s %s \n %s", jobPods.Items[0].Name, ns, podLogsNoBearer)
}
//return false, if high risk is reported
podLogA := strings.Split(podLogs, "\n")
riskHigh := 0
riskMedium := 0
re1 := regexp.MustCompile(`"riskdesc": .*High`)
re2 := regexp.MustCompile(`"riskdesc": .*Medium`)
for _, item := range podLogA {
if re1.MatchString(item) {
riskHigh++
}
if re2.MatchString(item) {
riskMedium++
}
}
e2e.Logf("rapidast result: riskHigh=%v riskMedium=%v", riskHigh, riskMedium)
if riskHigh > 0 {
return false, fmt.Errorf("high risk alert, please check the scan result report")
}
return true, nil
}
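// ifMonitoringStackCRDExists reports whether the monitoringstacks.monitoring.rhobs CRD is installed.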
func ifMonitoringStackCRDExists(oc *exutil.CLI) bool {
var monitoringstackCRD = "monitoringstacks.monitoring.rhobs"
exutil.By("Check if the monitoringstack crd exists")
_, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd", monitoringstackCRD).Output()
if err != nil {
e2e.Logf("%s doesn't exist", monitoringstackCRD)
return false
}
e2e.Logf("%s exist", monitoringstackCRD)
return true
}
|
package monitoring
| ||||
function
|
openshift/openshift-tests-private
|
f9523c07-1e2f-40f9-8441-eadd9ee9fb2d
|
checkSubscription
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkSubscription(oc *exutil.CLI) (out string, err error) {
exutil.By("Check the state of Operator")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("subscription", subName, "-n", namespace, "-o=jsonpath={.status.state}").Output()
if strings.Contains(out, "NotFound") || strings.Contains(out, "No resources") || err != nil {
return false, err
}
if strings.Compare(out, "AtLatestKnown") == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Subscription %v doesnot contain the correct status in namespace %v", subName, namespace))
exutil.By("Get ClusterServiceVersion name")
csvName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("subscription", subName, "-n", namespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check that ClusterServiceVersion " + csvName + " is finished")
errCheck = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterserviceversions", csvName, "-n", namespace, "-o=jsonpath={.status.phase}{.status.reason}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(out, "SucceededInstallSucceeded") == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ClusterServiceVersion %v is not successfully finished in namespace %v with error: %v", csvName, namespace, err))
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("subscription", subName, "-n", namespace, "--no-headers").Output()
return out, err
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
9d4d005f-2e4e-4f7f-87f3-0d5264870d89
|
createOperator
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func createOperator(oc *exutil.CLI, ogTemplate string, subTemplate string, nsTemplate string) {
exutil.By("Create Namespace")
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", nsTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
exutil.By("Create Operator Group")
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", ogTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
exutil.By("Create subscription")
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", subTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
out, err := checkSubscription(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Output: %v", out)
}
|
monitoring
| |||||
function
|
openshift/openshift-tests-private
|
db7f18ef-2f69-475c-9125-8ae27c29aafd
|
createObservabilityOperator
|
['"path/filepath"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func createObservabilityOperator(oc *exutil.CLI, oboBaseDir string) {
ogTemplate := filepath.Join(oboBaseDir, "operator-group.yaml")
subTemplate := filepath.Join(oboBaseDir, "subscription.yaml")
nsTemplate := filepath.Join(oboBaseDir, "namespace.yaml")
exutil.By("Install Observability Operator")
createOperator(oc, ogTemplate, subTemplate, nsTemplate)
exutil.By("create servicemonitor")
smTemplate := filepath.Join(oboBaseDir, "obo-service-monitor.yaml")
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", smTemplate).Output()
e2e.Logf("err %v, msg %v", err, msg)
cooVersion, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-o=jsonpath={.items[?(@.spec.displayName==\"Cluster Observability Operator\")].metadata.name}", "-n", "openshift-monitoring").Output()
waitErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("csv/"+cooVersion, "--for=jsonpath={.status.phase}=Succeeded", "--timeout=5m", "-n", "openshift-monitoring").Execute()
if waitErr != nil {
g.Skip("COO is not ready or been installed")
}
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
ef97dbd1-959a-49b7-acd1-99ea53eae4a6
|
getClusterDetails
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func getClusterDetails(oc *exutil.CLI) (clusterID string, region string) {
clusterID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversions", "version", "-o=jsonpath={.spec.clusterID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus..region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return clusterID, region
}
|
monitoring
| |||||
function
|
openshift/openshift-tests-private
|
bc79ca9e-542c-48c8-b291-9dc6889a9f0a
|
createMonitoringStack
|
['monitoringStackSecretDescription', 'monitoringStackDescription']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func createMonitoringStack(oc *exutil.CLI, msD monitoringStackDescription, secD monitoringStackSecretDescription) {
exutil.By("Creating Monitoring Stack")
createStack(oc, msD, secD, "rosa_mc", "")
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
ee3742a0-0a56-4a44-b0dc-5424ba455f2f
|
createStack
|
['"path/filepath"', '"strings"']
|
['monitoringStackSecretDescription', 'monitoringStackDescription']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func createStack(oc *exutil.CLI, msD monitoringStackDescription, secD monitoringStackSecretDescription, stack, oboBaseDir string) {
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
exutil.By("Creating Secret")
secFile, err := oc.AsAdmin().Run("process").Args("-f", secD.template, "-p", "NAME="+secD.name, "NAMESPACE="+secD.namespace).OutputToFile(getRandomString() + "ms-secret.json")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", secFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Install Monitoring Stack")
msFile, err := oc.AsAdmin().Run("process").Args("-f", msD.template, "-p", "CLUSTERID="+msD.clusterID, "REGION="+msD.region, "NAME="+msD.name, "NAMESPACE="+msD.namespace, "SECRETNAME="+msD.secretName, "TOKENURL="+msD.tokenURL, "URL="+msD.url).OutputToFile(getRandomString() + "ms.json")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", msFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
if stack == "monitor_example_app" {
exutil.By("Install Monitoring Stack")
var msTemplate string
if exutil.IsSNOCluster(oc) {
msTemplate = filepath.Join(oboBaseDir, "example-app-monitoring-stack-sno.yaml")
} else {
msTemplate = filepath.Join(oboBaseDir, "example-app-monitoring-stack.yaml")
}
err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", msTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Check MonitoringStack status")
checkMonitoringStack(oc, msD, stack)
exutil.By("Check MonitoringStack Prometheus pods status")
checkMonitoringStackPods(oc, stack)
}
|
monitoring
| |||
function
|
openshift/openshift-tests-private
|
5951a507-e75a-41f6-959e-e2fbf6161ea0
|
checkMonitoringStack
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['monitoringStackDescription']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkMonitoringStack(oc *exutil.CLI, msD monitoringStackDescription, stack string) {
var name string
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
name = msD.name
}
if stack == "monitor_example_app" {
name = "example-app-monitoring-stack"
}
exutil.By("Check the state of MonitoringStack")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", name, "-n", namespace, "-o=jsonpath={.status.conditions[*].reason}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(out, "MonitoringStackAvailable") {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Monitoring Stack %v doesnot contain the correct status in namespace %v", name, namespace))
}
|
monitoring
| |||
function
|
openshift/openshift-tests-private
|
c2cd901f-ed01-4e2d-a995-c102954e3a03
|
checkMonitoringStackPods
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkMonitoringStackPods(oc *exutil.CLI, stack string) {
exutil.By("Check " + namespace + " namespace monitoringstack pods liveliness")
var name string
if stack == "rosa_mc" {
name = "hypershift-monitoring-stack"
}
if stack == "monitor_example_app" {
name = "example-app-monitoring-stack"
}
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", "prometheus="+name, "-o=jsonpath={.items[*].status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if exutil.IsSNOCluster(oc) {
if strings.Compare(out, "Running") == 0 {
return true, nil
}
} else {
if strings.Compare(out, "Running Running") == 0 {
return true, nil
}
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v namespace monitoringstack pods are not in healthy state", namespace))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
5aa953f0-9448-4433-84c3-89f646e64028
|
checkOperatorPods
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkOperatorPods(oc *exutil.CLI) {
exutil.By("Check " + namespace + " namespace pods liveliness")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-o", "jsonpath={.items[*].status.phase}").Output()
if strings.Compare(out, "Running Running Running Running") == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v namespace does not contain pods", namespace))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
f9fb3d7e-cfeb-4bcd-b439-5d7624590956
|
checkRemoteWriteConfig
|
['"context"', '"fmt"', '"time"', '"reflect"', '"github.com/tidwall/gjson"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['monitoringStackDescription']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkRemoteWriteConfig(oc *exutil.CLI, msD monitoringStackDescription) {
var (
actual interface{}
expected interface{}
remoteWriteExpected = fmt.Sprintf(`[
{
"oauth2": {
"clientId": {
"secret": {
"key": "client-id",
"name": "%v"
}
},
"clientSecret": {
"key": "client-secret",
"name": "%v"
},
"tokenUrl": "%v"
},
"url": "%v"
}
]`, msD.secretName, msD.secretName, msD.tokenURL, msD.url)
)
exutil.By("Check remote write config")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", msD.name, "-n", msD.namespace, "-o=jsonpath={.spec.prometheusConfig.remoteWrite}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
actual = gjson.Parse(out).Value()
expected = gjson.Parse(remoteWriteExpected).Value()
if reflect.DeepEqual(actual, expected) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Remote write config is not correct in monitoringstack %v in %v namespace", msD.name, msD.namespace))
}
|
monitoring
| |||
function
|
openshift/openshift-tests-private
|
9f15dd36-42bc-42d2-b4eb-cfa044d0be88
|
checkMonitoringStackDetails
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['monitoringStackDescription']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkMonitoringStackDetails(oc *exutil.CLI, msD monitoringStackDescription, stack string) {
var name string
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
name = msD.name
exutil.By("Get clusterID and region")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", msD.name, "-n", msD.namespace, "-o=jsonpath={.spec.prometheusConfig.externalLabels.hypershift_cluster_id}{.spec.prometheusConfig.externalLabels.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(out, msD.clusterID+msD.region) == 0 {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ClusterID and region did not match. Expected: %v %v", msD.clusterID, msD.region))
}
if stack == "custom" {
name = "hypershift-monitoring-stack"
}
exutil.By("Check status of MonitoringStack")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("monitoringstack", name, "-n", namespace, "-o=jsonpath={.status.conditions[*].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(out, "False") {
return false, err
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("MonitoringStack %v reports invalid status in namespace %v", name, namespace))
}
|
monitoring
| |||
function
|
openshift/openshift-tests-private
|
88ea8b57-0631-41c1-bf98-121152645019
|
deleteMonitoringStack
|
['"strings"']
|
['monitoringStackSecretDescription', 'monitoringStackDescription']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func deleteMonitoringStack(oc *exutil.CLI, msD monitoringStackDescription, secD monitoringStackSecretDescription, stack string) {
stack = strings.ToLower(stack)
if stack == "rosa_mc" {
exutil.By("Removing MonitoringStack " + msD.name)
errStack := oc.AsAdmin().WithoutNamespace().Run("delete").Args("monitoringstack", msD.name, "-n", msD.namespace).Execute()
exutil.By("Removing MonitoringStack Secret " + secD.name)
errSecret := oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", secD.name, "-n", secD.namespace).Execute()
o.Expect(errStack).NotTo(o.HaveOccurred())
o.Expect(errSecret).NotTo(o.HaveOccurred())
}
if stack == "monitor_example_app" {
exutil.By("Removing MonitoringStack hypershift-monitoring-stack")
errStack := oc.AsAdmin().WithoutNamespace().Run("delete").Args("monitoringstack", "example-app-monitoring-stack", "-n", "openshift-observability-operator").Execute()
o.Expect(errStack).NotTo(o.HaveOccurred())
}
}
|
monitoring
| |||
function
|
openshift/openshift-tests-private
|
53f79518-76ff-49a6-9a4b-ccf9401a233f
|
deleteOperator
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func deleteOperator(oc *exutil.CLI) {
exutil.By("Removing servicemoitor")
errSm := oc.AsAdmin().WithoutNamespace().Run("delete").Args("servicemonitors.monitoring.coreos.com", "observability-operator", "-n", namespace).Execute()
exutil.By("Removing ClusterServiceVersion " + csvName)
errCsv := oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterserviceversions", csvName, "-n", namespace).Execute()
exutil.By("Removing Subscription " + subName)
errSub := oc.AsAdmin().WithoutNamespace().Run("delete").Args("subscription", subName, "-n", namespace).Execute()
exutil.By("Removing OperatorGroup " + ogName)
errOg := oc.AsAdmin().WithoutNamespace().Run("delete").Args("operatorgroup", ogName, "-n", namespace).Execute()
exutil.By("Removing Namespace " + namespace)
errNs := oc.AsAdmin().WithoutNamespace().Run("delete").Args("namespace", namespace, "--force").Execute()
crds, err := oc.AsAdmin().WithoutNamespace().Run("api-resources").Args("--api-group=monitoring.rhobs", "-o", "name").Output()
if err != nil {
e2e.Logf("err %v, crds %v", err, crds)
} else {
crda := append([]string{"crd"}, strings.Split(crds, "\n")...)
errCRD := oc.AsAdmin().WithoutNamespace().Run("delete").Args(crda...).Execute()
o.Expect(errCRD).NotTo(o.HaveOccurred())
}
o.Expect(errSm).NotTo(o.HaveOccurred())
o.Expect(errCsv).NotTo(o.HaveOccurred())
o.Expect(errSub).NotTo(o.HaveOccurred())
o.Expect(errOg).NotTo(o.HaveOccurred())
o.Expect(errNs).NotTo(o.HaveOccurred())
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
c65a99d1-6660-4f21-869e-ae35fe337d50
|
checkRuleExists
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"github.com/tidwall/gjson"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkRuleExists(oc *exutil.CLI, token, routeName, namespace, ruleName string) bool {
var rules []gjson.Result
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
path, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", routeName, "-n", namespace, "-o=jsonpath={.spec.path}").Output()
if err != nil {
return false, nil
}
host, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", routeName, "-n", namespace, "-o=jsonpath={.spec.host}").Output()
if err != nil {
return false, nil
}
ruleCmd := fmt.Sprintf("curl -G -s -k -H\"Authorization: Bearer %s\" https://%s%s/v1/rules", token, host, path)
out, err := exec.Command("bash", "-c", ruleCmd).Output()
if err != nil {
return false, nil
}
rules = gjson.ParseBytes(out).Get("data.groups.#.file").Array()
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Rules are not loaded")
for _, rule := range rules {
if strings.Contains(rule.String(), ruleName) {
return true
}
}
return false
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
23957242-9be1-4d33-8806-08bcb43f596d
|
checkConfigMapExists
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkConfigMapExists(oc *exutil.CLI, namespace, configmapName, checkStr string) bool {
searchOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", configmapName, "-n", namespace, "-o=jsonpath={.data.config\\.yaml}").Output()
if err != nil {
return false
}
if strings.Contains(searchOutput, checkStr) {
return true
}
return false
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
44ef745c-09b4-42a6-a2f6-bebb67aeefce
|
createConfig
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func createConfig(oc *exutil.CLI, namespace, cmName, config string) {
if !checkConfigMapExists(oc, namespace, cmName, "enableUserWorkload: true") {
e2e.Logf("Create configmap: user-workload-monitoring-config")
output, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", config).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
2ccf8535-2835-4627-a1b9-4fe444ec38d1
|
checkOperatorMonitoring
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkOperatorMonitoring(oc *exutil.CLI, oboBaseDir string) {
exutil.By("Check if UWM exists")
uwMonitoringConfig := filepath.Join(oboBaseDir, "user-workload-monitoring-cm.yaml")
createConfig(oc, "openshift-monitoring", "cluster-monitoring-config", uwMonitoringConfig)
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check prometheus rules")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrule", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(out, "alertmanager-rules") && strings.Contains(out, "prometheus-operator-rules") && strings.Contains(out, "prometheus-rules") && strings.Contains(out, "observability-operator-rules") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Prometheus rules are not created in %v namespace", namespace))
exutil.By("Check Observability Operator Alertmanager Rules")
errCheck = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
IsAlertManagerRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-alertmanager-rules")
exutil.By("Check Observability Operator Prometheus Operator Rules")
IsPrometheusOperatorRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-prometheus-operator-rules")
exutil.By("Check Observability Operator Prometheus Rules")
IsPrometheusRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-prometheus-rules")
exutil.By("Check Observability Operator Rules")
IsOperatorRule := checkRuleExists(oc, token, "thanos-querier", "openshift-monitoring", "openshift-observability-operator-observability-operator-rules")
if IsAlertManagerRule && IsPrometheusOperatorRule && IsPrometheusRule && IsOperatorRule {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Observability operator rules are not loaded")
exutil.By("Check Observability Operator metrics")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query={__name__=~"controller_runtime_reconcile.*",job="observability-operator",namespace="openshift-observability-operator"}'`, token, "openshift-observability-operator", uwmLoadTime)
}
|
monitoring
| ||||
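checkRuleExists is referenced above but defined elsewhere in observability_operator_utils.go. Purely as an illustration of the shape such a check can take (the endpoint, port, and pod used here are assumptions, not the source implementation):
// Hypothetical sketch, not the real checkRuleExists: query the rules API
// through the in-cluster Prometheus pod and grep for the rule group name.
func checkRuleExistsSketch(oc *exutil.CLI, token, svc, ns, ruleName string) bool {
	cmd := fmt.Sprintf(`curl -G -k -s -H "Authorization:Bearer %s" https://%s.%s.svc:9091/api/v1/rules`, token, svc, ns)
	res, err := exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", cmd)
	if err != nil {
		return false
	}
	return strings.Contains(res, ruleName)
}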
function
|
openshift/openshift-tests-private
|
66e59701-d20d-4ff1-a4a8-ec481f6d1f49
|
checkLabel
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkLabel(oc *exutil.CLI) {
var labelName = "network.openshift.io/policy-group=monitoring"
exutil.By("Check if the label" + labelName + "exists in the namespace" + namespace)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", namespace, "-o=jsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(out, "monitoring")).To(o.BeTrue())
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
a2722f57-6692-4777-8754-964497dee026
|
checkPodHealth
|
['"context"', '"time"', '"reflect"', '"github.com/tidwall/gjson"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkPodHealth(oc *exutil.CLI) {
var (
actualLiveness interface{}
actualReadiness interface{}
outputLiveness = `{
"failureThreshold": 3,
"httpGet": {
"path": "/healthz",
"port": 8081,
"scheme": "HTTP"
},
"periodSeconds": 10,
"successThreshold": 1,
"timeoutSeconds": 1
}`
outputReadiness = `{
"failureThreshold": 3,
"httpGet": {
"path": "/healthz",
"port": 8081,
"scheme": "HTTP"
},
"periodSeconds": 10,
"successThreshold": 1,
"timeoutSeconds": 1
}`
expectedLiveness = gjson.Parse(outputLiveness).Value()
expectedReadiness = gjson.Parse(outputReadiness).Value()
)
exutil.By("Check remote write config")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
exutil.By("Get the observability operator pod")
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", "app.kubernetes.io/name=observability-operator", "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the liveliness for " + podName)
livenessOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(podName, "-n", namespace, "-o=jsonpath={.spec.containers[].livenessProbe}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
readinessOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(podName, "-n", namespace, "-o=jsonpath={.spec.containers[].readinessProbe}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Live: %v", livenessOut)
e2e.Logf("Ready: %v", readinessOut)
actualLiveness = gjson.Parse(livenessOut).Value()
actualReadiness = gjson.Parse(readinessOut).Value()
if reflect.DeepEqual(actualLiveness, expectedLiveness) && reflect.DeepEqual(actualReadiness, expectedReadiness) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, "liveness/readiness probe not implemented correctly in observability operator pod")
}
|
monitoring
| ||||
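Why the probes are compared as parsed values rather than raw strings: gjson.Parse(...).Value() plus reflect.DeepEqual ignores key order and whitespace, which a plain string comparison would not. A self-contained, runnable illustration:
// Runnable illustration: two JSON documents that differ only in key order
// and whitespace compare equal once parsed into generic values.
package main

import (
	"fmt"
	"reflect"

	"github.com/tidwall/gjson"
)

func main() {
	a := `{"periodSeconds":10,"successThreshold":1}`
	b := `{ "successThreshold": 1, "periodSeconds": 10 }`
	fmt.Println(a == b) // false: raw strings differ
	fmt.Println(reflect.DeepEqual(gjson.Parse(a).Value(), gjson.Parse(b).Value())) // true
}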
function
|
openshift/openshift-tests-private
|
457e9f57-d103-49a2-8b97-9e06c3753c27
|
checkHCPTargets
|
['"fmt"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkHCPTargets(oc *exutil.CLI) {
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check whether the scrape targets are present")
for _, target := range targets {
checkMetric(oc, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=prometheus_sd_discovered_targets{config=~".*%s.*"}' `, monSvcName, namespace, target), token, target, platformLoadTime)
}
}
|
monitoring
| ||||
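For reference, the fully rendered command for one target looks as follows; the service, namespace, and target names in this runnable snippet are placeholders, not the source constants:
// Runnable illustration of the rendered query; all names are placeholders.
package main

import "fmt"

func main() {
	monSvcName, namespace, target := "hcp-monitoring-stack-prometheus", "openshift-observability-operator", "etcd"
	fmt.Printf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=prometheus_sd_discovered_targets{config=~".*%s.*"}'`+"\n", monSvcName, namespace, target)
}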
function
|
openshift/openshift-tests-private
|
1448d091-35b3-436d-9048-3cd2e2db1815
|
checkExampleAppTarget
|
['"fmt"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkExampleAppTarget(oc *exutil.CLI) {
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check whether the scrape targets are present")
checkMetric(oc, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=prometheus_sd_discovered_targets{config=~".*%s.*"}' `, "example-app-monitoring-stack-prometheus", namespace, "prometheus-example-monitor"), token, "prometheus-example-monitor", uwmLoadTime)
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
3cc21870-dcf4-4452-9c2f-db3cd58081ec
|
checkIfMetricValueExists
|
['"context"', '"fmt"', '"time"', '"github.com/tidwall/gjson"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkIfMetricValueExists(oc *exutil.CLI, token, url string, timeout time.Duration) {
var (
res string
err error
)
getCmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeout*time.Second, false, func(ctx context.Context) (bool, error) {
res, err = exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", getCmd)
val := gjson.Parse(res).Get("data.result.#.value").Array()
if err != nil || len(val) == 0 {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metric %s does not contain any value", res))
}
|
monitoring
| ||||
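The gjson path data.result.#.value collects the value pair from every result entry, so an empty array means the query matched no series. A runnable illustration against a canned Prometheus response:
// Runnable illustration of the gjson extraction used above.
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	res := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"version"},"value":[1700000000,"1"]}]}}`
	vals := gjson.Parse(res).Get("data.result.#.value").Array()
	fmt.Println(len(vals) > 0) // true: the metric carries a value
}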
function
|
openshift/openshift-tests-private
|
9c5ce657-7cb1-4041-a2b4-2a73241f9bea
|
checkMetricValue
|
['"fmt"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkMetricValue(oc *exutil.CLI, clusterType string) {
exutil.By("Get SA token")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check the metrics exists and contain value")
if clusterType == "rosa_mc" {
checkIfMetricValueExists(oc, token, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=topk(1,cluster_version{type="cluster"})' `, monSvcName, namespace), platformLoadTime)
} else {
checkIfMetricValueExists(oc, token, fmt.Sprintf(`http://%s.%s.svc.cluster.local:9090/api/v1/query --data-urlencode 'query=version' `, "example-app-monitoring-stack-prometheus", namespace), platformLoadTime)
}
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
1f94370f-7e13-4fca-a4be-c00a6a6f61f9
|
createCustomMonitoringStack
|
['monitoringStackSecretDescription', 'monitoringStackDescription']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func createCustomMonitoringStack(oc *exutil.CLI, oboBaseDir string) {
exutil.By("Create Clustom Monitoring Stack")
createStack(oc, monitoringStackDescription{}, monitoringStackSecretDescription{}, "monitor_example_app", oboBaseDir)
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
380b1587-d905-4c96-abff-e013dea4d2af
|
checkExampleAppStatus
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func checkExampleAppStatus(oc *exutil.CLI, ns string) {
exutil.By("Check the status of Example App")
errCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) {
exutil.By("Get the pod name")
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", ns, "-l", "app=prometheus-example-app", "-oname").Output()
if err != nil {
return false, nil
}
exutil.By("Check the status of pod")
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(podName, "-n", ns, "-o=jsonpath={.status.phase}").Output()
if err != nil {
return false, nil
}
exutil.By("Check service is present")
svcName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, "-l", "app=prometheus-example-app", "-oname").Output()
if err != nil {
return false, nil
}
e2e.Logf("Service: %v", svcName)
exutil.By("Check service monitor is present")
svMonName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("servicemonitor.monitoring.rhobs", "-n", ns, "-l", "k8s-app=prometheus-example-monitor", "-oname").Output()
if err != nil {
return false, nil
}
e2e.Logf("Service Monitor: %v", svMonName)
if status != "Running" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Example app status is not healthy in %s namespace", ns))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
9a6eae0e-c09b-4370-ad2c-daa4257d107c
|
createExampleApp
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func createExampleApp(oc *exutil.CLI, oboBaseDir, ns string) {
appTemplate := filepath.Join(oboBaseDir, "example-app.yaml")
exutil.By("Install Example App")
createResourceFromYaml(oc, ns, appTemplate)
checkExampleAppStatus(oc, ns)
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
32e5267e-3bab-49e7-a9be-7625b4d6236e
|
rapidastScan
|
['"context"', '"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func rapidastScan(oc *exutil.CLI, ns, configFile string, scanPolicyFile string, apiGroupName string) (bool, error) {
//update the token and create a new config file
content, err := os.ReadFile(configFile)
if err != nil {
return false, err
}
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
token := getSAToken(oc, "default", ns)
originConfig := string(content)
targetConfig := strings.Replace(originConfig, "Bearer sha256~xxxxxxxx", "Bearer "+token, -1)
newConfigFile := "/tmp/coodast" + getRandomString()
f, err := os.Create(newConfigFile)
if err != nil {
return false, err
}
// close and remove the temp config once the scan has finished
defer f.Close()
defer exec.Command("rm", newConfigFile).Output()
if _, err = f.WriteString(targetConfig); err != nil {
return false, err
}
//Create configmap
err = oc.WithoutNamespace().Run("create").Args("-n", ns, "configmap", "rapidast-configmap", "--from-file=rapidastconfig.yaml="+newConfigFile, "--from-file=customscan.policy="+scanPolicyFile).Execute()
if err != nil {
return false, err
}
//Create job
oboBaseDir := exutil.FixturePath("testdata", "monitoring", "observabilityoperator")
jobTemplate := filepath.Join(oboBaseDir, "rapidast-coo-job.yaml")
err = oc.WithoutNamespace().Run("create").Args("-n", ns, "-f", jobTemplate).Execute()
if err != nil {
return false, err
}
//Wait up to 10 minutes until the pod reaches Failed or Succeeded
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 10*time.Minute, true, func(context.Context) (done bool, err error) {
jobStatus, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ns, "pod", "-l", "job-name=rapidast-coo-job", "-ojsonpath={.items[0].status.phase}").Output()
e2e.Logf(" rapidast Job status %s ", jobStatus)
if err1 != nil {
return false, nil
}
if jobStatus == "Pending" || jobStatus == "Running" {
return false, nil
}
if jobStatus == "Failed" {
return true, fmt.Errorf("rapidast-coo-job status failed")
}
return jobStatus == "Succeeded", nil
})
//return if the pod status is not Succeeded
if err != nil {
return false, err
}
// Get the rapidast pod name
jobPods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=rapidast-coo-job"})
if err != nil {
return false, err
}
podLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ns, jobPods.Items[0].Name).Output()
//return if failed to get logs
if err != nil {
return false, err
}
//remove token from pod logs
podLogsNoToken := strings.Replace(podLogs, token, "xxxxxxxx", -1)
podLogsNoBearer := strings.Replace(podLogsNoToken, "Bearer ", "bbbbbb ", -1)
// Copy DAST Report into $ARTIFACT_DIR
artifactAvailable := true
artifactdirPath := os.Getenv("ARTIFACT_DIR")
if artifactdirPath == "" {
artifactAvailable = false
}
info, err := os.Stat(artifactdirPath)
if err != nil {
e2e.Logf("%s doesn't exist", artifactdirPath)
artifactAvailable = false
} else if !info.IsDir() {
e2e.Logf("%s isn't a directory", artifactdirPath)
artifactAvailable = false
}
if artifactAvailable {
rapidastResultsSubDir := artifactdirPath + "/rapiddastresultsCOO"
err = os.MkdirAll(rapidastResultsSubDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
artifactFile := rapidastResultsSubDir + "/" + apiGroupName + "_rapidast.result"
e2e.Logf("Write report into %s", artifactFile)
f1, err := os.Create(artifactFile)
o.Expect(err).NotTo(o.HaveOccurred())
defer f1.Close()
_, err = f1.WriteString(podLogsNoBearer)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
// print pod logs if artifactdirPath is not writable
e2e.Logf("#oc logs -n %s %s \n %s", jobPods.Items[0].Name, ns, podLogsNoBearer)
}
//return false, if high risk is reported
podLogA := strings.Split(podLogs, "\n")
riskHigh := 0
riskMedium := 0
re1 := regexp.MustCompile(`"riskdesc": .*High`)
re2 := regexp.MustCompile(`"riskdesc": .*Medium`)
for _, item := range podLogA {
if re1.MatchString(item) {
riskHigh++
}
if re2.MatchString(item) {
riskMedium++
}
}
e2e.Logf("rapidast result: riskHigh=%v riskMedium=%v", riskHigh, riskMedium)
if riskHigh > 0 {
return false, fmt.Errorf("high risk alert, please check the scan result report")
}
return true, nil
}
|
monitoring
| ||||
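The risk accounting at the end of rapidastScan is a plain line scan; note that a line such as "High (Medium)" is counted by both patterns. A runnable illustration:
// Runnable illustration of the riskdesc counting over two canned lines.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	podLogs := `"riskdesc": "High (Medium)"` + "\n" + `"riskdesc": "Medium (Low)"`
	reHigh := regexp.MustCompile(`"riskdesc": .*High`)
	reMedium := regexp.MustCompile(`"riskdesc": .*Medium`)
	high, medium := 0, 0
	for _, line := range strings.Split(podLogs, "\n") {
		if reHigh.MatchString(line) {
			high++
		}
		if reMedium.MatchString(line) {
			medium++
		}
	}
	fmt.Printf("riskHigh=%d riskMedium=%d\n", high, medium) // riskHigh=1 riskMedium=2
}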
function
|
openshift/openshift-tests-private
|
d5d931c4-b435-4010-a8b1-79f606c56b35
|
ifMonitoringStackCRDExists
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator_utils.go
|
func ifMonitoringStackCRDExists(oc *exutil.CLI) bool {
var monitoringstackCRD = "monitoringstacks.monitoring.rhobs"
exutil.By("Check if the monitoringstack crd exists")
_, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd", monitoringstackCRD).Output()
if err != nil {
e2e.Logf("%s doesn't exist", monitoringstackCRD)
return false
}
e2e.Logf("%s exist", monitoringstackCRD)
return true
}
|
monitoring
| |||||
file
|
openshift/openshift-tests-private
|
429a766c-ad3f-48fc-983f-fbb68a8c28bd
|
alerts
|
import (
"context"
"encoding/json"
"fmt"
"time"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/alerts.go
|
package netobserv
import (
"context"
"encoding/json"
"fmt"
"time"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
func getConfiguredAlertRules(oc *exutil.CLI, ruleName string, namespace string) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", ruleName, "-o=jsonpath='{.spec.groups[*].rules[*].alert}'", "-n", namespace).Output()
}
func getAlertStatus(oc *exutil.CLI, alertName string) (map[string]interface{}, error) {
alertOut, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "alertmanager-main-0", "--", "amtool", "--alertmanager.url", "http://localhost:9093", "alert", "query", alertName, "-o", "json").Output()
if err != nil {
return make(map[string]interface{}), err
}
var alertStatus []interface{}
json.Unmarshal([]byte(alertOut), &alertStatus)
if len(alertStatus) == 0 {
return make(map[string]interface{}), nil
}
return alertStatus[0].(map[string]interface{}), nil
}
func waitForAlertToBeActive(oc *exutil.CLI, alertName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 900*time.Second, false, func(context.Context) (done bool, err error) {
alertStatus, err := getAlertStatus(oc, alertName)
if err != nil {
return false, err
}
if len(alertStatus) == 0 {
return false, nil
}
return alertStatus["status"].(map[string]interface{})["state"] == "active", nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s Alert did not become active", alertName))
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
32c66ea4-1ad5-42c0-baf6-0c8afa2e8eb3
|
getConfiguredAlertRules
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/alerts.go
|
func getConfiguredAlertRules(oc *exutil.CLI, ruleName string, namespace string) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", ruleName, "-o=jsonpath='{.spec.groups[*].rules[*].alert}'", "-n", namespace).Output()
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
a5ffd1ce-2359-4f5c-ae48-9b2402e7caef
|
getAlertStatus
|
['"encoding/json"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/alerts.go
|
func getAlertStatus(oc *exutil.CLI, alertName string) (map[string]interface{}, error) {
alertOut, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "alertmanager-main-0", "--", "amtool", "--alertmanager.url", "http://localhost:9093", "alert", "query", alertName, "-o", "json").Output()
if err != nil {
return make(map[string]interface{}), err
}
var alertStatus []interface{}
json.Unmarshal([]byte(alertOut), &alertStatus)
if len(alertStatus) == 0 {
return make(map[string]interface{}), nil
}
return alertStatus[0].(map[string]interface{}), nil
}
|
netobserv
| ||||
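amtool returns a JSON array of alerts and getAlertStatus keeps only the first element. A runnable illustration of the decoding against a canned payload:
// Runnable illustration: decode an amtool-style alert array and read
// status.state the same way waitForAlertToBeActive does.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	alertOut := `[{"labels":{"alertname":"Watchdog"},"status":{"state":"active"}}]`
	var alertStatus []interface{}
	if err := json.Unmarshal([]byte(alertOut), &alertStatus); err != nil {
		panic(err)
	}
	first := alertStatus[0].(map[string]interface{})
	state := first["status"].(map[string]interface{})["state"]
	fmt.Println(state) // active
}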
function
|
openshift/openshift-tests-private
|
aecb31c8-22d7-4755-b143-68e721c1772c
|
waitForAlertToBeActive
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/alerts.go
|
func waitForAlertToBeActive(oc *exutil.CLI, alertName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 900*time.Second, false, func(context.Context) (done bool, err error) {
alertStatus, err := getAlertStatus(oc, alertName)
if err != nil {
return false, err
}
if len(alertStatus) == 0 {
return false, nil
}
return alertStatus["status"].(map[string]interface{})["state"] == "active", nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s Alert did not become active", alertName))
}
|
netobserv
| ||||
file
|
openshift/openshift-tests-private
|
aad9c1ed-b685-4944-9d96-93bef43a0ace
|
console_plugin
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/console_plugin.go
|
package netobserv
type consoleplugin struct {
Image string `json:"image"`
Port int `json:"port"`
}
// place holder for console plugin functions
|
package netobserv
| |||||
file
|
openshift/openshift-tests-private
|
ad1d8bf0-285a-4019-bc6b-e7314f993e4f
|
custom_metrics
|
import (
"fmt"
"os"
"reflect"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"gopkg.in/yaml.v3"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/custom_metrics.go
|
package netobserv
import (
"fmt"
"os"
"reflect"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"gopkg.in/yaml.v3"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type CustomMetrics struct {
Namespace string
Template string
}
type CustomMetricsTemplateConfig struct {
Objects []interface{} `yaml:"objects"`
}
type CustomMetricsConfig struct {
DashboardNames []string
MetricName string
Queries []string
}
// create flowmetrics resource from template
func (cm CustomMetrics) createCustomMetrics(oc *exutil.CLI) {
parameters := []string{"--ignore-unknown-parameters=true", "-f", cm.Template, "-p"}
cmr := reflect.ValueOf(&cm).Elem()
for i := 0; i < cmr.NumField(); i++ {
if cmr.Field(i).Interface() != "" {
if cmr.Type().Field(i).Name != "Template" {
parameters = append(parameters, fmt.Sprintf("%s=%s", cmr.Type().Field(i).Name, cmr.Field(i).Interface()))
}
}
}
exutil.ApplyNsResourceFromTemplate(oc, cm.Namespace, parameters...)
}
// parse custom metrics yaml template
func (cm CustomMetrics) parseTemplate() *CustomMetricsTemplateConfig {
yamlFile, err := os.ReadFile(cm.Template)
if err != nil {
e2e.Failf("Could not read the template file %s", cm.Template)
}
var cmc *CustomMetricsTemplateConfig
err = yaml.Unmarshal(yamlFile, &cmc)
if err != nil {
e2e.Failf("Could not Unmarshal %v", err)
}
return cmc
}
// returns queries and dashboardNames
func getChartsConfig(chartsConfig []interface{}) ([]string, []string) {
var result []string
var dashboardNames []string
for _, conf := range chartsConfig {
chartsConf := conf.(map[string]interface{})
for k, v := range chartsConf {
if k == "dashboardName" {
dashboardNames = append(dashboardNames, v.(string))
}
if k == "queries" {
queries := v.([]interface{})
for _, qConf := range queries {
queryConf := qConf.(map[string]interface{})
for qk, qv := range queryConf {
if qk == "promQL" {
result = append(result, qv.(string))
}
}
}
}
}
}
return result, dashboardNames
}
// returns slice of CustomMetricsConfig
func (cm CustomMetrics) getCustomMetricConfigs() []CustomMetricsConfig {
cmc := cm.parseTemplate()
var cmConfigs []CustomMetricsConfig
for _, template := range cmc.Objects {
var cmConfig CustomMetricsConfig
t := template.(map[string]interface{})
for object, v := range t {
if object == "spec" {
spec := v.(map[string]interface{})
for config, val := range spec {
if config == "charts" {
chartsConfig := val.([]interface{})
cmConfig.Queries, cmConfig.DashboardNames = getChartsConfig(chartsConfig)
}
if config == "metricName" {
cmConfig.MetricName = val.(string)
}
}
cmConfigs = append(cmConfigs, cmConfig)
}
}
}
return cmConfigs
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
b18c76bc-689b-486e-8f59-b4b319dae8ae
|
createCustomMetrics
|
['"fmt"', '"reflect"']
|
['CustomMetrics']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/custom_metrics.go
|
func (cm CustomMetrics) createCustomMetrics(oc *exutil.CLI) {
parameters := []string{"--ignore-unknown-parameters=true", "-f", cm.Template, "-p"}
cmr := reflect.ValueOf(&cm).Elem()
for i := 0; i < cmr.NumField(); i++ {
if cmr.Field(i).Interface() != "" {
if cmr.Type().Field(i).Name != "Template" {
parameters = append(parameters, fmt.Sprintf("%s=%s", cmr.Type().Field(i).Name, cmr.Field(i).Interface()))
}
}
}
exutil.ApplyNsResourceFromTemplate(oc, cm.Namespace, parameters...)
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
24b62f91-5381-4aa7-9930-a6954be2adb6
|
parseTemplate
|
['"os"']
|
['CustomMetrics', 'CustomMetricsTemplateConfig']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/custom_metrics.go
|
func (cm CustomMetrics) parseTemplate() *CustomMetricsTemplateConfig {
yamlFile, err := os.ReadFile(cm.Template)
if err != nil {
e2e.Failf("Could not read the template file %s", cm.Template)
}
var cmc *CustomMetricsTemplateConfig
err = yaml.Unmarshal(yamlFile, &cmc)
if err != nil {
e2e.Failf("Could not Unmarshal %v", err)
}
return cmc
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
a19d86b1-7fde-49ef-8d17-bd98e2d58497
|
getChartsConfig
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/custom_metrics.go
|
func getChartsConfig(chartsConfig []interface{}) ([]string, []string) {
var result []string
var dashboardNames []string
for _, conf := range chartsConfig {
chartsConf := conf.(map[string]interface{})
for k, v := range chartsConf {
if k == "dashboardName" {
dashboardNames = append(dashboardNames, v.(string))
}
if k == "queries" {
queries := v.([]interface{})
for _, qConf := range queries {
queryConf := qConf.(map[string]interface{})
for qk, qv := range queryConf {
if qk == "promQL" {
result = append(result, qv.(string))
}
}
}
}
}
}
return result, dashboardNames
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
6758320f-2a39-4e14-95cb-70aecb783888
|
getCustomMetricConfigs
|
['CustomMetrics', 'CustomMetricsConfig']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/custom_metrics.go
|
func (cm CustomMetrics) getCustomMetricConfigs() []CustomMetricsConfig {
cmc := cm.parseTemplate()
var cmConfigs []CustomMetricsConfig
for _, template := range cmc.Objects {
var cmConfig CustomMetricsConfig
t := template.(map[string]interface{})
for object, v := range t {
if object == "spec" {
spec := v.(map[string]interface{})
for config, val := range spec {
if config == "charts" {
chartsConfig := val.([]interface{})
cmConfig.Queries, cmConfig.DashboardNames = getChartsConfig(chartsConfig)
}
if config == "metricName" {
cmConfig.MetricName = val.(string)
}
}
cmConfigs = append(cmConfigs, cmConfig)
}
}
}
return cmConfigs
}
|
netobserv
| ||||
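For context, a minimal FlowMetric-style template that parseTemplate and getCustomMetricConfigs can walk; the YAML content here is a made-up example, not a shipped template:
// Runnable illustration of the expected template shape; the YAML is invented.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	doc := `
objects:
- spec:
    metricName: node_flows_total
    charts:
    - dashboardName: Main
      queries:
      - promQL: sum(rate(node_flows_total[2m]))
`
	var cfg struct {
		Objects []interface{} `yaml:"objects"`
	}
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Objects)) // 1: one spec with charts and metricName
}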
file
|
openshift/openshift-tests-private
|
c80e3432-336d-4de3-bfea-310b6916351f
|
ip_utils
|
import (
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/ip_utils.go
|
package netobserv
import (
"strings"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
func checkIPStackType(oc *exutil.CLI) string {
svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 {
return "dualstack"
} else if strings.Count(svcNetwork, ":") >= 2 {
return "ipv6single"
} else if strings.Count(svcNetwork, ".") >= 2 {
return "ipv4single"
}
return ""
}
func getPodIPv4(oc *exutil.CLI, namespace string, podName string) string {
podIPv4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv4)
return podIPv4
}
func getPodIPv6(oc *exutil.CLI, namespace string, podName string, ipStack string) string {
if ipStack == "ipv6single" {
podIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv6)
return podIPv6
} else if ipStack == "dualstack" {
podIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[1].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv6)
return podIPv6
}
return ""
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
9268b46b-6a88-4f32-9cdf-83cabd576688
|
checkIPStackType
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/ip_utils.go
|
func checkIPStackType(oc *exutil.CLI) string {
svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 {
return "dualstack"
} else if strings.Count(svcNetwork, ":") >= 2 {
return "ipv6single"
} else if strings.Count(svcNetwork, ".") >= 2 {
return "ipv4single"
}
return ""
}
|
netobserv
| ||||
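The stack detection relies on counting separators in the serviceNetwork string as printed by the jsonpath output, brackets and quotes included. A runnable illustration with made-up CIDRs:
// Runnable illustration of the colon/dot counting heuristic.
package main

import (
	"fmt"
	"strings"
)

func stack(svcNetwork string) string {
	switch {
	case strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2:
		return "dualstack"
	case strings.Count(svcNetwork, ":") >= 2:
		return "ipv6single"
	case strings.Count(svcNetwork, ".") >= 2:
		return "ipv4single"
	}
	return ""
}

func main() {
	fmt.Println(stack(`["172.30.0.0/16"]`))              // ipv4single
	fmt.Println(stack(`["fd02::/112"]`))                 // ipv6single
	fmt.Println(stack(`["172.30.0.0/16","fd02::/112"]`)) // dualstack
}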
function
|
openshift/openshift-tests-private
|
daa78eb3-fb18-40e3-a1e8-cc94d1ac946d
|
getPodIPv4
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/ip_utils.go
|
func getPodIPv4(oc *exutil.CLI, namespace string, podName string) string {
podIPv4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv4)
return podIPv4
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
d1aa1af8-aae1-442a-8fe9-cc03087941d1
|
getPodIPv6
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/ip_utils.go
|
func getPodIPv6(oc *exutil.CLI, namespace string, podName string, ipStack string) string {
if ipStack == "ipv6single" {
podIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv6)
return podIPv6
} else if ipStack == "dualstack" {
podIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[1].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv6)
return podIPv6
}
return ""
}
|
netobserv
| |||||
file
|
openshift/openshift-tests-private
|
30b1a25f-c10a-4751-bc77-3ebc782b4b4c
|
kafka
|
import (
"context"
"fmt"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
package netobserv
import (
"context"
"fmt"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// Kafka struct to handle default Kafka installation
type Kafka struct {
Name string
Namespace string
Template string
StorageClass string
}
// KafkaMetrics struct to handle kafka metrics config deployment
type KafkaMetrics struct {
Namespace string
Template string
}
// KafkaTopic struct handles creation of kafka topic
type KafkaTopic struct {
Namespace string
TopicName string
Name string
Template string
}
type KafkaUser struct {
Namespace string
UserName string
Name string
Template string
}
// deploys default Kafka
func (kafka *Kafka) deployKafka(oc *exutil.CLI) {
e2e.Logf("Deploy Default Kafka")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafka.Template, "-p", "NAMESPACE=" + kafka.Namespace}
if kafka.Name != "" {
parameters = append(parameters, "NAME="+kafka.Name)
}
if kafka.StorageClass != "" {
parameters = append(parameters, "STORAGE_CLASS="+kafka.StorageClass)
}
exutil.ApplyNsResourceFromTemplate(oc, kafka.Namespace, parameters...)
}
// deploys Kafka Metrics
func (kafkaMetrics *KafkaMetrics) deployKafkaMetrics(oc *exutil.CLI) {
e2e.Logf("Deploy Kafka metrics")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafkaMetrics.Template, "-p", "NAMESPACE=" + kafkaMetrics.Namespace}
exutil.ApplyNsResourceFromTemplate(oc, kafkaMetrics.Namespace, parameters...)
}
// creates a Kafka topic
func (kafkaTopic *KafkaTopic) deployKafkaTopic(oc *exutil.CLI) {
e2e.Logf("Create Kafka topic")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafkaTopic.Template, "-p", "NAMESPACE=" + kafkaTopic.Namespace}
if kafkaTopic.Name != "" {
parameters = append(parameters, "NAME="+kafkaTopic.Name)
}
if kafkaTopic.TopicName != "" {
parameters = append(parameters, "TOPIC="+kafkaTopic.TopicName)
}
exutil.ApplyNsResourceFromTemplate(oc, kafkaTopic.Namespace, parameters...)
}
// deploys KafkaUser
func (kafkaUser *KafkaUser) deployKafkaUser(oc *exutil.CLI) {
e2e.Logf("Create Kafka User")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafkaUser.Template, "-p", "NAMESPACE=" + kafkaUser.Namespace}
if kafkaUser.UserName != "" {
parameters = append(parameters, "USER_NAME="+kafkaUser.UserName)
}
if kafkaUser.Name != "" {
parameters = append(parameters, "NAME="+kafkaUser.Name)
}
exutil.ApplyNsResourceFromTemplate(oc, kafkaUser.Namespace, parameters...)
}
// deletes kafkaUser
func (kafka *KafkaUser) deleteKafkaUser(oc *exutil.CLI) {
e2e.Logf("Deleting Kafka user")
command := []string{"kafkauser", kafka.UserName, "-n", kafka.Namespace}
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
// deletes kafkaTopic
func (kafkaTopic *KafkaTopic) deleteKafkaTopic(oc *exutil.CLI) {
e2e.Logf("Deleting Kafka topic")
command := []string{"kafkatopic", kafkaTopic.TopicName, "-n", kafkaTopic.Namespace}
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
// deletes kafka
func (kafka *Kafka) deleteKafka(oc *exutil.CLI) {
e2e.Logf("Deleting Kafka")
command := []string{"kafka", kafka.Name, "-n", kafka.Namespace}
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
// Poll to wait for kafka to be ready
func waitForKafkaReady(oc *exutil.CLI, kafkaName string, kafkaNS string) {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
command := []string{"kafka", kafkaName, "-n", kafkaNS, `-o=jsonpath={.status.conditions[*].type}`}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(command...).Output()
if err != nil {
e2e.Logf("kafka status ready error: %v", err)
return false, err
}
if output == "Ready" || output == "Warning Ready" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource kafka/%s did not appear", kafkaName))
}
// Poll to wait for kafka Topic to be ready
func waitForKafkaTopicReady(oc *exutil.CLI, kafkaTopicName string, kafkaTopicNS string) {
err := wait.PollUntilContextTimeout(context.Background(), 6*time.Second, 360*time.Second, false, func(context.Context) (done bool, err error) {
command := []string{"kafkaTopic", kafkaTopicName, "-n", kafkaTopicNS, `-o=jsonpath='{.status.conditions[*].type}'`}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(command...).Output()
if err != nil {
e2e.Logf("kafka Topic status ready error: %v", err)
return false, err
}
status := strings.Replace(output, "'", "", 2)
e2e.Logf("Waiting for kafka status %s", status)
if status == "Ready" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource kafkaTopic/%s did not appear", kafkaTopicName))
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
d9256dfc-3290-48f1-995a-55762d960d85
|
deployKafka
|
['Kafka']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func (kafka *Kafka) deployKafka(oc *exutil.CLI) {
e2e.Logf("Deploy Default Kafka")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafka.Template, "-p", "NAMESPACE=" + kafka.Namespace}
if kafka.Name != "" {
parameters = append(parameters, "NAME="+kafka.Name)
}
if kafka.StorageClass != "" {
parameters = append(parameters, "STORAGE_CLASS="+kafka.StorageClass)
}
exutil.ApplyNsResourceFromTemplate(oc, kafka.Namespace, parameters...)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
34974751-946f-41ec-964c-bd25aa9d649d
|
deployKafkaMetrics
|
['Kafka', 'KafkaMetrics']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func (kafkaMetrics *KafkaMetrics) deployKafkaMetrics(oc *exutil.CLI) {
e2e.Logf("Deploy Kafka metrics")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafkaMetrics.Template, "-p", "NAMESPACE=" + kafkaMetrics.Namespace}
exutil.ApplyNsResourceFromTemplate(oc, kafkaMetrics.Namespace, parameters...)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
6f388220-de18-4d52-9b4e-7db05661a47f
|
deployKafkaTopic
|
['Kafka', 'KafkaTopic']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func (kafkaTopic *KafkaTopic) deployKafkaTopic(oc *exutil.CLI) {
e2e.Logf("Create Kafka topic")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafkaTopic.Template, "-p", "NAMESPACE=" + kafkaTopic.Namespace}
if kafkaTopic.Name != "" {
parameters = append(parameters, "NAME="+kafkaTopic.Name)
}
if kafkaTopic.TopicName != "" {
parameters = append(parameters, "TOPIC="+kafkaTopic.TopicName)
}
exutil.ApplyNsResourceFromTemplate(oc, kafkaTopic.Namespace, parameters...)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
6dac545f-2fd4-47e3-8a68-792a789b0c75
|
deployKafkaUser
|
['Kafka', 'KafkaUser']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func (kafkaUser *KafkaUser) deployKafkaUser(oc *exutil.CLI) {
e2e.Logf("Create Kafka User")
parameters := []string{"--ignore-unknown-parameters=true", "-f", kafkaUser.Template, "-p", "NAMESPACE=" + kafkaUser.Namespace}
if kafkaUser.UserName != "" {
parameters = append(parameters, "USER_NAME="+kafkaUser.UserName)
}
if kafkaUser.Name != "" {
parameters = append(parameters, "NAME="+kafkaUser.Name)
}
exutil.ApplyNsResourceFromTemplate(oc, kafkaUser.Namespace, parameters...)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
81bd30f2-fbbe-4a5f-bcc1-48536734f055
|
deleteKafkaUser
|
['Kafka', 'KafkaUser']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func (kafka *KafkaUser) deleteKafkaUser(oc *exutil.CLI) {
e2e.Logf("Deleting Kafka user")
command := []string{"kafkauser", kafka.UserName, "-n", kafka.Namespace}
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
6e427907-3f6c-4b38-af8c-ba2f5d3a9c42
|
deleteKafkaTopic
|
['Kafka', 'KafkaTopic']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func (kafkaTopic *KafkaTopic) deleteKafkaTopic(oc *exutil.CLI) {
e2e.Logf("Deleting Kafka topic")
command := []string{"kafkatopic", kafkaTopic.TopicName, "-n", kafkaTopic.Namespace}
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
80bf9143-f8a3-4467-9cc3-0ceac1f62b1d
|
deleteKafka
|
['Kafka']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func (kafka *Kafka) deleteKafka(oc *exutil.CLI) {
e2e.Logf("Deleting Kafka")
command := []string{"kafka", kafka.Name, "-n", kafka.Namespace}
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
a2b2f729-1d78-44e3-9973-41b3f08e1a42
|
waitForKafkaReady
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func waitForKafkaReady(oc *exutil.CLI, kafkaName string, kafkaNS string) {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
command := []string{"kafka", kafkaName, "-n", kafkaNS, `-o=jsonpath={.status.conditions[*].type}`}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(command...).Output()
if err != nil {
e2e.Logf("kafka status ready error: %v", err)
return false, err
}
if output == "Ready" || output == "Warning Ready" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource kafka/%s did not appear", kafkaName))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
9b7ee67c-6144-4e47-b22f-5f51b96311df
|
waitForKafkaTopicReady
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/kafka.go
|
func waitForKafkaTopicReady(oc *exutil.CLI, kafkaTopicName string, kafkaTopicNS string) {
err := wait.PollUntilContextTimeout(context.Background(), 6*time.Second, 360*time.Second, false, func(context.Context) (done bool, err error) {
command := []string{"kafkaTopic", kafkaTopicName, "-n", kafkaTopicNS, `-o=jsonpath='{.status.conditions[*].type}'`}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(command...).Output()
if err != nil {
e2e.Logf("kafka Topic status ready error: %v", err)
return false, err
}
status := strings.Replace(output, "'", "", 2)
e2e.Logf("Waiting for kafka status %s", status)
if status == "Ready" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource kafkaTopic/%s did not appear", kafkaTopicName))
}
|
netobserv
| ||||
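The strings.Replace call above exists because the jsonpath expression is wrapped in single quotes, so oc prints the quotes literally around the status. A runnable illustration:
// Runnable illustration: strip the literal quotes that the quoted
// jsonpath argument leaves around the output.
package main

import (
	"fmt"
	"strings"
)

func main() {
	output := `'Ready'`
	status := strings.Replace(output, "'", "", 2) // remove both quotes
	fmt.Println(status == "Ready")                // true
}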
file
|
openshift/openshift-tests-private
|
e2b498ea-639e-460c-84a0-e2e92481df87
|
loki_client
|
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
k8sresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
package netobserv
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
k8sresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type Resource struct {
Kind string
Name string
Namespace string
}
// compareClusterResources compares the remaining node resources with the resources requested by the user
func compareClusterResources(oc *exutil.CLI, cpu, memory string) bool {
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
var remainingCPU, remainingMemory int64
re := exutil.GetRemainingResourcesNodesMap(oc, nodes)
for _, node := range nodes {
remainingCPU += re[node.Name].CPU
remainingMemory += re[node.Name].Memory
}
requiredCPU, _ := k8sresource.ParseQuantity(cpu)
requiredMemory, _ := k8sresource.ParseQuantity(memory)
e2e.Logf("the required cpu is: %d, and the required memory is: %d", requiredCPU.MilliValue(), requiredMemory.MilliValue())
e2e.Logf("the remaining cpu is: %d, and the remaning memory is: %d", remainingCPU, remainingMemory)
return remainingCPU > requiredCPU.MilliValue() && remainingMemory > requiredMemory.MilliValue()
}
// validateInfraAndResourcesForLoki checks the cluster's remaining resources and the platform type
// supportedPlatforms lists the platform types the case can run on; if empty, the platform check is skipped
func validateInfraAndResourcesForLoki(oc *exutil.CLI, reqMemory, reqCPU string, supportedPlatforms ...string) bool {
currentPlatform := exutil.CheckPlatform(oc)
if currentPlatform == "aws" {
// skip the case on aws sts clusters
_, err := oc.AdminKubeClient().CoreV1().Secrets("kube-system").Get(context.Background(), "aws-creds", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return false
}
}
if len(supportedPlatforms) > 0 {
return contain(supportedPlatforms, currentPlatform) && compareClusterResources(oc, reqCPU, reqMemory)
}
return compareClusterResources(oc, reqCPU, reqMemory)
}
type lokiClient struct {
username string //Username for HTTP basic auth.
password string //Password for HTTP basic auth
address string //Server address.
orgID string //adds X-Scope-OrgID to API requests for representing tenant ID. Useful for requesting tenant data when bypassing an auth gateway.
bearerToken string //adds the Authorization header to API requests for authentication purposes.
bearerTokenFile string //adds the Authorization header to API requests for authentication purposes.
retries int //How many times to retry each query when getting an error response from Loki.
queryTags string //adds X-Query-Tags header to API requests.
quiet bool //Suppress query metadata.
startTime time.Time //Start time for reading logs
localhost bool //whether loki is port-forwarded to localhost, useful for monolithic loki
}
type lokiQueryResponse struct {
Status string `json:"status"`
Data struct {
ResultType string `json:"resultType"`
Result []struct {
Stream struct {
App string `json:"app"`
DstK8S_Namespace string `json:"DstK8S_Namespace"`
FlowDirection string `json:"FlowDirection"`
SrcK8S_Namespace string `json:"SrcK8S_Namespace"`
SrcK8S_OwnerName string `json:"SrcK8S_OwnerName"`
DstK8S_OwnerName string `json:"DstK8S_OwnerName"`
} `json:"stream"`
Values [][]string `json:"values"`
} `json:"result"`
Stats struct {
Summary struct {
BytesProcessedPerSecond int `json:"bytesProcessedPerSecond"`
LinesProcessedPerSecond int `json:"linesProcessedPerSecond"`
TotalBytesProcessed int `json:"totalBytesProcessed"`
TotalLinesProcessed int `json:"totalLinesProcessed"`
ExecTime float32 `json:"execTime"`
} `json:"summary"`
Store struct {
TotalChunksRef int `json:"totalChunksRef"`
TotalChunksDownloaded int `json:"totalChunksDownloaded"`
ChunksDownloadTime int `json:"chunksDownloadTime"`
HeadChunkBytes int `json:"headChunkBytes"`
HeadChunkLines int `json:"headChunkLines"`
DecompressedBytes int `json:"decompressedBytes"`
DecompressedLines int `json:"decompressedLines"`
CompressedBytes int `json:"compressedBytes"`
TotalDuplicates int `json:"totalDuplicates"`
} `json:"store"`
Ingester struct {
TotalReached int `json:"totalReached"`
TotalChunksMatched int `json:"totalChunksMatched"`
TotalBatches int `json:"totalBatches"`
TotalLinesSent int `json:"totalLinesSent"`
HeadChunkBytes int `json:"headChunkBytes"`
HeadChunkLines int `json:"headChunkLines"`
DecompressedBytes int `json:"decompressedBytes"`
DecompressedLines int `json:"decompressedLines"`
CompressedBytes int `json:"compressedBytes"`
TotalDuplicates int `json:"totalDuplicates"`
} `json:"ingester"`
} `json:"stats"`
} `json:"data"`
}
// newLokiClient initializes a lokiClient with server address
func newLokiClient(routeAddress string, time time.Time) *lokiClient {
client := &lokiClient{}
client.address = routeAddress
client.retries = 5
client.quiet = false
client.startTime = time
client.localhost = false
return client
}
// retry sets how many times to retry each query
func (c *lokiClient) retry(retry int) *lokiClient {
nc := *c
nc.retries = retry
return &nc
}
// withToken sets the token used to do query
func (c *lokiClient) withToken(bearerToken string) *lokiClient {
nc := *c
nc.bearerToken = bearerToken
return &nc
}
// buildURL concats a url `http://foo/bar` with a path `/buzz`.
func buildURL(u, p, q string) (string, error) {
url, err := url.Parse(u)
if err != nil {
return "", err
}
url.Path = path.Join(url.Path, p)
url.RawQuery = q
return url.String(), nil
}
type queryStringBuilder struct {
values url.Values
}
func newQueryStringBuilder() *queryStringBuilder {
return &queryStringBuilder{
values: url.Values{},
}
}
// encode returns the URL-encoded query string based on key-value
// parameters added to the builder calling Set functions.
func (b *queryStringBuilder) encode() string {
return b.values.Encode()
}
func (b *queryStringBuilder) setString(name, value string) {
b.values.Set(name, value)
}
func (b *queryStringBuilder) setInt(name string, value int64) {
b.setString(name, strconv.FormatInt(value, 10))
}
func (b *queryStringBuilder) setInt32(name string, value int) {
b.setString(name, strconv.Itoa(value))
}
func (c *lokiClient) getHTTPRequestHeader() (http.Header, error) {
h := make(http.Header)
if c.username != "" && c.password != "" {
h.Set(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(c.username+":"+c.password)),
)
}
h.Set("User-Agent", "loki-logcli")
if c.orgID != "" {
h.Set("X-Scope-OrgID", c.orgID)
}
if c.queryTags != "" {
h.Set("X-Query-Tags", c.queryTags)
}
if (c.username != "" || c.password != "") && (len(c.bearerToken) > 0 || len(c.bearerTokenFile) > 0) {
return nil, fmt.Errorf("at most one of HTTP basic auth (username/password), bearer-token & bearer-token-file is allowed to be configured")
}
if len(c.bearerToken) > 0 && len(c.bearerTokenFile) > 0 {
return nil, fmt.Errorf("at most one of the options bearer-token & bearer-token-file is allowed to be configured")
}
if c.bearerToken != "" {
h.Set("Authorization", "Bearer "+c.bearerToken)
}
if c.bearerTokenFile != "" {
b, err := os.ReadFile(c.bearerTokenFile)
if err != nil {
return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", c.bearerTokenFile, err)
}
bearerToken := strings.TrimSpace(string(b))
h.Set("Authorization", "Bearer "+bearerToken)
}
return h, nil
}
func (c *lokiClient) doRequest(path, query string, quiet bool, out interface{}) error {
us, err := buildURL(c.address, path, query)
if err != nil {
return err
}
if !quiet {
e2e.Logf("%s", us)
}
req, err := http.NewRequest("GET", us, nil)
if err != nil {
return err
}
h, err := c.getHTTPRequestHeader()
if err != nil {
return err
}
req.Header = h
var tr *http.Transport
proxy := getProxyFromEnv()
// don't use proxy if svc/loki is port-forwarded to localhost
if !c.localhost && len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
Proxy: http.ProxyURL(proxyURL),
}
} else {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
client := &http.Client{Transport: tr}
var resp *http.Response
attempts := c.retries + 1
success := false
for attempts > 0 {
attempts--
resp, err = client.Do(req)
if err != nil {
e2e.Logf("error sending request %v", err)
continue
}
if resp.StatusCode/100 != 2 {
buf, _ := io.ReadAll(resp.Body) // nolint
e2e.Logf("Error response from server: %s (%v) attempts remaining: %d", string(buf), err, attempts)
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body", err)
}
continue
}
success = true
break
}
if !success {
return fmt.Errorf("run out of attempts while querying the server")
}
defer func() {
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body", err)
}
}()
return json.NewDecoder(resp.Body).Decode(out)
}
func (c *lokiClient) doQuery(path string, query string, quiet bool) (*lokiQueryResponse, error) {
var err error
var r lokiQueryResponse
if err = c.doRequest(path, query, quiet, &r); err != nil {
return nil, err
}
return &r, nil
}
// queryRange uses the /api/v1/query_range endpoint to execute a range query
// logType: application, infrastructure, audit
// queryStr: string to filter logs, for example: "{kubernetes_namespace_name="test"}"
// limit: max log count
// start: Start looking for logs at this absolute time(inclusive), e.g.: time.Now().Add(time.Duration(-1)*time.Hour) means 1 hour ago
// end: Stop looking for logs at this absolute time (exclusive)
// forward: true means scan forwards through logs, false means scan backwards through logs
func (c *lokiClient) queryRange(logType string, queryStr string, limit int, start, end time.Time, forward bool) (*lokiQueryResponse, error) {
direction := func() string {
if forward {
return "FORWARD"
}
return "BACKWARD"
}
params := newQueryStringBuilder()
params.setString("query", queryStr)
params.setInt32("limit", limit)
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
params.setString("direction", direction())
logPath := ""
if len(logType) > 0 {
logPath = apiPath + logType + queryRangePath
} else {
logPath = queryRangePath
}
return c.doQuery(logPath, params.encode(), c.quiet)
}
func (c *lokiClient) searchLogsInLoki(logType, query string) (*lokiQueryResponse, error) {
res, err := c.queryRange(logType, query, 50, c.startTime, time.Now(), false)
return res, err
}
func (c *lokiClient) searchByKey(logType, key, value string) (*lokiQueryResponse, error) {
res, err := c.searchLogsInLoki(logType, "{"+key+"=\""+value+"\"}")
return res, err
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
94cae63c-f72e-4927-80ec-19d9bb980242
|
compareClusterResources
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func compareClusterResources(oc *exutil.CLI, cpu, memory string) bool {
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
var remainingCPU, remainingMemory int64
re := exutil.GetRemainingResourcesNodesMap(oc, nodes)
for _, node := range nodes {
remainingCPU += re[node.Name].CPU
remainingMemory += re[node.Name].Memory
}
requiredCPU, _ := k8sresource.ParseQuantity(cpu)
requiredMemory, _ := k8sresource.ParseQuantity(memory)
e2e.Logf("the required cpu is: %d, and the required memory is: %d", requiredCPU.MilliValue(), requiredMemory.MilliValue())
e2e.Logf("the remaining cpu is: %d, and the remaning memory is: %d", remainingCPU, remainingMemory)
return remainingCPU > requiredCPU.MilliValue() && remainingMemory > requiredMemory.MilliValue()
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
c4353def-20ae-44a6-8163-3f6fadfdc7a6
|
validateInfraAndResourcesForLoki
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func validateInfraAndResourcesForLoki(oc *exutil.CLI, reqMemory, reqCPU string, supportedPlatforms ...string) bool {
currentPlatform := exutil.CheckPlatform(oc)
if currentPlatform == "aws" {
// skip the case on aws sts clusters
_, err := oc.AdminKubeClient().CoreV1().Secrets("kube-system").Get(context.Background(), "aws-creds", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return false
}
}
if len(supportedPlatforms) > 0 {
return contain(supportedPlatforms, currentPlatform) && compareClusterResources(oc, reqCPU, reqMemory)
}
return compareClusterResources(oc, reqCPU, reqMemory)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
fbc821ea-be09-417e-a543-8a1992c6c483
|
newLokiClient
|
['"time"']
|
['lokiClient']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func newLokiClient(routeAddress string, time time.Time) *lokiClient {
client := &lokiClient{}
client.address = routeAddress
client.retries = 5
client.quiet = false
client.startTime = time
client.localhost = false
return client
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
4ddeb2ff-16a6-4dc1-9ee3-92ef7a40caac
|
retry
|
['lokiClient']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) retry(retry int) *lokiClient {
nc := *c
nc.retries = retry
return &nc
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
d80428a9-3e48-4bea-ac56-6ead5e20afab
|
withToken
|
['lokiClient']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) withToken(bearerToken string) *lokiClient {
nc := *c
nc.bearerToken = bearerToken
return &nc
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
e1f3aed0-8636-4877-86e5-72f4841a1e36
|
buildURL
|
['"net/url"', '"path"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func buildURL(u, p, q string) (string, error) {
url, err := url.Parse(u)
if err != nil {
return "", err
}
url.Path = path.Join(url.Path, p)
url.RawQuery = q
return url.String(), nil
}
|
netobserv
| ||||
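buildURL joins the path onto whatever prefix the address already carries, which matters for gateway-fronted Loki routes. A runnable illustration of the same net/url mechanics with a placeholder address:
// Runnable illustration of the path join and raw query assignment that
// buildURL performs; the address is a placeholder.
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	u, _ := url.Parse("https://loki.example.com/api/logs/v1/network")
	u.Path = path.Join(u.Path, "/loki/api/v1/query_range")
	u.RawQuery = "limit=50&direction=BACKWARD"
	fmt.Println(u.String())
	// https://loki.example.com/api/logs/v1/network/loki/api/v1/query_range?limit=50&direction=BACKWARD
}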
function
|
openshift/openshift-tests-private
|
95f12391-49e6-4384-8211-97d42697a8db
|
newQueryStringBuilder
|
['"net/url"']
|
['queryStringBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func newQueryStringBuilder() *queryStringBuilder {
return &queryStringBuilder{
values: url.Values{},
}
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
a90031ce-45e2-440e-8f05-aa7d9deb1ce2
|
encode
|
['queryStringBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (b *queryStringBuilder) encode() string {
return b.values.Encode()
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
6bc42bc6-6efa-4a83-bf7c-e5ea67267a74
|
setString
|
['queryStringBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (b *queryStringBuilder) setString(name, value string) {
b.values.Set(name, value)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
985b4ad8-11a7-4772-8c97-70e9b5dfd4de
|
setInt
|
['"strconv"']
|
['queryStringBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (b *queryStringBuilder) setInt(name string, value int64) {
b.setString(name, strconv.FormatInt(value, 10))
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
e46e77a3-6966-4e60-850b-4daced89643c
|
setInt32
|
['"strconv"']
|
['queryStringBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (b *queryStringBuilder) setInt32(name string, value int) {
b.setString(name, strconv.Itoa(value))
}
|
netobserv
| |||
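Taken together, the builder is a thin wrapper over url.Values; a sketch of the encoded output (the label selector is a made-up example):

func exampleQueryString() string {
	params := newQueryStringBuilder()
	params.setString("query", `{app="example"}`)
	params.setInt32("limit", 50)
	params.setInt("end", 1700000000000000000)
	// url.Values.Encode sorts keys, yielding:
	// end=1700000000000000000&limit=50&query=%7Bapp%3D%22example%22%7D
	return params.encode()
}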
function
|
openshift/openshift-tests-private
|
ec8bf4a0-8178-4f04-8b5e-b3af24bfb0b9
|
getHTTPRequestHeader
|
['"encoding/base64"', '"fmt"', '"net/http"', '"os"', '"strings"']
|
['lokiClient']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) getHTTPRequestHeader() (http.Header, error) {
h := make(http.Header)
if c.username != "" && c.password != "" {
h.Set(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(c.username+":"+c.password)),
)
}
h.Set("User-Agent", "loki-logcli")
if c.orgID != "" {
h.Set("X-Scope-OrgID", c.orgID)
}
if c.queryTags != "" {
h.Set("X-Query-Tags", c.queryTags)
}
if (c.username != "" || c.password != "") && (len(c.bearerToken) > 0 || len(c.bearerTokenFile) > 0) {
return nil, fmt.Errorf("at most one of HTTP basic auth (username/password), bearer-token & bearer-token-file is allowed to be configured")
}
if len(c.bearerToken) > 0 && len(c.bearerTokenFile) > 0 {
return nil, fmt.Errorf("at most one of the options bearer-token & bearer-token-file is allowed to be configured")
}
if c.bearerToken != "" {
h.Set("Authorization", "Bearer "+c.bearerToken)
}
if c.bearerTokenFile != "" {
b, err := os.ReadFile(c.bearerTokenFile)
if err != nil {
return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", c.bearerTokenFile, err)
}
bearerToken := strings.TrimSpace(string(b))
h.Set("Authorization", "Bearer "+bearerToken)
}
return h, nil
}
|
netobserv
| |||
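A sketch of the precedence enforced above: basic auth, an inline bearer token, and a token file are mutually exclusive, and whichever token form is present wins the Authorization header (the token value is hypothetical):

func exampleAuthHeader(c *lokiClient) error {
	h, err := c.withToken("sha256~example-token").getHTTPRequestHeader()
	if err != nil {
		// e.g. username/password was also set on the client
		return err
	}
	_ = h.Get("Authorization") // "Bearer sha256~example-token"
	return nil
}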
function
|
openshift/openshift-tests-private
|
8d1b450f-8544-4cfb-8899-575fd2d1cad2
|
doRequest
|
['"crypto/tls"', '"encoding/json"', '"fmt"', '"io"', '"net/http"', '"net/url"', '"path"']
|
['lokiClient']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) doRequest(path, query string, quiet bool, out interface{}) error {
us, err := buildURL(c.address, path, query)
if err != nil {
return err
}
if !quiet {
e2e.Logf(us)
}
req, err := http.NewRequest("GET", us, nil)
if err != nil {
return err
}
h, err := c.getHTTPRequestHeader()
if err != nil {
return err
}
req.Header = h
var tr *http.Transport
proxy := getProxyFromEnv()
// don't use proxy if svc/loki is port-forwarded to localhost
if !c.localhost && len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
Proxy: http.ProxyURL(proxyURL),
}
} else {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
client := &http.Client{Transport: tr}
var resp *http.Response
attempts := c.retries + 1
success := false
for attempts > 0 {
attempts--
resp, err = client.Do(req)
if err != nil {
e2e.Logf("error sending request %v", err)
continue
}
if resp.StatusCode/100 != 2 {
buf, _ := io.ReadAll(resp.Body) // nolint
e2e.Logf("Error response from server: %s (%v) attempts remaining: %d", string(buf), err, attempts)
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body", err)
}
continue
}
success = true
break
}
if !success {
return fmt.Errorf("run out of attempts while querying the server")
}
defer func() {
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body", err)
}
}()
return json.NewDecoder(resp.Body).Decode(out)
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
70200762-5546-4b54-9b75-7fad2d3512cf
|
doQuery
|
['"path"']
|
['lokiClient', 'lokiQueryResponse']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) doQuery(path string, query string, quiet bool) (*lokiQueryResponse, error) {
var err error
var r lokiQueryResponse
if err = c.doRequest(path, query, quiet, &r); err != nil {
return nil, err
}
return &r, nil
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
c8be1c19-b203-4490-b35d-429773a35bee
|
queryRange
|
['"time"']
|
['lokiClient', 'lokiQueryResponse']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) queryRange(logType string, queryStr string, limit int, start, end time.Time, forward bool) (*lokiQueryResponse, error) {
direction := func() string {
if forward {
return "FORWARD"
}
return "BACKWARD"
}
params := newQueryStringBuilder()
params.setString("query", queryStr)
params.setInt32("limit", limit)
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
params.setString("direction", direction())
logPath := ""
if len(logType) > 0 {
logPath = apiPath + logType + queryRangePath
} else {
logPath = queryRangePath
}
return c.doQuery(logPath, params.encode(), c.quiet)
}
|
netobserv
| |||
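A usage sketch: a BACKWARD query over the last hour against a tenant named "network" (the tenant and label are assumptions for illustration); the parameters above are encoded onto apiPath + logType + queryRangePath:

func exampleQueryRange(c *lokiClient) (*lokiQueryResponse, error) {
	end := time.Now()
	start := end.Add(-1 * time.Hour)
	// resolves to /api/logs/v1/network/loki/api/v1/query_range?direction=BACKWARD&...
	return c.queryRange("network", `{SrcK8S_Namespace="default"}`, 20, start, end, false)
}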
function
|
openshift/openshift-tests-private
|
e03df445-c549-4900-a904-d05ceb7b6ed1
|
searchLogsInLoki
|
['"time"']
|
['lokiClient', 'lokiQueryResponse']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) searchLogsInLoki(logType, query string) (*lokiQueryResponse, error) {
res, err := c.queryRange(logType, query, 50, c.startTime, time.Now(), false)
return res, err
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
7f390e67-3183-42c1-ac94-31a283f37aff
|
searchByKey
|
['lokiClient', 'lokiQueryResponse']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_client.go
|
func (c *lokiClient) searchByKey(logType, key, value string) (*lokiQueryResponse, error) {
res, err := c.searchLogsInLoki(logType, "{"+key+"=\""+value+"\"}")
return res, err
}
|
netobserv
| ||||
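For example (tenant and label values are illustrative), this fetches up to 50 entries since the client's startTime whose stream matches the generated selector:

func exampleSearchByKey(c *lokiClient) (*lokiQueryResponse, error) {
	// expands to the LogQL selector {DstK8S_Namespace="netobserv"}
	return c.searchByKey("network", "DstK8S_Namespace", "netobserv")
}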
file
|
openshift/openshift-tests-private
|
63b99622-6e73-4d5d-b2aa-cba9d3f09556
|
loki_storage
|
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/iam/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
package netobserv
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/iam/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
const (
minioNS = "minio-aosqe"
minioSecret = "minio-creds"
apiPath = "/api/logs/v1/"
queryRangePath = "/loki/api/v1/query_range"
)
// s3Credential defines the s3 credentials
type s3Credential struct {
Region string
AccessKeyID string
SecretAccessKey string
Endpoint string //the endpoint of s3 service
}
type resource struct {
kind string
name string
namespace string
}
func getAWSCredentialFromCluster(oc *exutil.CLI) s3Credential {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/aws_access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/aws_secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
cred := s3Credential{Region: region, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
return cred
}
// get azure storage account from image registry
// TODO: create a storage account and use that account to manage the azure container
func getAzureStorageAccount(oc *exutil.CLI) (string, string) {
var accountName string
imageRegistry, err := oc.AdminKubeClient().AppsV1().Deployments("openshift-image-registry").Get(context.Background(), "image-registry", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, container := range imageRegistry.Spec.Template.Spec.Containers {
for _, env := range container.Env {
if env.Name == "REGISTRY_STORAGE_AZURE_ACCOUNTNAME" {
accountName = env.Value
break
}
}
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/image-registry-private-configuration", "-n", "openshift-image-registry", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accountKey, err := os.ReadFile(dirname + "/REGISTRY_STORAGE_AZURE_ACCOUNTKEY")
o.Expect(err).NotTo(o.HaveOccurred())
return accountName, string(accountKey)
}
func createSecretForODFBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/noobaa-admin", "-n", "openshift-storage", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://s3.openshift-storage.svc"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/AWS_ACCESS_KEY_ID", "--from-file=access_key_secret="+dirname+"/AWS_SECRET_ACCESS_KEY", "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
func createSecretForMinIOBucket(oc *exutil.CLI, bucketName, secretName, ns, minIONS string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+minioSecret, "-n", minIONS, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://minio." + minIONS + ".svc"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/access_key_id", "--from-file=access_key_secret="+dirname+"/secret_access_key", "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
func createS3Bucket(client *s3.Client, bucketName string, cred s3Credential) error {
// check if the bucket exists or not
// if exists, clear all the objects in the bucket
// if not, create the bucket
exist := false
buckets, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
if err != nil {
return err
}
for _, bu := range buckets.Buckets {
if *bu.Name == bucketName {
exist = true
break
}
}
// clear all the objects in the bucket
if exist {
return emptyS3Bucket(client, bucketName)
}
/*
Per https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#API_CreateBucket_RequestBody,
us-east-1 is the default region and is not a valid value for LocationConstraint;
using `LocationConstraint: types.BucketLocationConstraint("us-east-1")` returns an `InvalidLocationConstraint` error,
so the bucket configuration is omitted when the region is empty or us-east-1.
*/
if len(cred.Region) == 0 || cred.Region == "us-east-1" {
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName})
return err
}
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName, CreateBucketConfiguration: &types.CreateBucketConfiguration{LocationConstraint: types.BucketLocationConstraint(cred.Region)}})
return err
}
func deleteS3Bucket(client *s3.Client, bucketName string) error {
// empty bucket
err := emptyS3Bucket(client, bucketName)
if err != nil {
return err
}
// delete bucket
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: &bucketName})
return err
}
func emptyS3Bucket(client *s3.Client, bucketName string) error {
// List objects in the bucket
objects, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: &bucketName,
})
if err != nil {
return err
}
// Delete objects in the bucket
if len(objects.Contents) > 0 {
objectIdentifiers := make([]types.ObjectIdentifier, len(objects.Contents))
for i, object := range objects.Contents {
objectIdentifiers[i] = types.ObjectIdentifier{Key: object.Key}
}
quiet := true
_, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
Bucket: &bucketName,
Delete: &types.Delete{
Objects: objectIdentifiers,
Quiet: &quiet,
},
})
if err != nil {
return err
}
}
// Check if there are more objects to delete and handle pagination
if objects.IsTruncated != nil && *objects.IsTruncated {
return emptyS3Bucket(client, bucketName)
}
return nil
}
// CreateSecretForAWSS3Bucket creates a secret for Loki to connect to s3 bucket
func createSecretForAWSS3Bucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
cred := getAWSCredentialFromCluster(oc)
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
f1, err1 := os.Create(dirname + "/aws_access_key_id")
o.Expect(err1).NotTo(o.HaveOccurred())
defer f1.Close()
_, err = f1.WriteString(cred.AccessKeyID)
o.Expect(err).NotTo(o.HaveOccurred())
f2, err2 := os.Create(dirname + "/aws_secret_access_key")
o.Expect(err2).NotTo(o.HaveOccurred())
defer f2.Close()
_, err = f2.WriteString(cred.SecretAccessKey)
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "https://s3." + cred.Region + ".amazonaws.com"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/aws_access_key_id", "--from-file=access_key_secret="+dirname+"/aws_secret_access_key", "--from-literal=region="+cred.Region, "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
// Creates a secret for Loki to connect to gcs bucket
func createSecretForGCSBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
// for GCP STS clusters, get gcp-credentials from env var GOOGLE_APPLICATION_CREDENTIALS
// TODO: support using STS token to create the secret
_, err = oc.AdminKubeClient().CoreV1().Secrets("kube-system").Get(context.Background(), "gcp-credentials", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
gcsCred := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", ns, "--from-literal=bucketname="+bucketName, "--from-file=key.json="+gcsCred).Execute()
}
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/gcp-credentials", "-n", "kube-system", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", ns, "--from-literal=bucketname="+bucketName, "--from-file=key.json="+dirname+"/service_account.json").Execute()
}
func createSecretForSwiftContainer(oc *exutil.CLI, containerName, secretName, ns string, cred *exutil.OpenstackCredentials) error {
userID, domainID := exutil.GetOpenStackUserIDAndDomainID(cred)
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName,
"--from-literal=auth_url="+cred.Clouds.Openstack.Auth.AuthURL,
"--from-literal=username="+cred.Clouds.Openstack.Auth.Username,
"--from-literal=user_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=user_domain_id="+domainID,
"--from-literal=user_id="+userID,
"--from-literal=password="+cred.Clouds.Openstack.Auth.Password,
"--from-literal=domain_id="+domainID,
"--from-literal=domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=container_name="+containerName,
"--from-literal=project_id="+cred.Clouds.Openstack.Auth.ProjectID,
"--from-literal=project_name="+cred.Clouds.Openstack.Auth.ProjectName,
"--from-literal=project_domain_id="+domainID,
"--from-literal=project_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName).Execute()
return err
}
// creates a secret for Loki to connect to azure container
func createSecretForAzureContainer(oc *exutil.CLI, bucketName, secretName, ns string) error {
environment := "AzureGlobal"
accountName, accountKey := getAzureStorageAccount(oc)
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName, "--from-literal=environment="+environment, "--from-literal=container="+bucketName, "--from-literal=account_name="+accountName, "--from-literal=account_key="+accountKey).Execute()
return err
}
func getODFCreds(oc *exutil.CLI) s3Credential {
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/noobaa-admin", "-n", "openshift-storage", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/AWS_ACCESS_KEY_ID")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/AWS_SECRET_ACCESS_KEY")
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://" + getRouteAddress(oc, "openshift-storage", "s3")
return s3Credential{Endpoint: endpoint, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
}
func getMinIOCreds(oc *exutil.CLI, ns string) s3Credential {
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+minioSecret, "-n", ns, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://" + getRouteAddress(oc, ns, "minio")
return s3Credential{Endpoint: endpoint, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
}
// initialize a s3 client with credential
// TODO: add an option to initialize a new client with STS
func newS3Client(cred s3Credential) *s3.Client {
var err error
var cfg aws.Config
if len(cred.Endpoint) > 0 {
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: cred.Endpoint,
HostnameImmutable: true,
Source: aws.EndpointSourceCustom,
}, nil
})
// For ODF and MinIO, the storage services are deployed inside the OCP cluster.
// In some clusters they can't be reached without a proxy, so add proxy settings to the s3 client when http_proxy or https_proxy is set in the environment.
httpClient := awshttp.NewBuildableClient().WithTransportOptions(func(tr *http.Transport) {
proxy := getProxyFromEnv()
if len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr.Proxy = http.ProxyURL(proxyURL)
}
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
})
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithEndpointResolverWithOptions(customResolver),
config.WithHTTPClient(httpClient),
config.WithRegion("auto"))
} else {
// aws s3
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithRegion(cred.Region))
}
o.Expect(err).NotTo(o.HaveOccurred())
return s3.NewFromConfig(cfg)
}
func getStorageClassName(oc *exutil.CLI) (string, error) {
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
if err != nil {
return "", err
}
if len(scs.Items) == 0 {
return "", fmt.Errorf("there is no storageclass in the cluster")
}
for _, sc := range scs.Items {
if sc.ObjectMeta.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
return sc.Name, nil
}
}
return scs.Items[0].Name, nil
}
func getSATokenFromSecret(oc *exutil.CLI, name, ns string) string {
secrets, err := oc.AdminKubeClient().CoreV1().Secrets(ns).List(context.Background(), metav1.ListOptions{})
if err != nil {
return ""
}
var secret string
for _, s := range secrets.Items {
if strings.Contains(s.Name, name+"-token") {
secret = s.Name
break
}
}
dirname := "/tmp/" + oc.Namespace() + "-sa"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+secret, "-n", ns, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken, err := os.ReadFile(dirname + "/token")
o.Expect(err).NotTo(o.HaveOccurred())
return string(bearerToken)
}
// PrepareResourcesForLokiStack creates buckets/containers in backend storage provider, and creates the secret for Loki to use
func (l lokiStack) prepareResourcesForLokiStack(oc *exutil.CLI) error {
var err error
if len(l.BucketName) == 0 {
return fmt.Errorf("the bucketName should not be empty")
}
switch l.StorageType {
case "s3":
{
if exutil.IsWorkloadIdentityCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg := readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
stsClient := newStsClient(cfg)
awsAccountID, _ := getAwsAccount(stsClient)
oidcName, err := getOIDC(oc)
o.Expect(err).NotTo(o.HaveOccurred())
lokiIAMRoleName := l.Name + "-" + exutil.GetRandomString()
roleArn := createIAMRoleForLokiSTSDeployment(iamClient, oidcName, awsAccountID, l.Namespace, l.Name, lokiIAMRoleName)
os.Setenv("LOKI_ROLE_NAME_ON_STS", lokiIAMRoleName)
patchLokiOperatorWithAWSRoleArn(oc, "loki-operator", "openshift-operators-redhat", roleArn)
var s3AssumeRoleName string
defer func() {
deleteIAMroleonAWS(iamClient, s3AssumeRoleName)
}()
s3AssumeRoleArn, s3AssumeRoleName := createS3AssumeRole(stsClient, iamClient, l.Name)
createS3ObjectStorageBucketWithSTS(cfg, stsClient, s3AssumeRoleArn, l.BucketName)
createObjectStorageSecretOnAWSSTSCluster(oc, region, l.StorageSecret, l.BucketName, l.Namespace)
} else {
cred := getAWSCredentialFromCluster(oc)
client := newS3Client(cred)
err = createS3Bucket(client, l.BucketName, cred)
if err != nil {
return err
}
err = createSecretForAWSS3Bucket(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
if !readAzureCredentials() {
g.Skip("Azure Credentials not found. Skip case!")
} else {
performManagedIdentityAndSecretSetupForAzureWIF(oc, l.Name, l.Namespace, l.BucketName, l.StorageSecret)
}
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
if err1 != nil {
return fmt.Errorf("can't get azure storage account from cluster: %v", err1)
}
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.BucketName)
if err2 != nil {
return err2
}
err = exutil.CreateAzureStorageBlobContainer(client)
if err != nil {
return err
}
err = createSecretForAzureContainer(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
}
case "gcs":
{
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
err = exutil.CreateGCSBucket(projectID, l.BucketName)
if err != nil {
return err
}
if exutil.IsWorkloadIdentityCluster(oc) {
clusterName := getInfrastructureName(oc)
gcsSAName := generateServiceAccountNameForGCS(clusterName)
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME", gcsSAName)
projectNumber, err1 := getGCPProjectNumber(projectID)
if err1 != nil {
return fmt.Errorf("can't get GCP project number: %v", err1)
}
poolID, err2 := getPoolID(oc)
if err2 != nil {
return fmt.Errorf("can't get pool ID: %v", err2)
}
sa, err3 := createServiceAccountOnGCP(projectID, gcsSAName)
if err3 != nil {
return fmt.Errorf("can't create service account: %v", err3)
}
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL", sa.Email)
err4 := grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, l.Namespace, l.Name, sa.Email)
if err4 != nil {
return fmt.Errorf("can't add roles to the serviceaccount: %v", err4)
}
err = createSecretForGCSBucketWithSTS(oc, projectNumber, poolID, sa.Email, l.Namespace, l.StorageSecret, l.BucketName)
} else {
err = createSecretForGCSBucket(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.CreateOpenStackContainer(client, l.BucketName)
if err != nil {
return err
}
err = createSecretForSwiftContainer(oc, l.BucketName, l.StorageSecret, l.Namespace, cred)
}
case "odf":
{
err = createObjectBucketClaim(oc, l.Namespace, l.BucketName)
if err != nil {
return err
}
err = createSecretForODFBucket(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
client := newS3Client(cred)
err = createS3Bucket(client, l.BucketName, cred)
if err != nil {
return err
}
err = createSecretForMinIOBucket(oc, l.BucketName, l.StorageSecret, l.Namespace, minioNS)
}
}
return err
}
func (l lokiStack) removeObjectStorage(oc *exutil.CLI) {
resource{"secret", l.StorageSecret, l.Namespace}.clear(oc)
var err error
switch l.StorageType {
case "s3":
{
if exutil.IsWorkloadIdentityCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg := readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
stsClient := newStsClient(cfg)
var s3AssumeRoleName string
defer func() {
deleteIAMroleonAWS(iamClient, s3AssumeRoleName)
}()
s3AssumeRoleArn, s3AssumeRoleName := createS3AssumeRole(stsClient, iamClient, l.Name)
if checkIfS3bucketExistsWithSTS(cfg, stsClient, s3AssumeRoleArn, l.BucketName) {
deleteS3bucketWithSTS(cfg, stsClient, s3AssumeRoleArn, l.BucketName)
}
deleteIAMroleonAWS(iamClient, os.Getenv("LOKI_ROLE_NAME_ON_STS"))
os.Unsetenv("LOKI_ROLE_NAME_ON_STS")
} else {
cred := getAWSCredentialFromCluster(oc)
client := newS3Client(cred)
err = deleteS3Bucket(client, l.BucketName)
}
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
resourceGroup, err := getResourceGroupOnAzure(oc)
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
deleteManagedIdentityOnAzure(cred, azureSubscriptionID, resourceGroup, l.Name)
deleteAzureStorageAccount(cred, azureSubscriptionID, resourceGroup, os.Getenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT"))
os.Unsetenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT")
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.BucketName)
o.Expect(err2).NotTo(o.HaveOccurred())
err = exutil.DeleteAzureStorageBlobContainer(client)
}
}
case "gcs":
{
if exutil.IsWorkloadIdentityCluster(oc) {
sa := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
if sa == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_NAME is not set, no need to delete the serviceaccount")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
email := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
if email == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL is not set, no need to delete the policies")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
projectNumber, _ := getGCPProjectNumber(projectID)
poolID, _ := getPoolID(oc)
err = removePermissionsFromGCPServiceAccount(poolID, projectID, projectNumber, l.Namespace, l.Name, email)
o.Expect(err).NotTo(o.HaveOccurred())
err = removeServiceAccountFromGCP("projects/" + projectID + "/serviceAccounts/" + email)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
}
err = exutil.DeleteGCSBucket(l.BucketName)
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.DeleteOpenStackContainer(client, l.BucketName)
}
case "odf":
{
err = deleteObjectBucketClaim(oc, l.Namespace, l.BucketName)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
client := newS3Client(cred)
err = deleteS3Bucket(client, l.BucketName)
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func useExtraObjectStorage(oc *exutil.CLI) string {
if checkODF(oc) {
e2e.Logf("use the existing ODF storage service")
return "odf"
}
ready, err := checkMinIO(oc, minioNS)
if ready {
e2e.Logf("use existing MinIO storage service")
return "minio"
}
if err != nil && (strings.Contains(err.Error(), "No resources found") || strings.Contains(err.Error(), "not found")) {
e2e.Logf("deploy MinIO and use this MinIO as storage service")
deployMinIO(oc)
return "minio"
}
return ""
}
// return the storage type per different platform
func getStorageType(oc *exutil.CLI) string {
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
{
return "s3"
}
case "gcp":
{
return "gcs"
}
case "azure":
{
return "azure"
}
case "openstack":
{
return "swift"
}
default:
{
return useExtraObjectStorage(oc)
}
}
}
// checkODF checks whether ODF is installed in the cluster
// it only checks the expected storageclasses and the svc/s3 in openshift-storage
func checkODF(oc *exutil.CLI) bool {
svcFound := false
expectedSC := []string{"openshift-storage.noobaa.io", "ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"}
var scInCluster []string
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, sc := range scs.Items {
scInCluster = append(scInCluster, sc.Name)
}
for _, s := range expectedSC {
if !contain(scInCluster, s) {
return false
}
}
_, err = oc.AdminKubeClient().CoreV1().Services("openshift-storage").Get(context.Background(), "s3", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return svcFound
}
// checkMinIO checks whether the MinIO pod and service are ready in the given namespace
func checkMinIO(oc *exutil.CLI, ns string) (bool, error) {
podReady, svcFound := false, false
pod, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "app=minio"})
if err != nil {
return false, err
}
if len(pod.Items) > 0 && pod.Items[0].Status.Phase == "Running" {
podReady = true
}
_, err = oc.AdminKubeClient().CoreV1().Services(ns).Get(context.Background(), "minio", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return podReady && svcFound, err
}
func deployMinIO(oc *exutil.CLI) {
// create namespace
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), minioNS, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// create secret
_, err = oc.AdminKubeClient().CoreV1().Secrets(minioNS).Get(context.Background(), minioSecret, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", minioSecret, "-n", minioNS, "--from-literal=access_key_id="+getRandomString(), "--from-literal=secret_access_key=passwOOrd"+getRandomString()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// deploy minIO
deployTemplate := exutil.FixturePath("testdata", "logging", "minIO", "deploy.yaml")
deployFile, err := processTemplate(oc, "-n", minioNS, "-f", deployTemplate, "-p", "NAMESPACE="+minioNS, "NAME=minio", "SECRET_NAME="+minioSecret)
defer os.Remove(deployFile)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("apply").Args("-f", deployFile, "-n", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait for minio to be ready
for _, rs := range []string{"deployment", "svc", "route"} {
resource{rs, "minio", minioNS}.WaitForResourceToAppear(oc)
}
WaitForDeploymentPodsToBeReady(oc, minioNS, "minio")
}
func removeMinIO(oc *exutil.CLI) {
deleteNamespace(oc, minioNS)
}
// WaitForDeploymentPodsToBeReady waits for the specific deployment to be ready
func WaitForDeploymentPodsToBeReady(oc *exutil.CLI, namespace string, name string) {
var selectors map[string]string
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
deployment, err := oc.AdminKubeClient().AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for deployment/%s to appear\n", name)
return false, nil
}
return false, err
}
selectors = deployment.Spec.Selector.MatchLabels
if deployment.Status.AvailableReplicas == *deployment.Spec.Replicas && deployment.Status.UpdatedReplicas == *deployment.Spec.Replicas {
e2e.Logf("Deployment %s available (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s deployment (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("deployment %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("deployment %s is not available", name))
}
func (r resource) WaitForResourceToAppear(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
msg := fmt.Sprintf("%v", output)
if strings.Contains(msg, "NotFound") {
return false, nil
}
return false, err
}
e2e.Logf("Find %s %s", r.kind, r.name)
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource %s/%s is not appear", r.kind, r.name))
}
func generateServiceAccountNameForGCS(clusterName string) string {
// Service Account should be between 6-30 characters long
name := clusterName + getRandomString()
return name
}
// To read Azure subscription json file from local disk.
// Also injects ENV vars needed to perform certain operations on Managed Identities.
func readAzureCredentials() bool {
var azureCredFile string
envDir, present := os.LookupEnv("CLUSTER_PROFILE_DIR")
if present {
azureCredFile = filepath.Join(envDir, "osServicePrincipal.json")
} else {
authFileLocation, present := os.LookupEnv("AZURE_AUTH_LOCATION")
if present {
azureCredFile = authFileLocation
}
}
if len(azureCredFile) > 0 {
fileContent, err := os.ReadFile(azureCredFile)
o.Expect(err).NotTo(o.HaveOccurred())
subscriptionID := gjson.Get(string(fileContent), `azure_subscription_id`).String()
if subscriptionID == "" {
subscriptionID = gjson.Get(string(fileContent), `subscriptionId`).String()
}
os.Setenv("AZURE_SUBSCRIPTION_ID", subscriptionID)
tenantID := gjson.Get(string(fileContent), `azure_tenant_id`).String()
if tenantID == "" {
tenantID = gjson.Get(string(fileContent), `tenantId`).String()
}
os.Setenv("AZURE_TENANT_ID", tenantID)
clientID := gjson.Get(string(fileContent), `azure_client_id`).String()
if clientID == "" {
clientID = gjson.Get(string(fileContent), `clientId`).String()
}
os.Setenv("AZURE_CLIENT_ID", clientID)
clientSecret := gjson.Get(string(fileContent), `azure_client_secret`).String()
if clientSecret == "" {
clientSecret = gjson.Get(string(fileContent), `clientSecret`).String()
}
os.Setenv("AZURE_CLIENT_SECRET", clientSecret)
return true
}
return false
}
func getGCPProjectNumber(projectID string) (string, error) {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return "", err
}
project, err := crmService.Projects.Get(projectID).Do()
if err != nil {
return "", err
}
return strconv.FormatInt(project.ProjectNumber, 10), nil
}
func getPoolID(oc *exutil.CLI) (string, error) {
// pool_id="$(oc get authentication cluster -o json | jq -r .spec.serviceAccountIssuer | sed 's/.*\/\([^\/]*\)-oidc/\1/')"
issuer, err := getOIDC(oc)
if err != nil {
return "", err
}
return strings.Split(strings.Split(issuer, "/")[1], "-oidc")[0], nil
}
func createServiceAccountOnGCP(projectID, name string) (*iam.ServiceAccount, error) {
e2e.Logf("start to creating serviceaccount on GCP")
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return nil, fmt.Errorf("iam.NewService: %w", err)
}
request := &iam.CreateServiceAccountRequest{
AccountId: name,
ServiceAccount: &iam.ServiceAccount{
DisplayName: "Service Account for " + name,
},
}
account, err := service.Projects.ServiceAccounts.Create("projects/"+projectID, request).Do()
if err != nil {
return nil, fmt.Errorf("failed to create serviceaccount: %w", err)
}
e2e.Logf("Created service account: %v", account)
return account, nil
}
func grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, lokiNS, lokiStackName, serviceAccountEmail string) error {
gcsRoles := []string{
"roles/iam.workloadIdentityUser",
"roles/storage.objectAdmin",
}
subjects := []string{
"system:serviceaccount:" + lokiNS + ":" + lokiStackName,
"system:serviceaccount:" + lokiNS + ":" + lokiStackName + "-ruler",
}
for _, role := range gcsRoles {
err := addBinding(projectID, "serviceAccount:"+serviceAccountEmail, role)
if err != nil {
return fmt.Errorf("error adding role %s to %s: %v", role, serviceAccountEmail, err)
}
for _, sub := range subjects {
err := addBinding(projectID, "principal://iam.googleapis.com/projects/"+projectNumber+"/locations/global/workloadIdentityPools/"+poolID+"/subject/"+sub, role)
if err != nil {
return fmt.Errorf("error adding role %s to %s: %v", role, sub, err)
}
}
}
return nil
}
// ref: https://github.com/GoogleCloudPlatform/golang-samples/blob/main/iam/quickstart/quickstart.go
func addBinding(projectID, member, role string) error {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return fmt.Errorf("cloudresourcemanager.NewService: %v", err)
}
policy, err := getPolicy(crmService, projectID)
if err != nil {
return fmt.Errorf("error getting policy: %v", err)
}
// Find the policy binding for role. Only one binding can have the role.
var binding *cloudresourcemanager.Binding
for _, b := range policy.Bindings {
if b.Role == role {
binding = b
break
}
}
if binding != nil {
// If the binding exists, adds the member to the binding
binding.Members = append(binding.Members, member)
} else {
// If the binding does not exist, adds a new binding to the policy
binding = &cloudresourcemanager.Binding{
Role: role,
Members: []string{member},
}
policy.Bindings = append(policy.Bindings, binding)
}
return setPolicy(crmService, projectID, policy)
}
// getPolicy gets the project's IAM policy
func getPolicy(crmService *cloudresourcemanager.Service, projectID string) (*cloudresourcemanager.Policy, error) {
request := new(cloudresourcemanager.GetIamPolicyRequest)
policy, err := crmService.Projects.GetIamPolicy(projectID, request).Do()
if err != nil {
return nil, err
}
return policy, nil
}
// setPolicy sets the project's IAM policy
func setPolicy(crmService *cloudresourcemanager.Service, projectID string, policy *cloudresourcemanager.Policy) error {
request := new(cloudresourcemanager.SetIamPolicyRequest)
request.Policy = policy
_, err := crmService.Projects.SetIamPolicy(projectID, request).Do()
return err
}
func createObjectBucketClaim(oc *exutil.CLI, ns, name string) error {
template := exutil.FixturePath("testdata", "logging", "odf", "objectBucketClaim.yaml")
obc := resource{"objectbucketclaims", name, ns}
err := obc.applyFromTemplate(oc, "-f", template, "-n", ns, "-p", "NAME="+name, "NAMESPACE="+ns)
if err != nil {
return err
}
obc.WaitForResourceToAppear(oc)
resource{"objectbuckets", "obc-" + ns + "-" + name, ns}.WaitForResourceToAppear(oc)
assertResourceStatus(oc, "objectbucketclaims", name, ns, "{.status.phase}", "Bound")
return nil
}
func (r resource) applyFromTemplate(oc *exutil.CLI, parameters ...string) error {
parameters = append(parameters, "-n", r.namespace)
file, err := processTemplate(oc, parameters...)
defer os.Remove(file)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
output, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", r.namespace).Output()
if err != nil {
return fmt.Errorf(output)
}
r.WaitForResourceToAppear(oc)
return nil
}
// Assert the status of a resource
func assertResourceStatus(oc *exutil.CLI, kind, name, namespace, jsonpath, exptdStatus string) {
parameters := []string{kind, name, "-o", "jsonpath=" + jsonpath}
if namespace != "" {
parameters = append(parameters, "-n", namespace)
}
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(parameters...).Output()
if err != nil {
return false, err
}
if strings.Compare(status, exptdStatus) != 0 {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s/%s value for %s is not %s", kind, name, jsonpath, exptdStatus))
}
func createSecretForGCSBucketWithSTS(oc *exutil.CLI, projectNumber, poolID, serviceAccountEmail, ns, name, bucketName string) error {
providerName := "projects/" + projectNumber + "/locations/global/workloadIdentityPools/" + poolID + "/providers/" + poolID
audience, err := getGCPAudience(providerName)
if err != nil {
return err
}
key := `{
"universe_domain": "googleapis.com",
"type": "external_account",
"audience": "//iam.googleapis.com/` + providerName + `",
"subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
"token_url": "https://sts.googleapis.com/v1/token",
"credential_source": {
"file": "/var/run/secrets/storage/serviceaccount/token",
"format": {
"type": "text"
}
},
"service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/` + serviceAccountEmail + `:generateAccessToken"
}`
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, name,
"--from-literal=bucketname="+bucketName, "--from-literal=audience="+audience, "--from-literal=key.json="+key).Execute()
}
func getGCPAudience(providerName string) (string, error) {
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return "", fmt.Errorf("iam.NewService: %w", err)
}
audience, err := service.Projects.Locations.WorkloadIdentityPools.Providers.Get(providerName).Do()
if err != nil {
return "", fmt.Errorf("can't get audience: %v", err)
}
return audience.Oidc.AllowedAudiences[0], nil
}
// delete the objects in the cluster
func (r resource) clear(oc *exutil.CLI) error {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", msg)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
return nil
}
return err
}
err = r.WaitUntilResourceIsGone(oc)
return err
}
func (r resource) WaitUntilResourceIsGone(oc *exutil.CLI) error {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", output)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
return true, nil
}
return true, err
}
return false, nil
})
if err != nil {
return fmt.Errorf("can't remove %s/%s in %s project", r.kind, r.name, r.namespace)
}
return nil
}
func removePermissionsFromGCPServiceAccount(poolID, projectID, projectNumber, lokiNS, lokiStackName, serviceAccountEmail string) error {
gcsRoles := []string{
"roles/iam.workloadIdentityUser",
"roles/storage.objectAdmin",
}
subjects := []string{
"system:serviceaccount:" + lokiNS + ":" + lokiStackName,
"system:serviceaccount:" + lokiNS + ":" + lokiStackName + "-ruler",
}
for _, role := range gcsRoles {
err := removeMember(projectID, "serviceAccount:"+serviceAccountEmail, role)
if err != nil {
return fmt.Errorf("error removing role %s from %s: %v", role, serviceAccountEmail, err)
}
for _, sub := range subjects {
err := removeMember(projectID, "principal://iam.googleapis.com/projects/"+projectNumber+"/locations/global/workloadIdentityPools/"+poolID+"/subject/"+sub, role)
if err != nil {
return fmt.Errorf("error removing role %s from %s: %v", role, sub, err)
}
}
}
return nil
}
func removeServiceAccountFromGCP(name string) error {
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return fmt.Errorf("iam.NewService: %w", err)
}
_, err = service.Projects.ServiceAccounts.Delete(name).Do()
if err != nil {
return fmt.Errorf("can't remove service account: %v", err)
}
return nil
}
// removeMember removes the member from the project's IAM policy
func removeMember(projectID, member, role string) error {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return fmt.Errorf("cloudresourcemanager.NewService: %v", err)
}
policy, err := getPolicy(crmService, projectID)
if err != nil {
return fmt.Errorf("error getting policy: %v", err)
}
// Find the policy binding for role. Only one binding can have the role.
var binding *cloudresourcemanager.Binding
var bindingIndex int
for i, b := range policy.Bindings {
if b.Role == role {
binding = b
bindingIndex = i
break
}
}
if binding == nil {
// the role has no binding in the policy, so there is nothing to remove
return nil
}
if len(binding.Members) == 1 && binding.Members[0] == member {
// If the member is the only member in the binding, removes the binding
last := len(policy.Bindings) - 1
policy.Bindings[bindingIndex] = policy.Bindings[last]
policy.Bindings = policy.Bindings[:last]
} else {
// If there is more than one member in the binding, removes the member
var memberIndex int
var exist bool
for i, mm := range binding.Members {
if mm == member {
memberIndex = i
exist = true
break
}
}
if exist {
last := len(policy.Bindings[bindingIndex].Members) - 1
binding.Members[memberIndex] = binding.Members[last]
binding.Members = binding.Members[:last]
}
}
return setPolicy(crmService, projectID, policy)
}
func deleteObjectBucketClaim(oc *exutil.CLI, ns, name string) error {
obc := resource{"objectbucketclaims", name, ns}
err := obc.clear(oc)
if err != nil {
return err
}
return obc.WaitUntilResourceIsGone(oc)
}
|
package netobserv
| ||||
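A hypothetical end-to-end flow for the storage helpers in this file, assuming the lokiStack struct (defined elsewhere in the package) carries the Name/Namespace/BucketName/StorageSecret/StorageType fields used above; all literal values are placeholders:

func exampleLokiStackStorage(oc *exutil.CLI) error {
	ls := lokiStack{
		Name:          "lokistack-netobserv",
		Namespace:     "netobserv",
		BucketName:    "netobserv-loki-" + getRandomString(),
		StorageSecret: "loki-storage-secret",
		StorageType:   getStorageType(oc),
	}
	if err := ls.prepareResourcesForLokiStack(oc); err != nil {
		return err
	}
	defer ls.removeObjectStorage(oc)
	// ... create the LokiStack CR and run assertions here ...
	return nil
}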
function
|
openshift/openshift-tests-private
|
bb9e6e28-d008-4f65-b35b-c6590f8b9011
|
getAWSCredentialFromCluster
|
['"os"', '"github.com/aws/aws-sdk-go-v2/aws"']
|
['s3Credential']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func getAWSCredentialFromCluster(oc *exutil.CLI) s3Credential {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/aws_access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/aws_secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
cred := s3Credential{Region: region, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
return cred
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
e56dfa37-8bd9-4705-bcdf-df83f508ba2b
|
getAzureStorageAccount
|
['"context"', '"os"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func getAzureStorageAccount(oc *exutil.CLI) (string, string) {
var accountName string
imageRegistry, err := oc.AdminKubeClient().AppsV1().Deployments("openshift-image-registry").Get(context.Background(), "image-registry", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, container := range imageRegistry.Spec.Template.Spec.Containers {
for _, env := range container.Env {
if env.Name == "REGISTRY_STORAGE_AZURE_ACCOUNTNAME" {
accountName = env.Value
break
}
}
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/image-registry-private-configuration", "-n", "openshift-image-registry", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accountKey, err := os.ReadFile(dirname + "/REGISTRY_STORAGE_AZURE_ACCOUNTKEY")
o.Expect(err).NotTo(o.HaveOccurred())
return accountName, string(accountKey)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
6c3de5c2-5574-4950-bc68-36f3b4823d25
|
createSecretForODFBucket
|
['"fmt"', '"net/http"', '"os"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func createSecretForODFBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/noobaa-admin", "-n", "openshift-storage", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://s3.openshift-storage.svc"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/AWS_ACCESS_KEY_ID", "--from-file=access_key_secret="+dirname+"/AWS_SECRET_ACCESS_KEY", "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
023164f4-a0e7-471f-b834-a731897d8f45
|
createSecretForMinIOBucket
|
['"fmt"', '"net/http"', '"os"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func createSecretForMinIOBucket(oc *exutil.CLI, bucketName, secretName, ns, minIONS string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+minioSecret, "-n", minIONS, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://minio." + minIONS + ".svc"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/access_key_id", "--from-file=access_key_secret="+dirname+"/secret_access_key", "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
bc1484ea-a47b-4977-a4bc-f78d7f384848
|
createS3Bucket
|
['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/s3"', '"github.com/aws/aws-sdk-go-v2/service/s3/types"']
|
['s3Credential']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func createS3Bucket(client *s3.Client, bucketName string, cred s3Credential) error {
// check if the bucket exists or not
// if exists, clear all the objects in the bucket
// if not, create the bucket
exist := false
buckets, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
if err != nil {
return err
}
for _, bu := range buckets.Buckets {
if *bu.Name == bucketName {
exist = true
break
}
}
// clear all the objects in the bucket
if exist {
return emptyS3Bucket(client, bucketName)
}
/*
Per https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#API_CreateBucket_RequestBody,
us-east-1 is the default region and is not a valid value for LocationConstraint;
using `LocationConstraint: types.BucketLocationConstraint("us-east-1")` returns an `InvalidLocationConstraint` error,
so the bucket configuration is omitted when the region is empty or us-east-1.
*/
if len(cred.Region) == 0 || cred.Region == "us-east-1" {
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName})
return err
}
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName, CreateBucketConfiguration: &types.CreateBucketConfiguration{LocationConstraint: types.BucketLocationConstraint(cred.Region)}})
return err
}
|
netobserv
| |||
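A sketch wiring the credential and client helpers together for plain (non-STS) AWS; the bucket name is a placeholder:

func exampleEnsureBucket(oc *exutil.CLI) error {
	cred := getAWSCredentialFromCluster(oc)
	client := newS3Client(cred)
	// creates the bucket, or empties it if it already exists
	return createS3Bucket(client, "netobserv-loki-bucket", cred)
}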
function
|
openshift/openshift-tests-private
|
daadc592-3480-44ef-b9b4-02c6fc65972d
|
deleteS3Bucket
|
['"context"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func deleteS3Bucket(client *s3.Client, bucketName string) error {
// empty bucket
err := emptyS3Bucket(client, bucketName)
if err != nil {
return err
}
// delete bucket
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: &bucketName})
return err
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
32d01c2a-afe1-4e86-99bc-33c1b77aeff5
|
emptyS3Bucket
|
['"context"', '"github.com/aws/aws-sdk-go-v2/service/s3"', '"github.com/aws/aws-sdk-go-v2/service/s3/types"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func emptyS3Bucket(client *s3.Client, bucketName string) error {
// List objects in the bucket
objects, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: &bucketName,
})
if err != nil {
return err
}
// Delete objects in the bucket
if len(objects.Contents) > 0 {
objectIdentifiers := make([]types.ObjectIdentifier, len(objects.Contents))
for i, object := range objects.Contents {
objectIdentifiers[i] = types.ObjectIdentifier{Key: object.Key}
}
quiet := true
_, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
Bucket: &bucketName,
Delete: &types.Delete{
Objects: objectIdentifiers,
Quiet: &quiet,
},
})
if err != nil {
return err
}
}
// Check if there are more objects to delete and handle pagination
if objects.IsTruncated != nil && *objects.IsTruncated {
return emptyS3Bucket(client, bucketName)
}
return nil
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
786eed49-16f3-4660-a78e-0e291aa495e5
|
createSecretForAWSS3Bucket
|
['"fmt"', '"os"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func createSecretForAWSS3Bucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
cred := getAWSCredentialFromCluster(oc)
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
f1, err1 := os.Create(dirname + "/aws_access_key_id")
o.Expect(err1).NotTo(o.HaveOccurred())
defer f1.Close()
_, err = f1.WriteString(cred.AccessKeyID)
o.Expect(err).NotTo(o.HaveOccurred())
f2, err2 := os.Create(dirname + "/aws_secret_access_key")
o.Expect(err2).NotTo(o.HaveOccurred())
defer f2.Close()
_, err = f2.WriteString(cred.SecretAccessKey)
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "https://s3." + cred.Region + ".amazonaws.com"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/aws_access_key_id", "--from-file=access_key_secret="+dirname+"/aws_secret_access_key", "--from-literal=region="+cred.Region, "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
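
// The secret layout (access_key_id, access_key_secret, region, bucketnames,
// endpoint) follows the object-storage secret format the Loki operator reads;
// inspect a generated secret with `oc extract secret/<name>` if in doubt.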
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
53ac70a5-2db1-4af9-9c35-965b04e198f7
|
createSecretForGCSBucket
|
['"context"', '"fmt"', '"os"', '"github.com/aws/aws-sdk-go-v2/credentials"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func createSecretForGCSBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
// for GCP STS clusters, get gcp-credentials from env var GOOGLE_APPLICATION_CREDENTIALS
// TODO: support using STS token to create the secret
_, err = oc.AdminKubeClient().CoreV1().Secrets("kube-system").Get(context.Background(), "gcp-credentials", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
gcsCred := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", ns, "--from-literal=bucketname="+bucketName, "--from-file=key.json="+gcsCred).Execute()
}
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/gcp-credentials", "-n", "kube-system", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", ns, "--from-literal=bucketname="+bucketName, "--from-file=key.json="+dirname+"/service_account.json").Execute()
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
0994653b-f2c1-4809-9aaa-652475bb8068
|
createSecretForSwiftContainer
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func createSecretForSwiftContainer(oc *exutil.CLI, containerName, secretName, ns string, cred *exutil.OpenstackCredentials) error {
userID, domainID := exutil.GetOpenStackUserIDAndDomainID(cred)
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName,
"--from-literal=auth_url="+cred.Clouds.Openstack.Auth.AuthURL,
"--from-literal=username="+cred.Clouds.Openstack.Auth.Username,
"--from-literal=user_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=user_domain_id="+domainID,
"--from-literal=user_id="+userID,
"--from-literal=password="+cred.Clouds.Openstack.Auth.Password,
"--from-literal=domain_id="+domainID,
"--from-literal=domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=container_name="+containerName,
"--from-literal=project_id="+cred.Clouds.Openstack.Auth.ProjectID,
"--from-literal=project_name="+cred.Clouds.Openstack.Auth.ProjectName,
"--from-literal=project_domain_id="+domainID,
"--from-literal=project_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName).Execute()
return err
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
8a64d488-e8b9-4f01-83be-69250df15549
|
createSecretForAzureContainer
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func createSecretForAzureContainer(oc *exutil.CLI, bucketName, secretName, ns string) error {
environment := "AzureGlobal"
accountName, accountKey := getAzureStorageAccount(oc)
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName, "--from-literal=environment="+environment, "--from-literal=container="+bucketName, "--from-literal=account_name="+accountName, "--from-literal=account_key="+accountKey).Execute()
return err
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
62ff6e78-58ba-48e8-b269-19c57b36e555
|
getODFCreds
|
['"net/http"', '"os"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
['s3Credential']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func getODFCreds(oc *exutil.CLI) s3Credential {
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/noobaa-admin", "-n", "openshift-storage", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/AWS_ACCESS_KEY_ID")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/AWS_SECRET_ACCESS_KEY")
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://" + getRouteAddress(oc, "openshift-storage", "s3")
return s3Credential{Endpoint: endpoint, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
e1fc1237-e849-475d-81a0-9ce7fae69a06
|
getMinIOCreds
|
['"net/http"', '"os"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"']
|
['s3Credential']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func getMinIOCreds(oc *exutil.CLI, ns string) s3Credential {
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+minioSecret, "-n", ns, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://" + getRouteAddress(oc, ns, "minio")
return s3Credential{Endpoint: endpoint, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
}
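
// Both getODFCreds and getMinIOCreds return an s3Credential with Endpoint set,
// which routes newS3Client (below) onto its custom-endpoint code path.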
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
586114b7-0d13-4fce-9f3e-0bd96ea00404
|
newS3Client
|
['"context"', '"crypto/tls"', '"net/http"', '"net/url"', '"github.com/aws/aws-sdk-go-v2/aws"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/aws/aws-sdk-go-v2/credentials"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
['s3Credential']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func newS3Client(cred s3Credential) *s3.Client {
var err error
var cfg aws.Config
if len(cred.Endpoint) > 0 {
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: cred.Endpoint,
HostnameImmutable: true,
Source: aws.EndpointSourceCustom,
}, nil
})
// ODF and MinIO are deployed inside OCP clusters.
// Some clusters can't be reached without a proxy, so add proxy settings to the
// s3 client when http_proxy or https_proxy is set in the environment.
httpClient := awshttp.NewBuildableClient().WithTransportOptions(func(tr *http.Transport) {
proxy := getProxyFromEnv()
if len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr.Proxy = http.ProxyURL(proxyURL)
}
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
})
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithEndpointResolverWithOptions(customResolver),
config.WithHTTPClient(httpClient),
config.WithRegion("auto"))
} else {
// aws s3
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithRegion(cred.Region))
}
o.Expect(err).NotTo(o.HaveOccurred())
return s3.NewFromConfig(cfg)
}
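
// Construction paths at a glance (values are placeholders):
//
//	// AWS proper: no Endpoint, region comes from the credential
//	awsClient := newS3Client(s3Credential{Region: "us-east-1", AccessKeyID: id, SecretAccessKey: key})
//	// In-cluster stores (ODF/MinIO): Endpoint set, so the custom resolver and
//	// proxy-aware, TLS-skipping HTTP client are used with region "auto"
//	odfClient := newS3Client(getODFCreds(oc))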
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
f6df6b91-e683-4634-bd79-4d6391879f98
|
getStorageClassName
|
['"context"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func getStorageClassName(oc *exutil.CLI) (string, error) {
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
if err != nil {
return "", err
}
if len(scs.Items) == 0 {
return "", fmt.Errorf("there is no storageclass in the cluster")
}
for _, sc := range scs.Items {
if sc.ObjectMeta.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
return sc.Name, nil
}
}
return scs.Items[0].Name, nil
}
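
// Preference order: the class annotated as the cluster default wins; failing
// that, the first StorageClass listed is returned.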
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
279d2272-df5d-44fe-af90-e7e0788f44b9
|
getSATokenFromSecret
|
['"context"', '"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func getSATokenFromSecret(oc *exutil.CLI, name, ns string) string {
secrets, err := oc.AdminKubeClient().CoreV1().Secrets(ns).List(context.Background(), metav1.ListOptions{})
if err != nil {
return ""
}
var secret string
for _, s := range secrets.Items {
if strings.Contains(s.Name, name+"-token") {
secret = s.Name
break
}
}
dirname := "/tmp/" + oc.Namespace() + "-sa"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+secret, "-n", ns, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken, err := os.ReadFile(dirname + "/token")
o.Expect(err).NotTo(o.HaveOccurred())
return string(bearerToken)
}
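
// Note: since Kubernetes 1.24 (OCP 4.11) ServiceAccount token secrets are no
// longer auto-generated, so the "<name>-token" lookup above may find nothing
// on newer clusters unless such a secret was created explicitly.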
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
da741c31-beb3-48fe-adcb-f579c20e6a2f
|
prepareResourcesForLokiStack
|
['"context"', '"fmt"', '"os"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func (l lokiStack) prepareResourcesForLokiStack(oc *exutil.CLI) error {
var err error
if len(l.BucketName) == 0 {
return fmt.Errorf("the bucketName should not be empty")
}
switch l.StorageType {
case "s3":
{
if exutil.IsWorkloadIdentityCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg := readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
stsClient := newStsClient(cfg)
awsAccountID, _ := getAwsAccount(stsClient)
oidcName, err := getOIDC(oc)
o.Expect(err).NotTo(o.HaveOccurred())
lokiIAMRoleName := l.Name + "-" + exutil.GetRandomString()
roleArn := createIAMRoleForLokiSTSDeployment(iamClient, oidcName, awsAccountID, l.Namespace, l.Name, lokiIAMRoleName)
os.Setenv("LOKI_ROLE_NAME_ON_STS", lokiIAMRoleName)
patchLokiOperatorWithAWSRoleArn(oc, "loki-operator", "openshift-operators-redhat", roleArn)
var s3AssumeRoleName string
defer func() {
deleteIAMroleonAWS(iamClient, s3AssumeRoleName)
}()
s3AssumeRoleArn, s3AssumeRoleName := createS3AssumeRole(stsClient, iamClient, l.Name)
createS3ObjectStorageBucketWithSTS(cfg, stsClient, s3AssumeRoleArn, l.BucketName)
createObjectStorageSecretOnAWSSTSCluster(oc, region, l.StorageSecret, l.BucketName, l.Namespace)
} else {
cred := getAWSCredentialFromCluster(oc)
client := newS3Client(cred)
err = createS3Bucket(client, l.BucketName, cred)
if err != nil {
return err
}
err = createSecretForAWSS3Bucket(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
if !readAzureCredentials() {
g.Skip("Azure Credentials not found. Skip case!")
} else {
performManagedIdentityAndSecretSetupForAzureWIF(oc, l.Name, l.Namespace, l.BucketName, l.StorageSecret)
}
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
if err1 != nil {
return fmt.Errorf("can't get azure storage account from cluster: %v", err1)
}
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.BucketName)
if err2 != nil {
return err2
}
err = exutil.CreateAzureStorageBlobContainer(client)
if err != nil {
return err
}
err = createSecretForAzureContainer(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
}
case "gcs":
{
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
err = exutil.CreateGCSBucket(projectID, l.BucketName)
if err != nil {
return err
}
if exutil.IsWorkloadIdentityCluster(oc) {
clusterName := getInfrastructureName(oc)
gcsSAName := generateServiceAccountNameForGCS(clusterName)
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME", gcsSAName)
projectNumber, err1 := getGCPProjectNumber(projectID)
if err1 != nil {
return fmt.Errorf("can't get GCP project number: %v", err1)
}
poolID, err2 := getPoolID(oc)
if err2 != nil {
return fmt.Errorf("can't get pool ID: %v", err2)
}
sa, err3 := createServiceAccountOnGCP(projectID, gcsSAName)
if err3 != nil {
return fmt.Errorf("can't create service account: %v", err3)
}
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL", sa.Email)
err4 := grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, l.Namespace, l.Name, sa.Email)
if err4 != nil {
return fmt.Errorf("can't add roles to the serviceaccount: %v", err4)
}
err = createSecretForGCSBucketWithSTS(oc, projectNumber, poolID, sa.Email, l.Namespace, l.StorageSecret, l.BucketName)
} else {
err = createSecretForGCSBucket(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.CreateOpenStackContainer(client, l.BucketName)
if err != nil {
return err
}
err = createSecretForSwiftContainer(oc, l.BucketName, l.StorageSecret, l.Namespace, cred)
}
case "odf":
{
err = createObjectBucketClaim(oc, l.Namespace, l.BucketName)
if err != nil {
return err
}
err = createSecretForODFBucket(oc, l.BucketName, l.StorageSecret, l.Namespace)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
client := newS3Client(cred)
err = createS3Bucket(client, l.BucketName, cred)
if err != nil {
return err
}
err = createSecretForMinIOBucket(oc, l.BucketName, l.StorageSecret, l.Namespace, minioNS)
}
}
return err
}
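
// Typical call pattern in a test (field set is illustrative; the struct may
// carry more fields than shown):
//
//	ls := lokiStack{Name: "lokistack", Namespace: ns, StorageType: getStorageType(oc),
//	    StorageSecret: "lokistack-secret", BucketName: "netobserv-" + getRandomString()}
//	defer ls.removeObjectStorage(oc)
//	o.Expect(ls.prepareResourcesForLokiStack(oc)).NotTo(o.HaveOccurred())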
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
5d4e0580-ae68-4b34-91d3-2857f1542303
|
removeObjectStorage
|
['"context"', '"os"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
['resource']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func (l lokiStack) removeObjectStorage(oc *exutil.CLI) {
resource{"secret", l.StorageSecret, l.Namespace}.clear(oc)
var err error
switch l.StorageType {
case "s3":
{
if exutil.IsWorkloadIdentityCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg := readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
stsClient := newStsClient(cfg)
var s3AssumeRoleName string
defer func() {
deleteIAMroleonAWS(iamClient, s3AssumeRoleName)
}()
s3AssumeRoleArn, s3AssumeRoleName := createS3AssumeRole(stsClient, iamClient, l.Name)
if checkIfS3bucketExistsWithSTS(cfg, stsClient, s3AssumeRoleArn, l.BucketName) {
deleteS3bucketWithSTS(cfg, stsClient, s3AssumeRoleArn, l.BucketName)
}
deleteIAMroleonAWS(iamClient, os.Getenv("LOKI_ROLE_NAME_ON_STS"))
os.Unsetenv("LOKI_ROLE_NAME_ON_STS")
} else {
cred := getAWSCredentialFromCluster(oc)
client := newS3Client(cred)
err = deleteS3Bucket(client, l.BucketName)
}
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
resourceGroup, err := getResourceGroupOnAzure(oc)
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
deleteManagedIdentityOnAzure(cred, azureSubscriptionID, resourceGroup, l.Name)
deleteAzureStorageAccount(cred, azureSubscriptionID, resourceGroup, os.Getenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT"))
os.Unsetenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT")
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.BucketName)
o.Expect(err2).NotTo(o.HaveOccurred())
err = exutil.DeleteAzureStorageBlobContainer(client)
}
}
case "gcs":
{
if exutil.IsWorkloadIdentityCluster(oc) {
sa := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
if sa == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_NAME is not set, no need to delete the serviceaccount")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
email := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
if email == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL is not set, no need to delete the policies")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
projectNumber, _ := getGCPProjectNumber(projectID)
poolID, _ := getPoolID(oc)
err = removePermissionsFromGCPServiceAccount(poolID, projectID, projectNumber, l.Namespace, l.Name, email)
o.Expect(err).NotTo(o.HaveOccurred())
err = removeServiceAccountFromGCP("projects/" + projectID + "/serviceAccounts/" + email)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
}
err = exutil.DeleteGCSBucket(l.BucketName)
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.DeleteOpenStackContainer(client, l.BucketName)
}
case "odf":
{
err = deleteObjectBucketClaim(oc, l.Namespace, l.BucketName)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
client := newS3Client(cred)
err = deleteS3Bucket(client, l.BucketName)
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
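
// Teardown mirrors prepareResourcesForLokiStack and reads the env vars set
// during setup (LOKI_ROLE_NAME_ON_STS, LOGGING_GCS_SERVICE_ACCOUNT_NAME/EMAIL,
// LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT), so the two calls should always be
// paired in the same test, typically via defer.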
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
1f04d439-1f80-4c2f-8b73-8e45621e166d
|
useExtraObjectStorage
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func useExtraObjectStorage(oc *exutil.CLI) string {
if checkODF(oc) {
e2e.Logf("use the existing ODF storage service")
return "odf"
}
ready, err := checkMinIO(oc, minioNS)
if ready {
e2e.Logf("use existing MinIO storage service")
return "minio"
}
if err != nil && (strings.Contains(err.Error(), "No resources found") || strings.Contains(err.Error(), "not found")) {
e2e.Logf("deploy MinIO and use it as the storage service")
deployMinIO(oc)
return "minio"
}
return ""
}
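
// Fallback order: reuse ODF when present, else reuse a ready MinIO, else
// deploy MinIO on demand; an empty string means no usable object store.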
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
38e9c200-2227-4acf-b81d-d079d7f73ca9
|
getStorageType
|
['"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func getStorageType(oc *exutil.CLI) string {
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
{
return "s3"
}
case "gcp":
{
return "gcs"
}
case "azure":
{
return "azure"
}
case "openstack":
{
return "swift"
}
default:
{
return useExtraObjectStorage(oc)
}
}
}
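
// Illustrative caller-side guard (assumed variable names):
//
//	storageType := getStorageType(oc)
//	if storageType == "" {
//	    g.Skip("no object storage available for this platform")
//	}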
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
e9e95357-e328-4e53-889a-3fa7419e530d
|
checkODF
|
['"context"', '"github.com/aws/aws-sdk-go-v2/service/s3"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func checkODF(oc *exutil.CLI) bool {
svcFound := false
expectedSC := []string{"openshift-storage.noobaa.io", "ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"}
var scInCluster []string
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, sc := range scs.Items {
scInCluster = append(scInCluster, sc.Name)
}
for _, s := range expectedSC {
if !contain(scInCluster, s) {
return false
}
}
_, err = oc.AdminKubeClient().CoreV1().Services("openshift-storage").Get(context.Background(), "s3", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return svcFound
}
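
// checkODF requires all three expected StorageClasses plus the "s3" Service in
// openshift-storage before the ODF backend is considered usable.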
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
faeb4bff-d6e6-49b2-867f-8028261f1ed6
|
checkMinIO
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func checkMinIO(oc *exutil.CLI, ns string) (bool, error) {
podReady, svcFound := false, false
pod, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "app=minio"})
if err != nil {
return false, err
}
if len(pod.Items) > 0 && pod.Items[0].Status.Phase == "Running" {
podReady = true
}
_, err = oc.AdminKubeClient().CoreV1().Services(ns).Get(context.Background(), "minio", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return podReady && svcFound, err
}
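
// Caveat: checkMinIO can return (false, nil) when the minio Service exists but
// the pod is not Running yet, so callers must nil-check err before reading it
// (see useExtraObjectStorage above).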
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
14a8f36a-4605-4997-9579-409b7dea3037
|
deployMinIO
|
['"context"', '"os"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['resource']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki_storage.go
|
func deployMinIO(oc *exutil.CLI) {
// create namespace
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), minioNS, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// create secret
_, err = oc.AdminKubeClient().CoreV1().Secrets(minioNS).Get(context.Background(), minioSecret, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", minioSecret, "-n", minioNS, "--from-literal=access_key_id="+getRandomString(), "--from-literal=secret_access_key=passwOOrd"+getRandomString()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// deploy minIO
deployTemplate := exutil.FixturePath("testdata", "logging", "minIO", "deploy.yaml")
deployFile, err := processTemplate(oc, "-n", minioNS, "-f", deployTemplate, "-p", "NAMESPACE="+minioNS, "NAME=minio", "SECRET_NAME="+minioSecret)
defer os.Remove(deployFile)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("apply").Args("-f", deployFile, "-n", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait for minio to be ready
for _, rs := range []string{"deployment", "svc", "route"} {
resource{rs, "minio", minioNS}.WaitForResourceToAppear(oc)
}
WaitForDeploymentPodsToBeReady(oc, minioNS, "minio")
}
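
// deployMinIO is safe to call repeatedly: the namespace and secret are only
// created when absent, and `oc apply` re-applies the same processed template
// before waiting for the deployment, service and route to become ready.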
|
netobserv
|