element_type
stringclasses 4
values | project_name
stringclasses 1
value | uuid
stringlengths 36
36
| name
stringlengths 0
346
| imports
stringlengths 0
2.67k
| structs
stringclasses 761
values | interfaces
stringclasses 22
values | file_location
stringclasses 545
values | code
stringlengths 26
8.07M
| global_vars
stringclasses 7
values | package
stringclasses 124
values | tags
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
91b34d54-27b1-4047-82b0-7cd5a8d2688d
|
deployLokiPVC
|
['LokiPersistentVolumeClaim']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// deployLokiPVC renders the PVC template with the configured namespace and
// applies it into that namespace.
func (loki *LokiPersistentVolumeClaim) deployLokiPVC(oc *exutil.CLI) {
	e2e.Logf("Deploy Loki PVC")
	args := []string{
		"--ignore-unknown-parameters=true",
		"-f", loki.Template,
		"-p", "NAMESPACE=" + loki.Namespace,
	}
	exutil.ApplyNsResourceFromTemplate(oc, loki.Namespace, args...)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
5270b917-aa1c-4474-83b8-a7f03116a028
|
deployLokiStorage
|
['LokiStorage']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// deployLokiStorage renders the Loki storage template with the configured
// namespace and applies it into that namespace.
func (loki *LokiStorage) deployLokiStorage(oc *exutil.CLI) {
	e2e.Logf("Deploy Loki storage")
	args := []string{
		"--ignore-unknown-parameters=true",
		"-f", loki.Template,
		"-p", "NAMESPACE=" + loki.Namespace,
	}
	exutil.ApplyNsResourceFromTemplate(oc, loki.Namespace, args...)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
7f8c1a93-576e-4a5b-aac5-eb1fdc43ad09
|
deleteLokiStorage
|
['LokiStorage']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// deleteLokiStorage removes the Loki pod, configmap and service created by
// deployLokiStorage, asserting that each delete succeeded.
func (loki *LokiStorage) deleteLokiStorage(oc *exutil.CLI) {
	// BUG FIX: the log message previously said "Delete Loki PVC", which does
	// not match what this function deletes.
	e2e.Logf("Delete Loki storage")
	command1 := []string{"pod", "loki", "-n", loki.Namespace}
	_, err1 := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command1...).Output()
	command2 := []string{"configmap", "loki-config", "-n", loki.Namespace}
	_, err2 := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command2...).Output()
	command3 := []string{"service", "loki", "-n", loki.Namespace}
	_, err3 := oc.AsAdmin().WithoutNamespace().Run("delete").Args(command3...).Output()
	// All three deletes are attempted before asserting so a single failure
	// does not leave the remaining resources behind.
	o.Expect(err1).NotTo(o.HaveOccurred())
	o.Expect(err2).NotTo(o.HaveOccurred())
	o.Expect(err3).NotTo(o.HaveOccurred())
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
ab2fce66-580d-4f44-9bc0-87730a44b813
|
deployLokiStack
|
['"fmt"', '"reflect"']
|
['lokiStack']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// deployLokiStack renders the lokiStack template and applies it in the
// stack's namespace. Every non-empty struct field becomes a KEY=value
// template parameter; the Template field itself is skipped, and the
// StorageType values "odf" and "minio" are mapped to "s3" (both expose an
// S3-compatible endpoint).
func (l lokiStack) deployLokiStack(oc *exutil.CLI) error {
parameters := []string{"--ignore-unknown-parameters=true", "-f", l.Template, "-p"}
// walk the struct fields via reflection so newly added fields are picked
// up automatically
lokistack := reflect.ValueOf(&l).Elem()
for i := 0; i < lokistack.NumField(); i++ {
if lokistack.Field(i).Interface() != "" {
if lokistack.Type().Field(i).Name == "StorageType" {
if lokistack.Field(i).Interface() == "odf" || lokistack.Field(i).Interface() == "minio" {
// odf and minio are consumed through their S3-compatible endpoint
parameters = append(parameters, fmt.Sprintf("%s=%s", lokistack.Type().Field(i).Name, "s3"))
} else {
parameters = append(parameters, fmt.Sprintf("%s=%s", lokistack.Type().Field(i).Name, lokistack.Field(i).Interface()))
}
} else {
if lokistack.Type().Field(i).Name == "Template" {
// the template path is an input file, not a template parameter
continue
} else {
parameters = append(parameters, fmt.Sprintf("%s=%s", lokistack.Type().Field(i).Name, lokistack.Field(i).Interface()))
}
}
}
}
file, err := processTemplate(oc, parameters...)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", l.Namespace).Execute()
return err
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
8fcd8e40-2683-4ec2-93c0-c73a4256593e
|
waitForLokiStackToBeReady
|
['lokiStack']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// waitForLokiStackToBeReady waits until every LokiStack deployment
// (distributor, gateway, querier, query-frontend) and statefulset
// (compactor, index-gateway, ingester) reports ready.
//
// BUG FIX: the original overwrote err on every iteration, so a failure of
// any component other than the last one was silently discarded. Now the
// first error is returned immediately.
func (l lokiStack) waitForLokiStackToBeReady(oc *exutil.CLI) error {
	for _, deploy := range []string{l.Name + "-distributor", l.Name + "-gateway", l.Name + "-querier", l.Name + "-query-frontend"} {
		if err := waitForDeploymentPodsToBeReady(oc, l.Namespace, deploy); err != nil {
			return err
		}
	}
	for _, ss := range []string{l.Name + "-compactor", l.Name + "-index-gateway", l.Name + "-ingester"} {
		if err := waitForStatefulsetReady(oc, l.Namespace, ss); err != nil {
			return err
		}
	}
	return nil
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
7dd59787-23cd-4e70-aa3f-7586b11d26f5
|
removeLokiStack
|
['lokiStack']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// removeLokiStack deletes the lokistack CR and any PVCs labelled as
// belonging to it. The PVC delete error is deliberately ignored: cleanup is
// best-effort and the PVCs may not exist.
func (l lokiStack) removeLokiStack(oc *exutil.CLI) {
Resource{"lokistack", l.Name, l.Namespace}.clear(oc)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "-n", l.Namespace, "-l", "app.kubernetes.io/instance="+l.Name).Execute()
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
544427ed-de68-4d0e-b58f-548029e630c5
|
getOIDC
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// getOIDC returns the cluster's service-account issuer
// (authentication.config/cluster .spec.serviceAccountIssuer) with a leading
// "https://" stripped, if present.
func getOIDC(oc *exutil.CLI) (string, error) {
oidc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication.config", "cluster", "-o=jsonpath={.spec.serviceAccountIssuer}").Output()
if err != nil {
return "", err
}
return strings.TrimPrefix(oidc, "https://"), nil
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
fe8ee9ff-6478-4595-a1ce-c301328a974a
|
getLokiChannel
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/loki.go
|
// getLokiChannel returns the last (newest) channel name advertised by the
// loki-operator packagemanifest in the given catalog.
//
// BUG FIX: the original indexed the split result before checking err, so a
// failed `oc get` would still be parsed (and the error only returned as a
// secondary value). Now the error is handled first.
func getLokiChannel(oc *exutil.CLI, catalog string) (lokiChannel string, err error) {
	channels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests", "-l", "catalog="+catalog, "-n", "openshift-marketplace", "-o=jsonpath={.items[?(@.metadata.name==\"loki-operator\")].status.channels[*].name}").Output()
	if err != nil {
		return "", err
	}
	// channels is a space-separated list; the last entry is the newest.
	channelArr := strings.Split(channels, " ")
	return channelArr[len(channelArr)-1], nil
}
|
netobserv
| ||||
file
|
openshift/openshift-tests-private
|
306e99fe-f0d8-429d-b85a-f4e1964e5cfc
|
metrics
|
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
package netobserv
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// prometheusQueryResult is the JSON response body of the Prometheus query
// API (/api/v1/query): a status string plus the result vector.
type prometheusQueryResult struct {
Data struct {
Result []metric `json:"result"`
ResultType string `json:"resultType"`
} `json:"data"`
Status string `json:"status"`
}
// metric is a single Prometheus sample: its label set plus a
// [timestamp, "value"] pair in Value (the value is JSON-encoded as a string).
type metric struct {
Metric struct {
Name string `json:"__name__"`
Cluster string `json:"cluster,omitempty"`
Container string `json:"container,omitempty"`
ContainerName string `json:"containername,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Instance string `json:"instance,omitempty"`
Job string `json:"job,omitempty"`
Namespace string `json:"namespace,omitempty"`
Path string `json:"path,omitempty"`
Pod string `json:"pod,omitempty"`
PodName string `json:"podname,omitempty"`
Service string `json:"service,omitempty"`
} `json:"metric"`
Value []interface{} `json:"value"`
}
// getMetric evaluates the given PromQL query against the in-cluster
// Prometheus (openshift-monitoring), retrying up to 10 times at 5s
// intervals while the result set is empty. It fails the running test via
// gomega if no samples are returned after all retries.
func getMetric(oc *exutil.CLI, query string) ([]metric, error) {
bearerToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
promRoute := "https://" + getRouteAddress(oc, "openshift-monitoring", "prometheus-k8s")
res, err := queryPrometheus(promRoute, query, bearerToken)
if err != nil {
return []metric{}, err
}
// metrics may lag behind; retry while the query returns nothing
attempts := 10
for len(res.Data.Result) == 0 && attempts > 0 {
time.Sleep(5 * time.Second)
res, err = queryPrometheus(promRoute, query, bearerToken)
if err != nil {
return []metric{}, err
}
attempts--
}
errMsg := fmt.Sprintf("0 results returned for query %s", query)
o.Expect(len(res.Data.Result)).Should(o.BeNumerically(">=", 1), errMsg)
return res.Data.Result, nil
}
// queryPrometheus GETs /api/v1/query on the given Prometheus route with the
// supplied bearer token and decodes the JSON response.
// promRoute: base https URL of the Prometheus instance
// query: the PromQL expression to evaluate (omitted from the request when empty)
// Note: only GET is issued; the action variable is fixed.
func queryPrometheus(promRoute string, query string, bearerToken string) (*prometheusQueryResult, error) {
path := "/api/v1/query"
action := "GET"
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add("Authorization", "Bearer "+bearerToken)
params := url.Values{}
if len(query) > 0 {
params.Add("query", query)
}
var p prometheusQueryResult
resp, err := doHTTPRequest(h, promRoute, path, params.Encode(), action, false, 5, nil, 200)
if err != nil {
return nil, err
}
err = json.Unmarshal(resp, &p)
if err != nil {
return nil, err
}
return &p, nil
}
// popMetricValue returns the first metric's sample value as a float64.
// Prometheus encodes a sample as [timestamp, "value"], so Value[1] holds the
// string form of the number.
//
// BUG FIX: the type-assertion result was previously discarded with `_`; a
// non-string value then surfaced only as an opaque ParseFloat error. The
// assertion is now checked explicitly.
func popMetricValue(metrics []metric) float64 {
	valInterface := metrics[0].Value[1]
	val, ok := valInterface.(string)
	o.Expect(ok).To(o.BeTrue(), "prometheus sample value is not a string")
	value, err := strconv.ParseFloat(val, 64)
	o.Expect(err).NotTo(o.HaveOccurred())
	return value
}
// pollMetrics polls the given PromQL query every 60s for up to 300s until it
// yields a value > 0, failing the test on timeout. Returns the first
// positive value observed.
func pollMetrics(oc *exutil.CLI, promQuery string) float64 {
var metricsVal float64
e2e.Logf("Query is %s", promQuery)
err := wait.PollUntilContextTimeout(context.Background(), 60*time.Second, 300*time.Second, false, func(context.Context) (bool, error) {
metrics, err := getMetric(oc, promQuery)
if err != nil {
return false, err
}
metricsVal = popMetricValue(metrics)
if metricsVal <= 0 {
e2e.Logf("%s did not return metrics value > 0, will try again", promQuery)
}
return metricsVal > 0, nil
})
msg := fmt.Sprintf("%s did not return valid metrics in 300 seconds", promQuery)
exutil.AssertWaitPollNoErr(err, msg)
return metricsVal
}
// verifyFLPMetrics asserts that the flowlogs-pipeline ingest and Loki export
// counters both report positive values.
func verifyFLPMetrics(oc *exutil.CLI) {
	queries := []string{
		"sum(netobserv_ingest_flows_processed)",
		"sum(netobserv_loki_sent_entries_total)",
	}
	for _, q := range queries {
		pollMetrics(oc, q)
	}
}
// verifyEBPFMetrics asserts that the eBPF agent batch-export and eviction
// counters both report positive values.
func verifyEBPFMetrics(oc *exutil.CLI) {
	queries := []string{
		"sum(netobserv_agent_exported_batch_total)",
		"sum(netobserv_agent_evictions_total)",
	}
	for _, q := range queries {
		pollMetrics(oc, q)
	}
}
// verifyEBPFFilterMetrics asserts that the eBPF agent's filtered-flows
// counter reports a positive value.
func verifyEBPFFilterMetrics(oc *exutil.CLI) {
	pollMetrics(oc, "sum(netobserv_agent_filtered_flows_total)")
}
// verifyEBPFFeatureMetrics computes the percentage of flows enriched with
// the given feature (Drops, RTT, DNS or Xlat) over a 1m rate window and
// asserts it falls within the empirically expected band for that feature.
// An unknown feature name is polled but not asserted.
func verifyEBPFFeatureMetrics(oc *exutil.CLI, feature string) {
query := fmt.Sprintf("100 * sum(rate(netobserv_agent_flows_enrichment_total{has%s=\"true\"}[1m])) / sum(rate(netobserv_agent_flows_enrichment_total[1m]))", feature)
metrics := pollMetrics(oc, query)
switch feature {
case "Drops":
// Expected to be around 4
o.Expect(metrics).Should(o.BeNumerically("~", 2.5, 7), "Drop metrics are beyond threshold values")
case "RTT":
// Expected to be around 55
o.Expect(metrics).Should(o.BeNumerically("~", 50, 60), "RTT metrics are beyond threshold values")
case "DNS":
// Expected to be around 1
o.Expect(metrics).Should(o.BeNumerically("~", 0.2, 5), "DNS metrics are beyond threshold values")
case "Xlat":
// Expected to be around 18
o.Expect(metrics).Should(o.BeNumerically("~", 15, 22), "Xlat metrics are beyond threshold values")
}
}
// getMetricsScheme returns the scheme of the servicemonitor's first
// endpoint (jsonpath output is wrapped in single quotes by the query).
func getMetricsScheme(oc *exutil.CLI, servicemonitor string, namespace string) (string, error) {
	args := []string{"servicemonitor", servicemonitor, "-n", namespace, "-o", "jsonpath='{.spec.endpoints[].scheme}'"}
	return oc.AsAdmin().Run("get").Args(args...).Output()
}
// getMetricsServerName returns the TLS serverName of the servicemonitor's
// first endpoint (jsonpath output is wrapped in single quotes by the query).
func getMetricsServerName(oc *exutil.CLI, servicemonitor string, namespace string) (string, error) {
	args := []string{"servicemonitor", servicemonitor, "-n", namespace, "-o", "jsonpath='{.spec.endpoints[].tlsConfig.serverName}'"}
	return oc.AsAdmin().Run("get").Args(args...).Output()
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
c791a144-88c0-4b0a-a212-51672e6ba08f
|
getMetric
|
['"fmt"', '"time"']
|
['metric']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// getMetric evaluates the given PromQL query against the in-cluster
// Prometheus, retrying up to 10 times at 5s intervals while the result set
// is empty, and fails the test (gomega) if no samples are returned.
func getMetric(oc *exutil.CLI, query string) ([]metric, error) {
bearerToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
promRoute := "https://" + getRouteAddress(oc, "openshift-monitoring", "prometheus-k8s")
res, err := queryPrometheus(promRoute, query, bearerToken)
if err != nil {
return []metric{}, err
}
attempts := 10
for len(res.Data.Result) == 0 && attempts > 0 {
time.Sleep(5 * time.Second)
res, err = queryPrometheus(promRoute, query, bearerToken)
if err != nil {
return []metric{}, err
}
attempts--
}
errMsg := fmt.Sprintf("0 results returned for query %s", query)
o.Expect(len(res.Data.Result)).Should(o.BeNumerically(">=", 1), errMsg)
return res.Data.Result, nil
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
4785aa83-5ece-488b-a3d5-3ee07a3c5c74
|
queryPrometheus
|
['"encoding/json"', '"net/http"', '"net/url"']
|
['prometheusQueryResult']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// queryPrometheus GETs /api/v1/query on the given Prometheus route with the
// supplied bearer token and decodes the JSON response into
// prometheusQueryResult. Only GET is issued; action is fixed.
func queryPrometheus(promRoute string, query string, bearerToken string) (*prometheusQueryResult, error) {
path := "/api/v1/query"
action := "GET"
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add("Authorization", "Bearer "+bearerToken)
params := url.Values{}
if len(query) > 0 {
params.Add("query", query)
}
var p prometheusQueryResult
resp, err := doHTTPRequest(h, promRoute, path, params.Encode(), action, false, 5, nil, 200)
if err != nil {
return nil, err
}
err = json.Unmarshal(resp, &p)
if err != nil {
return nil, err
}
return &p, nil
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
9a6716f1-5465-43df-951c-29c9d7d5b609
|
popMetricValue
|
['"strconv"']
|
['metric']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// popMetricValue returns the first metric's sample value as a float64.
// Prometheus encodes a sample as [timestamp, "value"], so Value[1] is the
// string form of the number; a non-numeric value fails the test.
func popMetricValue(metrics []metric) float64 {
valInterface := metrics[0].Value[1]
val, _ := valInterface.(string)
value, err := strconv.ParseFloat(val, 64)
o.Expect(err).NotTo(o.HaveOccurred())
return value
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
de49192d-181a-4512-bce8-16709064c037
|
pollMetrics
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// pollMetrics polls the given PromQL query every 60s for up to 300s until it
// yields a value > 0, failing the test on timeout; returns the value.
func pollMetrics(oc *exutil.CLI, promQuery string) float64 {
var metricsVal float64
e2e.Logf("Query is %s", promQuery)
err := wait.PollUntilContextTimeout(context.Background(), 60*time.Second, 300*time.Second, false, func(context.Context) (bool, error) {
metrics, err := getMetric(oc, promQuery)
if err != nil {
return false, err
}
metricsVal = popMetricValue(metrics)
if metricsVal <= 0 {
e2e.Logf("%s did not return metrics value > 0, will try again", promQuery)
}
return metricsVal > 0, nil
})
msg := fmt.Sprintf("%s did not return valid metrics in 300 seconds", promQuery)
exutil.AssertWaitPollNoErr(err, msg)
return metricsVal
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
b4f25da5-fd7b-47cf-af71-d26a1e68215f
|
verifyFLPMetrics
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// verifyFLPMetrics asserts that the flowlogs-pipeline ingest and Loki export
// counters both report positive values.
func verifyFLPMetrics(oc *exutil.CLI) {
query := "sum(netobserv_ingest_flows_processed)"
pollMetrics(oc, query)
query = "sum(netobserv_loki_sent_entries_total)"
pollMetrics(oc, query)
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
72924e18-db7c-48e8-ae32-decadd2bc1a6
|
verifyEBPFMetrics
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// verifyEBPFMetrics asserts that the eBPF agent batch-export and eviction
// counters both report positive values.
func verifyEBPFMetrics(oc *exutil.CLI) {
query := "sum(netobserv_agent_exported_batch_total)"
pollMetrics(oc, query)
query = "sum(netobserv_agent_evictions_total)"
pollMetrics(oc, query)
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
25a2df59-4cfc-4c46-a9b5-6cd1cb989321
|
verifyEBPFFilterMetrics
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// verifyEBPFFilterMetrics asserts that the eBPF agent's filtered-flows
// counter reports a positive value.
func verifyEBPFFilterMetrics(oc *exutil.CLI) {
query := "sum(netobserv_agent_filtered_flows_total)"
pollMetrics(oc, query)
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
41437670-e21b-42d4-ac8f-7a4cfb69bd7a
|
verifyEBPFFeatureMetrics
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// verifyEBPFFeatureMetrics computes the percentage of flows enriched with
// the given feature (Drops, RTT, DNS or Xlat) over a 1m rate window and
// asserts it falls within the empirically expected band for that feature.
func verifyEBPFFeatureMetrics(oc *exutil.CLI, feature string) {
query := fmt.Sprintf("100 * sum(rate(netobserv_agent_flows_enrichment_total{has%s=\"true\"}[1m])) / sum(rate(netobserv_agent_flows_enrichment_total[1m]))", feature)
metrics := pollMetrics(oc, query)
switch feature {
case "Drops":
// Expected to be around 4
o.Expect(metrics).Should(o.BeNumerically("~", 2.5, 7), "Drop metrics are beyond threshold values")
case "RTT":
// Expected to be around 55
o.Expect(metrics).Should(o.BeNumerically("~", 50, 60), "RTT metrics are beyond threshold values")
case "DNS":
// Expected to be around 1
o.Expect(metrics).Should(o.BeNumerically("~", 0.2, 5), "DNS metrics are beyond threshold values")
case "Xlat":
// Expected to be around 18
o.Expect(metrics).Should(o.BeNumerically("~", 15, 22), "Xlat metrics are beyond threshold values")
}
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
197c1653-6685-405c-a7c7-9897f33226f7
|
getMetricsScheme
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// getMetricsScheme returns the scheme of the servicemonitor's first endpoint.
func getMetricsScheme(oc *exutil.CLI, servicemonitor string, namespace string) (string, error) {
out, err := oc.AsAdmin().Run("get").Args("servicemonitor", servicemonitor, "-n", namespace, "-o", "jsonpath='{.spec.endpoints[].scheme}'").Output()
return out, err
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
ca77cea4-f645-4f5e-a3d1-fdc390221da7
|
getMetricsServerName
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/metrics.go
|
// getMetricsServerName returns the TLS serverName of the servicemonitor's
// first endpoint.
func getMetricsServerName(oc *exutil.CLI, servicemonitor string, namespace string) (string, error) {
out, err := oc.AsAdmin().Run("get").Args("servicemonitor", servicemonitor, "-n", namespace, "-o", "jsonpath='{.spec.endpoints[].tlsConfig.serverName}'").Output()
return out, err
}
|
netobserv
| |||||
file
|
openshift/openshift-tests-private
|
acc7636b-9f94-464e-9bfa-4013db8f151c
|
operator
|
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"time"
git "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
"sigs.k8s.io/yaml"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
package netobserv
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"time"
git "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
"sigs.k8s.io/yaml"
)
// SubscriptionObjects objects are used to create operators via OLM
type SubscriptionObjects struct {
OperatorName string
Namespace string
OperatorGroup string // the file used to create operator group
Subscription string // the file used to create subscription
PackageName string
CatalogSource *CatalogSourceObjects `json:",omitempty"`
OperatorPodLabel string
}
// CatalogSourceObjects defines the source used to subscribe an operator
type CatalogSourceObjects struct {
Channel string `json:",omitempty"`
SourceName string `json:",omitempty"`
SourceNamespace string `json:",omitempty"`
}
// OperatorNamespace struct to handle creation of namespace
type OperatorNamespace struct {
Name string
NamespaceTemplate string
}
// version mirrors testdata/netobserv/version.yaml: the operator branch/tag
// plus the component images used by the deployment.
type version struct {
Operator struct {
Branch string `yaml:"branch"`
TagName string `yaml:"tagName"`
} `yaml:"operator"`
FlowlogsPipeline struct {
Image string `yaml:"image"`
} `yaml:"flowlogs-pipeline"`
ConsolePlugin struct {
Image string `yaml:"image"`
} `yaml:"consolePlugin"`
}
// deployNetobservOperator deploys (action=true) or undeploys (action=false)
// the network-observability operator by running `make deploy`/`make undeploy`
// in *tempdir. When deploying it first clones the operator repo into a fresh
// temp dir (written back through tempdir); that dir is removed when this
// function returns.
func (versions *version) deployNetobservOperator(action bool, tempdir *string) error {
var (
deployCmd string
err error
)
if action {
err = versions.gitCheckout(tempdir)
if err != nil {
return err
}
// the checkout is only needed for the duration of the make invocation
defer os.RemoveAll(*tempdir)
e2e.Logf("cloned git repo successfully at %s", *tempdir)
var vers string
if versions.Operator.TagName == "" {
vers = "main"
} else {
vers = versions.Operator.TagName
}
deployCmd = "VERSION=" + vers + " make deploy"
} else {
// undeploy assumes *tempdir still holds a checkout from a prior deploy
e2e.Logf("undeploying operator")
deployCmd = "make undeploy"
}
cmd := exec.Command("bash", "-c", fmt.Sprintf("cd %s && %s", *tempdir, deployCmd))
err = cmd.Run()
if err != nil {
e2e.Logf("Failed action: %s for network-observability operator - err %s", deployCmd, err.Error())
return err
}
return nil
}
// versionMap parses testdata/netobserv/version.yaml into the receiver's
// version struct.
func (versions *version) versionMap() error {
	componentVersions := "version.yaml"
	versionsFixture := exutil.FixturePath("testdata", "netobserv", componentVersions)
	vers, err := os.ReadFile(versionsFixture)
	if err != nil {
		return err
	}
	err = yaml.Unmarshal(vers, &versions)
	if err != nil {
		return err
	}
	// FIX: use %+v (field names included) instead of %s — %s on a struct
	// produces an unreadable log line.
	e2e.Logf("versions in versionMap are %+v", versions)
	return nil
}
// gitCheckout clones the network-observability-operator repository (main
// branch) into a new temp dir, returned through tempdir. When version.yaml
// specifies an operator tag it is checked out and exported as VERSION for
// the subsequent make deploy step.
func (versions *version) gitCheckout(tempdir *string) error {
	var err error
	// BUG FIX: the TempDir error was previously discarded with `_`.
	*tempdir, err = ioutil.TempDir("", "netobserv")
	if err != nil {
		return err
	}
	operatorDir := "network-observability-operator"
	operatorRepo := fmt.Sprintf("https://github.com/netobserv/%s.git", operatorDir)
	repo, err := git.PlainClone(*tempdir, false, &git.CloneOptions{
		URL:           operatorRepo,
		ReferenceName: "refs/heads/main",
		SingleBranch:  true,
	})
	if err != nil {
		e2e.Logf("failed to clone git repo %s: %s", operatorRepo, err)
		return err
	}
	e2e.Logf("cloned git repo for %s successfully at %s", operatorDir, *tempdir)
	tree, err := repo.Worktree()
	if err != nil {
		return err
	}
	// Checkout our tag, if one is configured.
	if versions.Operator.TagName != "" {
		e2e.Logf("Deploying tag %s\n", versions.Operator.TagName)
		err = tree.Checkout(&git.CheckoutOptions{
			Branch: plumbing.ReferenceName("refs/tags/" + versions.Operator.TagName),
		})
		if err != nil {
			return err
		}
		os.Setenv("VERSION", versions.Operator.TagName)
	}
	return nil
}
// waitForPackagemanifestAppear waits (5s interval, 180s timeout) for the
// operator's packagemanifest to appear in the cluster.
// chSource: when true, the packagemanifests are filtered by the catalog
// label (catalog=<SourceName>); otherwise the packagemanifest is fetched by
// name directly.
func (so *SubscriptionObjects) waitForPackagemanifestAppear(oc *exutil.CLI, chSource bool) {
	args := []string{"-n", so.CatalogSource.SourceNamespace, "packagemanifests"}
	if chSource {
		args = append(args, "-l", "catalog="+so.CatalogSource.SourceName)
	} else {
		args = append(args, so.PackageName)
	}
	err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
		packages, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
		if err != nil {
			msg := fmt.Sprintf("%v", err)
			// "not found yet" is expected while waiting; keep polling
			if strings.Contains(msg, "No resources found") || strings.Contains(msg, "NotFound") {
				return false, nil
			}
			return false, err
		}
		if strings.Contains(packages, so.PackageName) {
			return true, nil
		}
		e2e.Logf("Waiting for packagemanifest/%s to appear", so.PackageName)
		return false, nil
	})
	// FIX: corrected typo "availabile" in the failure message.
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Packagemanifest %s is not available", so.PackageName))
}
// setCatalogSourceObjects set the default values of channel, source namespace and source name if they're not specified
func (so *SubscriptionObjects) setCatalogSourceObjects(oc *exutil.CLI) {
// set channel
if so.CatalogSource.Channel == "" {
so.CatalogSource.Channel = "stable"
}
// set source namespace
if so.CatalogSource.SourceNamespace == "" {
so.CatalogSource.SourceNamespace = "openshift-marketplace"
}
// set source and check if the packagemanifest exists or not
if so.CatalogSource.SourceName != "" {
so.waitForPackagemanifestAppear(oc, true)
} else {
// prefer the QE catalog source when it exists on the cluster;
// the get error is ignored deliberately — an empty/NotFound output
// just means the catalog is absent
catsrc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("catsrc", "-n", so.CatalogSource.SourceNamespace, "qe-app-registry").Output()
if catsrc != "" && !(strings.Contains(catsrc, "NotFound")) {
so.CatalogSource.SourceName = "qe-app-registry"
so.waitForPackagemanifestAppear(oc, true)
} else {
// fall back to whichever catalog publishes the packagemanifest
so.waitForPackagemanifestAppear(oc, false)
source, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests", so.PackageName, "-o", "jsonpath={.status.catalogSource}").Output()
if err != nil {
e2e.Logf("error getting catalog source name: %v", err)
}
so.CatalogSource.SourceName = source
}
}
}
// SubscribeOperator installs an operator via OLM: it creates the target
// namespace (when missing), an operator group (when the project has none),
// and the subscription (when missing). Each apply is retried for up to 60s
// and treats "AlreadyExists" as success.
func (so *SubscriptionObjects) SubscribeOperator(oc *exutil.CLI) {
	// check if the namespace exists, if it doesn't exist, create the namespace
	_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), so.Namespace, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			e2e.Logf("The project %s is not found, create it now...", so.Namespace)
			namespaceTemplate := exutil.FixturePath("testdata", "logging", "subscription", "namespace.yaml")
			namespaceFile, err := processTemplate(oc, "-f", namespaceTemplate, "-p", "NAMESPACE_NAME="+so.Namespace)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(context.Context) (done bool, err error) {
				output, err := oc.AsAdmin().Run("apply").Args("-f", namespaceFile).Output()
				if err != nil {
					if strings.Contains(output, "AlreadyExists") {
						return true, nil
					}
					return false, err
				}
				return true, nil
			})
			exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create project %s", so.Namespace))
		}
	}
	// check the operator group, if no object found, then create an operator group in the project
	og, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "og").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	msg := fmt.Sprintf("%v", og)
	if strings.Contains(msg, "No resources found") {
		// create operator group
		ogFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.OperatorGroup, "-p", "OG_NAME="+so.Namespace, "NAMESPACE="+so.Namespace)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(context.Context) (done bool, err error) {
			output, err := oc.AsAdmin().Run("apply").Args("-f", ogFile, "-n", so.Namespace).Output()
			if err != nil {
				if strings.Contains(output, "AlreadyExists") {
					return true, nil
				}
				return false, err
			}
			return true, nil
		})
		exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create operatorgroup %s in %s project", so.Namespace, so.Namespace))
	}
	// check subscription, if there is no subscription objets, then create one
	sub, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", so.Namespace, so.PackageName).Output()
	if err != nil {
		// BUG FIX: was fmt.Sprint("v%", sub) — a malformed verb passed as a
		// literal argument; the intent is to stringify sub for inspection.
		msg := fmt.Sprintf("%v", sub)
		if strings.Contains(msg, "NotFound") {
			so.setCatalogSourceObjects(oc)
			//create subscription object
			subscriptionFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.Subscription, "-p", "PACKAGE_NAME="+so.PackageName, "NAMESPACE="+so.Namespace, "CHANNEL="+so.CatalogSource.Channel, "SOURCE="+so.CatalogSource.SourceName, "SOURCE_NAMESPACE="+so.CatalogSource.SourceNamespace)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(context.Context) (done bool, err error) {
				output, err := oc.AsAdmin().Run("apply").Args("-f", subscriptionFile, "-n", so.Namespace).Output()
				if err != nil {
					if strings.Contains(output, "AlreadyExists") {
						return true, nil
					}
					return false, err
				}
				return true, nil
			})
			exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create subscription %s in %s project", so.PackageName, so.Namespace))
		}
	}
	//WaitForDeploymentPodsToBeReady(oc, so.Namespace, so.OperatorName)
}
// deleteNamespace deletes the namespace (NotFound on delete is treated as
// success) and then waits up to 3 minutes for it to disappear.
func deleteNamespace(oc *exutil.CLI, ns string) {
err := oc.AdminKubeClient().CoreV1().Namespaces().Delete(context.Background(), ns, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
// already gone — nothing to do
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Namespace %s is not deleted in 3 minutes", ns))
}
// uninstallOperator removes the operator's subscription and CSV, and deletes
// the namespace unless it is one of the shared/e2e namespaces.
func (so *SubscriptionObjects) uninstallOperator(oc *exutil.CLI) {
Resource{"subscription", so.PackageName, so.Namespace}.clear(oc)
// best-effort CSV cleanup: the delete error is deliberately ignored
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", so.Namespace, "csv", "-l", "operators.coreos.com/"+so.PackageName+"."+so.Namespace+"=").Execute()
// do not remove namespace openshift-logging and openshift-operators-redhat, and preserve the operatorgroup as there may have several operators deployed in one namespace
// for example: loki-operator and elasticsearch-operator
if so.Namespace != "openshift-logging" && so.Namespace != "openshift-operators-redhat" && so.Namespace != "openshift-operators" && so.Namespace != "openshift-netobserv-operator" && !strings.HasPrefix(so.Namespace, "e2e-test-") {
deleteNamespace(oc, so.Namespace)
}
}
// checkOperatorChannel returns the channel (.spec.channel) of the operator's
// subscription in the given namespace.
func checkOperatorChannel(oc *exutil.CLI, operatorNamespace string, operatorName string) (string, error) {
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", operatorName, "-n", operatorNamespace, "-o=jsonpath={.spec.channel}").Output()
	if err != nil {
		return "", err
	}
	return out, nil
}
// checkOperatorSource returns the catalog source (.spec.source) of the
// operator's subscription in the given namespace.
func checkOperatorSource(oc *exutil.CLI, operatorNamespace string, operatorName string) (string, error) {
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", operatorName, "-n", operatorNamespace, "-o=jsonpath={.spec.source}").Output()
	if err != nil {
		return "", err
	}
	return out, nil
}
// CheckOperatorStatus returns true when the operator's namespace and
// subscription exist and the installed CSV reaches phase Succeeded within
// 6 minutes; otherwise it logs that tests will create the operator and
// returns false.
func CheckOperatorStatus(oc *exutil.CLI, operatorNamespace string, operatorName string) bool {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", operatorNamespace).Execute()
if err == nil {
err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", operatorName, "-n", operatorNamespace).Execute()
if err1 == nil {
csvName, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", operatorName, "-n", operatorNamespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
// poll the CSV phase until it succeeds or the timeout elapses
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 360*time.Second, false, func(context.Context) (bool, error) {
csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", operatorNamespace, "-o=jsonpath={.status.phase}").Output()
if err != nil {
return false, err
}
return csvState == "Succeeded", nil
})
return err == nil
}
}
e2e.Logf("%s operator will be created by tests", operatorName)
return false
}
// DeployOperatorNamespace applies the operator's namespace template as a
// cluster-scoped resource.
func (ns *OperatorNamespace) DeployOperatorNamespace(oc *exutil.CLI) {
	e2e.Logf("Creating operator namespace")
	args := []string{"--ignore-unknown-parameters=true", "-f", ns.NamespaceTemplate}
	exutil.ApplyClusterResourceFromTemplate(oc, args...)
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
4607f3c3-606d-411c-975d-83c314db16e6
|
deployNetobservOperator
|
['"fmt"', '"os"', '"os/exec"']
|
['version']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// deployNetobservOperator deploys (action=true) or undeploys (action=false)
// the network-observability operator by running `make deploy`/`make undeploy`
// in *tempdir; deploying first clones the operator repo into a fresh temp
// dir that is removed when this function returns.
func (versions *version) deployNetobservOperator(action bool, tempdir *string) error {
var (
deployCmd string
err error
)
if action {
err = versions.gitCheckout(tempdir)
if err != nil {
return err
}
defer os.RemoveAll(*tempdir)
e2e.Logf("cloned git repo successfully at %s", *tempdir)
var vers string
if versions.Operator.TagName == "" {
vers = "main"
} else {
vers = versions.Operator.TagName
}
deployCmd = "VERSION=" + vers + " make deploy"
} else {
e2e.Logf("undeploying operator")
deployCmd = "make undeploy"
}
cmd := exec.Command("bash", "-c", fmt.Sprintf("cd %s && %s", *tempdir, deployCmd))
err = cmd.Run()
if err != nil {
e2e.Logf("Failed action: %s for network-observability operator - err %s", deployCmd, err.Error())
return err
}
return nil
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
ed50d087-07eb-4d66-b7c9-0efaf7cace9f
|
versionMap
|
['"os"', '"sigs.k8s.io/yaml"']
|
['version']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// versionMap populates the receiver with component versions parsed from the
// netobserv "version.yaml" test fixture. Returns any read or unmarshal error.
func (versions *version) versionMap() error {
	componentVersions := "version.yaml"
	versionsFixture := exutil.FixturePath("testdata", "netobserv", componentVersions)
	vers, err := os.ReadFile(versionsFixture)
	if err != nil {
		return err
	}
	// Fix: the receiver is already a pointer; taking its address again
	// (&versions, a **version) was redundant.
	err = yaml.Unmarshal(vers, versions)
	if err != nil {
		return err
	}
	// Fix: %s on a struct without a Stringer prints %!s noise; %+v prints
	// field names and values.
	e2e.Logf("versions in versionMap are %+v", versions)
	return nil
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
50e8e87f-d83e-4ec0-8b32-434a02652b1c
|
gitCheckout
|
['"fmt"', '"io/ioutil"', '"os"', '"github.com/go-git/go-git/v5/plumbing"']
|
['version']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// gitCheckout clones the network-observability-operator repository (main
// branch, single-branch) into a freshly created temp dir returned via
// tempdir. When a release tag is configured, that tag is checked out and the
// VERSION environment variable is set to it. Callers own cleanup of *tempdir.
func (versions *version) gitCheckout(tempdir *string) error {
	var err error
	// Fix: the error from TempDir was silently discarded with "_"; a failed
	// temp-dir creation would surface later as a confusing clone error.
	*tempdir, err = ioutil.TempDir("", "netobserv")
	if err != nil {
		return err
	}
	operatorDir := "network-observability-operator"
	operatorRepo := fmt.Sprintf("https://github.com/netobserv/%s.git", operatorDir)
	repo, err := git.PlainClone(*tempdir, false, &git.CloneOptions{
		URL:           operatorRepo,
		ReferenceName: "refs/heads/main",
		SingleBranch:  true,
	})
	if err != nil {
		e2e.Logf("failed to clone git repo %s: %s", operatorRepo, err)
		return err
	}
	e2e.Logf("cloned git repo for %s successfully at %s", operatorDir, *tempdir)
	tree, err := repo.Worktree()
	if err != nil {
		return err
	}
	// Checkout our tag
	if versions.Operator.TagName != "" {
		e2e.Logf("Deploying tag %s\n", versions.Operator.TagName)
		err = tree.Checkout(&git.CheckoutOptions{
			Branch: plumbing.ReferenceName("refs/tags/" + versions.Operator.TagName),
		})
		if err != nil {
			return err
		}
		// VERSION is consumed by the operator's "make deploy".
		os.Setenv("VERSION", versions.Operator.TagName)
	}
	return nil
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
b710e3cc-33c6-490f-966b-d81f753bbe1c
|
waitForPackagemanifestAppear
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['SubscriptionObjects']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// waitForPackagemanifestAppear polls every 5s (up to 3 minutes) until the
// operator's packagemanifest is visible. When chSource is true the listing is
// filtered by the catalog label of the configured catalog source; otherwise
// the packagemanifest is fetched by package name directly. Fails the test via
// AssertWaitPollNoErr if the manifest never appears.
func (so *SubscriptionObjects) waitForPackagemanifestAppear(oc *exutil.CLI, chSource bool) {
	args := []string{"-n", so.CatalogSource.SourceNamespace, "packagemanifests"}
	if chSource {
		args = append(args, "-l", "catalog="+so.CatalogSource.SourceName)
	} else {
		args = append(args, so.PackageName)
	}
	err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
		packages, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
		if err != nil {
			msg := fmt.Sprintf("%v", err)
			// "not found" style errors just mean the manifest hasn't shown up
			// yet; keep polling rather than aborting the wait.
			if strings.Contains(msg, "No resources found") || strings.Contains(msg, "NotFound") {
				return false, nil
			}
			return false, err
		}
		if strings.Contains(packages, so.PackageName) {
			return true, nil
		}
		e2e.Logf("Waiting for packagemanifest/%s to appear", so.PackageName)
		return false, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Packagemanifest %s is not availabile", so.PackageName))
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
40bcce57-6984-405e-ba3e-52e30ea4f2e1
|
setCatalogSourceObjects
|
['"strings"']
|
['SubscriptionObjects']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// setCatalogSourceObjects fills in catalog-source defaults (channel "stable",
// namespace "openshift-marketplace") and resolves the source name: an
// explicitly configured source is kept as-is; otherwise the qe-app-registry
// catalog source is preferred when present, falling back to whatever catalog
// source the packagemanifest itself reports. In every branch the
// packagemanifest is waited on before returning.
func (so *SubscriptionObjects) setCatalogSourceObjects(oc *exutil.CLI) {
	// set channel
	if so.CatalogSource.Channel == "" {
		so.CatalogSource.Channel = "stable"
	}
	// set source namespace
	if so.CatalogSource.SourceNamespace == "" {
		so.CatalogSource.SourceNamespace = "openshift-marketplace"
	}
	// set source and check if the packagemanifest exists or not
	if so.CatalogSource.SourceName != "" {
		so.waitForPackagemanifestAppear(oc, true)
	} else {
		// Error deliberately ignored: an absent qe-app-registry catsrc simply
		// routes us to the fallback branch below.
		catsrc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("catsrc", "-n", so.CatalogSource.SourceNamespace, "qe-app-registry").Output()
		if catsrc != "" && !(strings.Contains(catsrc, "NotFound")) {
			so.CatalogSource.SourceName = "qe-app-registry"
			so.waitForPackagemanifestAppear(oc, true)
		} else {
			so.waitForPackagemanifestAppear(oc, false)
			// Derive the source from the packagemanifest status once it exists.
			source, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests", so.PackageName, "-o", "jsonpath={.status.catalogSource}").Output()
			if err != nil {
				e2e.Logf("error getting catalog source name: %v", err)
			}
			so.CatalogSource.SourceName = source
		}
	}
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
ad6ce31b-6629-48d6-8cda-b4f3f4b87e4b
|
SubscribeOperator
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', '"sigs.k8s.io/yaml"']
|
['SubscriptionObjects']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// SubscribeOperator ensures everything needed to install the operator exists:
// the namespace, an operatorgroup in that namespace, and a subscription.
// Each object is created from its template only when missing; creations that
// race ("AlreadyExists") are treated as success.
func (so *SubscriptionObjects) SubscribeOperator(oc *exutil.CLI) {
	// check if the namespace exists, if it doesn't exist, create the namespace
	_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), so.Namespace, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			e2e.Logf("The project %s is not found, create it now...", so.Namespace)
			namespaceTemplate := exutil.FixturePath("testdata", "logging", "subscription", "namespace.yaml")
			namespaceFile, err := processTemplate(oc, "-f", namespaceTemplate, "-p", "NAMESPACE_NAME="+so.Namespace)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(context.Context) (done bool, err error) {
				output, err := oc.AsAdmin().Run("apply").Args("-f", namespaceFile).Output()
				if err != nil {
					// Another actor created it first; that's fine.
					if strings.Contains(output, "AlreadyExists") {
						return true, nil
					}
					return false, err
				}
				return true, nil
			})
			exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create project %s", so.Namespace))
		}
	}
	// check the operator group, if no object found, then create an operator group in the project
	og, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "og").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	msg := fmt.Sprintf("%v", og)
	if strings.Contains(msg, "No resources found") {
		// create operator group
		ogFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.OperatorGroup, "-p", "OG_NAME="+so.Namespace, "NAMESPACE="+so.Namespace)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(context.Context) (done bool, err error) {
			output, err := oc.AsAdmin().Run("apply").Args("-f", ogFile, "-n", so.Namespace).Output()
			if err != nil {
				if strings.Contains(output, "AlreadyExists") {
					return true, nil
				}
				return false, err
			}
			return true, nil
		})
		exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create operatorgroup %s in %s project", so.Namespace, so.Namespace))
	}
	// check subscription, if there is no subscription objets, then create one
	sub, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", so.Namespace, so.PackageName).Output()
	if err != nil {
		// Fix: was fmt.Sprint("v%", sub) — a format-verb typo that prepended
		// the literal "v%" instead of formatting; use Sprintf with %v as the
		// operatorgroup check above does.
		msg := fmt.Sprintf("%v", sub)
		if strings.Contains(msg, "NotFound") {
			so.setCatalogSourceObjects(oc)
			//create subscription object
			subscriptionFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.Subscription, "-p", "PACKAGE_NAME="+so.PackageName, "NAMESPACE="+so.Namespace, "CHANNEL="+so.CatalogSource.Channel, "SOURCE="+so.CatalogSource.SourceName, "SOURCE_NAMESPACE="+so.CatalogSource.SourceNamespace)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(context.Context) (done bool, err error) {
				output, err := oc.AsAdmin().Run("apply").Args("-f", subscriptionFile, "-n", so.Namespace).Output()
				if err != nil {
					if strings.Contains(output, "AlreadyExists") {
						return true, nil
					}
					return false, err
				}
				return true, nil
			})
			exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create subscription %s in %s project", so.PackageName, so.Namespace))
		}
	}
	//WaitForDeploymentPodsToBeReady(oc, so.Namespace, so.OperatorName)
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
e6782e33-310e-4132-b6cc-72288a0a8c8a
|
deleteNamespace
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// deleteNamespace deletes the given namespace and waits up to 3 minutes for
// it to disappear. A namespace that is already gone counts as success.
func deleteNamespace(oc *exutil.CLI, ns string) {
	err := oc.AdminKubeClient().CoreV1().Namespaces().Delete(context.Background(), ns, metav1.DeleteOptions{})
	// An already-absent namespace is not a failure.
	if apierrors.IsNotFound(err) {
		err = nil
	}
	o.Expect(err).NotTo(o.HaveOccurred())
	// Poll every 5s until the Get returns NotFound, i.e. deletion finished.
	err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
		_, getErr := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{})
		if getErr == nil {
			// Still present; keep waiting.
			return false, nil
		}
		if apierrors.IsNotFound(getErr) {
			return true, nil
		}
		return false, getErr
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Namespace %s is not deleted in 3 minutes", ns))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
5f2b60b2-bf3c-4e08-bb7a-82352f4ff6ad
|
uninstallOperator
|
['"strings"']
|
['SubscriptionObjects']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// uninstallOperator removes the operator's subscription and CSV, then deletes
// its namespace unless that namespace is shared or an e2e test namespace.
func (so *SubscriptionObjects) uninstallOperator(oc *exutil.CLI) {
	Resource{"subscription", so.PackageName, so.Namespace}.clear(oc)
	_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", so.Namespace, "csv", "-l", "operators.coreos.com/"+so.PackageName+"."+so.Namespace+"=").Execute()
	// do not remove namespace openshift-logging and openshift-operators-redhat, and preserve the operatorgroup as there may have several operators deployed in one namespace
	// for example: loki-operator and elasticsearch-operator
	preserved := map[string]struct{}{
		"openshift-logging":            {},
		"openshift-operators-redhat":   {},
		"openshift-operators":          {},
		"openshift-netobserv-operator": {},
	}
	if _, keep := preserved[so.Namespace]; !keep && !strings.HasPrefix(so.Namespace, "e2e-test-") {
		deleteNamespace(oc, so.Namespace)
	}
}
|
netobserv
| |||
function
|
openshift/openshift-tests-private
|
94dd7702-71ee-47ad-9468-343b889aad94
|
checkOperatorChannel
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// checkOperatorChannel returns the channel configured in the operator's
// subscription (.spec.channel), or an error if the lookup fails.
func checkOperatorChannel(oc *exutil.CLI, operatorNamespace string, operatorName string) (string, error) {
	args := []string{"sub", operatorName, "-n", operatorNamespace, "-o=jsonpath={.spec.channel}"}
	channel, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
	if err != nil {
		return "", err
	}
	return channel, nil
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
3c66f762-c393-4946-8637-71ce657a7c4b
|
checkOperatorSource
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// checkOperatorSource returns the catalog source configured in the operator's
// subscription (.spec.source), or an error if the subscription lookup fails.
func checkOperatorSource(oc *exutil.CLI, operatorNamespace string, operatorName string) (string, error) {
	// Fix: the local was misnamed "channelName" (copy-paste from
	// checkOperatorChannel); the jsonpath reads the catalog *source*.
	sourceName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", operatorName, "-n", operatorNamespace, "-o=jsonpath={.spec.source}").Output()
	if err != nil {
		return "", err
	}
	return sourceName, nil
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
c3069cb3-a734-44aa-870b-bb8c436015e6
|
CheckOperatorStatus
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// CheckOperatorStatus reports whether the operator identified by
// operatorNamespace/operatorName is already installed and healthy: the
// namespace exists, a subscription is present with an installed CSV, and that
// CSV reaches the "Succeeded" phase within 6 minutes.
func CheckOperatorStatus(oc *exutil.CLI, operatorNamespace string, operatorName string) bool {
	// The namespace must exist before anything else is worth checking.
	err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", operatorNamespace).Execute()
	if err == nil {
		// A subscription must exist too; otherwise fall through to "not installed".
		err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", operatorName, "-n", operatorNamespace).Execute()
		if err1 == nil {
			// The subscription must record the name of the installed CSV.
			csvName, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", operatorName, "-n", operatorNamespace, "-o=jsonpath={.status.installedCSV}").Output()
			o.Expect(err2).NotTo(o.HaveOccurred())
			o.Expect(csvName).NotTo(o.BeEmpty())
			// Poll every 10s (up to 360s) until the CSV phase is "Succeeded".
			err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 360*time.Second, false, func(context.Context) (bool, error) {
				csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", operatorNamespace, "-o=jsonpath={.status.phase}").Output()
				if err != nil {
					return false, err
				}
				return csvState == "Succeeded", nil
			})
			return err == nil
		}
	}
	// Operator (or its namespace/subscription) is absent; tests will install it.
	e2e.Logf("%s operator will be created by tests", operatorName)
	return false
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
ed58324e-33dd-4373-a207-6d64f7efc096
|
DeployOperatorNamespace
|
['OperatorNamespace']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/operator.go
|
// DeployOperatorNamespace creates the operator's namespace cluster resource
// from the configured namespace template.
func (ns *OperatorNamespace) DeployOperatorNamespace(oc *exutil.CLI) {
	e2e.Logf("Creating operator namespace")
	exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ns.NamespaceTemplate)
}
|
netobserv
| ||||
test
|
openshift/openshift-tests-private
|
f792c09c-4aac-414e-873c-0aecd058cebc
|
test_exporters
|
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"strconv"
filePath "path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_exporters.go
|
package netobserv

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"strconv"
	filePath "path/filepath"
	"strings"
	"time"

	g "github.com/onsi/ginkgo/v2"
	o "github.com/onsi/gomega"
	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// Exporter e2e suite: verifies that NetObserv flowlogs can be shipped to
// external IPFIX and OpenTelemetry collectors with Loki disabled.
var _ = g.Describe("[sig-netobserv] Network_Observability", func() {
	defer g.GinkgoRecover()
	var (
		oc = exutil.NewCLI("netobserv", exutil.KubeConfigPath())
		// NetObserv Operator variables
		netobservNS   = "openshift-netobserv-operator"
		NOPackageName = "netobserv-operator"
		NOcatSrc      = Resource{"catsrc", "netobserv-konflux-fbc", "openshift-marketplace"}
		NOSource      = CatalogSourceObjects{"stable", NOcatSrc.Name, NOcatSrc.Namespace}
		// Template directories
		baseDir         = exutil.FixturePath("testdata", "netobserv")
		subscriptionDir = exutil.FixturePath("testdata", "netobserv", "subscription")
		flowFixturePath = filePath.Join(baseDir, "flowcollector_v1beta2_template.yaml")
		// Operator namespace object
		OperatorNS = OperatorNamespace{
			Name:              netobservNS,
			NamespaceTemplate: filePath.Join(subscriptionDir, "namespace.yaml"),
		}
		NO = SubscriptionObjects{
			OperatorName:  "netobserv-operator",
			Namespace:     netobservNS,
			PackageName:   NOPackageName,
			Subscription:  filePath.Join(subscriptionDir, "sub-template.yaml"),
			OperatorGroup: filePath.Join(subscriptionDir, "allnamespace-og.yaml"),
			CatalogSource: &NOSource,
		}
		OtelNS = OperatorNamespace{
			Name:              "openshift-opentelemetry-operator",
			NamespaceTemplate: filePath.Join(subscriptionDir, "namespace.yaml"),
		}
		OTELSource = CatalogSourceObjects{"stable", "redhat-operators", "openshift-marketplace"}
		OTEL       = SubscriptionObjects{
			OperatorName:  "opentelemetry-operator",
			Namespace:     OtelNS.Name,
			PackageName:   "opentelemetry-product",
			Subscription:  filePath.Join(subscriptionDir, "sub-template.yaml"),
			OperatorGroup: filePath.Join(subscriptionDir, "allnamespace-og.yaml"),
			CatalogSource: &OTELSource,
		}
	)
	// Common setup: deploy the konflux catalog source + mirror set and make
	// sure the NetObserv operator is installed before each spec.
	g.BeforeEach(func() {
		if strings.Contains(os.Getenv("E2E_RUN_TAGS"), "disconnected") {
			g.Skip("Skipping tests for disconnected profiles")
		}
		g.By("Deploy konflux FBC and ImageDigestMirrorSet")
		imageDigest := filePath.Join(subscriptionDir, "image-digest-mirror-set.yaml")
		catSrcTemplate := filePath.Join(subscriptionDir, "catalog-source.yaml")
		catsrcErr := NOcatSrc.applyFromTemplate(oc, "-n", NOcatSrc.Namespace, "-f", catSrcTemplate)
		o.Expect(catsrcErr).NotTo(o.HaveOccurred())
		WaitUntilCatSrcReady(oc, NOcatSrc.Name)
		ApplyResourceFromFile(oc, netobservNS, imageDigest)
		g.By(fmt.Sprintf("Subscribe operators to %s channel", NOSource.Channel))
		// check if Network Observability Operator is already present
		NOexisting := CheckOperatorStatus(oc, NO.Namespace, NO.PackageName)
		// create operatorNS and deploy operator if not present
		if !NOexisting {
			OperatorNS.DeployOperatorNamespace(oc)
			NO.SubscribeOperator(oc)
			// check if NO operator is deployed
			WaitForPodsReadyWithLabel(oc, NO.Namespace, "app="+NO.OperatorName)
			NOStatus := CheckOperatorStatus(oc, NO.Namespace, NO.PackageName)
			o.Expect((NOStatus)).To(o.BeTrue())
			// check if flowcollector API exists
			flowcollectorAPIExists, err := isFlowCollectorAPIExists(oc)
			o.Expect((flowcollectorAPIExists)).To(o.BeTrue())
			o.Expect(err).NotTo(o.HaveOccurred())
		}
	})
	// IPFIX export: flowlogs must reach an in-cluster IPFIX collector.
	g.It("Author:aramesha-High-64156-Verify IPFIX-exporter [Serial]", func() {
		namespace := oc.Namespace()
		g.By("Create IPFIX namespace")
		ipfixCollectorTemplatePath := filePath.Join(baseDir, "exporters", "ipfix-collector.yaml")
		IPFIXns := "ipfix"
		defer oc.DeleteSpecifiedNamespaceAsAdmin(IPFIXns)
		oc.CreateSpecifiedNamespaceAsAdmin(IPFIXns)
		exutil.SetNamespacePrivileged(oc, IPFIXns)
		g.By("Deploy IPFIX collector")
		createResourceFromFile(oc, IPFIXns, ipfixCollectorTemplatePath)
		WaitForPodsReadyWithLabel(oc, IPFIXns, "app=flowlogs-pipeline")
		// Exporter spec is serialized to JSON and injected into the template.
		IPFIXconfig := map[string]interface{}{
			"ipfix": map[string]interface{}{
				"targetHost": "flowlogs-pipeline.ipfix.svc.cluster.local",
				"targetPort": 2055,
				"transport":  "UDP"},
			"type": "IPFIX",
		}
		config, err := json.Marshal(IPFIXconfig)
		o.Expect(err).ToNot(o.HaveOccurred())
		IPFIXexporter := string(config)
		g.By("Deploy FlowCollector with Loki disabled")
		flow := Flowcollector{
			Namespace:     namespace,
			Template:      flowFixturePath,
			LokiEnable:    "false",
			LokiNamespace: namespace,
			Exporters:     []string{IPFIXexporter},
		}
		defer flow.DeleteFlowcollector(oc)
		flow.CreateFlowcollector(oc)
		g.By("Verify flowcollector is deployed with IPFIX exporter")
		flowPatch, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.exporters[0].type}'").Output()
		o.Expect(err).ToNot(o.HaveOccurred())
		o.Expect(flowPatch).To(o.Equal(`'IPFIX'`))
		FLPconsumerPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "ipfix", "-l", "app=flowlogs-pipeline", "-o=jsonpath={.items[0].metadata.name}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		g.By("Verify flowlogs are seen in IPFIX consumer pod logs")
		_, err = exutil.WaitAndGetSpecificPodLogs(oc, IPFIXns, "", FLPconsumerPod, `"Type:IPFIX"`)
		exutil.AssertWaitPollNoErr(err, "Did not find Type IPFIX in ipfix-collector pod logs")
	})
	// OTel export: flowlogs must reach the collector pod and flow metrics
	// must be scrapable from the collector's Prometheus endpoint.
	g.It("Author:memodi-High-74977-Verify OTEL exporter [Serial]", func() {
		namespace := oc.Namespace()
		// don't delete the OTEL Operator at the end of the test
		g.By("Subscribe to OTEL Operator")
		OtelNS.DeployOperatorNamespace(oc)
		OTEL.SubscribeOperator(oc)
		WaitForPodsReadyWithLabel(oc, OTEL.Namespace, "app.kubernetes.io/name="+OTEL.OperatorName)
		OTELStatus := CheckOperatorStatus(oc, OTEL.Namespace, OTEL.PackageName)
		o.Expect((OTELStatus)).To(o.BeTrue())
		g.By("Create OTEL Collector")
		otelCollectorTemplatePath := filePath.Join(baseDir, "exporters", "otel-collector.yaml")
		otlpEndpoint := 4317
		promEndpoint := "8889"
		collectorname := "otel"
		exutil.ApplyNsResourceFromTemplate(oc, namespace, "-f", otelCollectorTemplatePath, "-p", "NAME="+collectorname, "OTLP_GRPC_ENDPOINT="+strconv.Itoa(otlpEndpoint), "OTLP_PROM_PORT="+promEndpoint)
		otelPodLabel := "app.kubernetes.io/component=opentelemetry-collector"
		defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("opentelemetrycollector", collectorname, "-n", namespace).Execute()
		WaitForPodsReadyWithLabel(oc, namespace, otelPodLabel)
		targetHost := fmt.Sprintf("otel-collector-headless.%s.svc", namespace)
		otel_config := map[string]interface{}{
			"openTelemetry": map[string]interface{}{
				"logs": map[string]bool{"enable": true},
				"metrics": map[string]interface{}{"enable": true,
					"pushTimeInterval": "20s"},
				"targetHost": targetHost,
				"targetPort": otlpEndpoint,
			},
			"type": "OpenTelemetry",
		}
		config, err := json.Marshal(otel_config)
		o.Expect(err).NotTo(o.HaveOccurred())
		config_str := string(config)
		g.By("Deploy FlowCollector with Loki disabled")
		flow := Flowcollector{
			Namespace:     namespace,
			Template:      flowFixturePath,
			LokiEnable:    "false",
			LokiNamespace: namespace,
			Exporters:     []string{config_str},
		}
		defer flow.DeleteFlowcollector(oc)
		flow.CreateFlowcollector(oc)
		g.By("Verify OTEL pods are receiving the logs")
		otelCollectorPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", otelPodLabel, "-o=jsonpath={.items[0].metadata.name}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		// wait for 60 seconds to ensure we collected enough logs to grep from
		time.Sleep(60 * time.Second)
		g.By("Verify OTEL flowlogs are seen in collector pod logs")
		textToExist := "Attributes:"
		textToNotExist := "INVALID"
		podLogs, err := getPodLogs(oc, namespace, otelCollectorPod)
		o.Expect(err).ToNot(o.HaveOccurred())
		grepCmd := fmt.Sprintf("grep %s %s", textToExist, podLogs)
		textToExistLogs, err := exec.Command("bash", "-c", grepCmd).Output()
		o.Expect(err).ToNot(o.HaveOccurred())
		o.Expect(len(textToExistLogs)).To(o.BeNumerically(">", 0))
		// "|| true" keeps grep's exit status 0 when there are (correctly) no matches.
		grepCmd = fmt.Sprintf("grep %s %s || true", textToNotExist, podLogs)
		textToNotExistLogs, err := exec.Command("bash", "-c", grepCmd).Output()
		o.Expect(err).ToNot(o.HaveOccurred())
		o.Expect(len(textToNotExistLogs)).To(o.BeNumerically("==", 0), string(textToNotExistLogs))
		g.By("Verify OTEL prometheus has metrics")
		command := fmt.Sprintf("curl -s localhost:%s/metrics | grep 'netobserv_workload_flows_total{' | head -1 | awk '{print $2}'", promEndpoint)
		cmd := []string{"-n", namespace, otelCollectorPod, "--", "/bin/sh", "-c", command}
		count, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
		o.Expect(err).ToNot(o.HaveOccurred())
		nCount, err := strconv.Atoi(strings.Trim(count, "\n"))
		o.Expect(err).ToNot(o.HaveOccurred())
		o.Expect(nCount).To(o.BeNumerically(">", 0))
	})
})
|
package netobserv
| ||||
test case
|
openshift/openshift-tests-private
|
43a3634b-a410-479b-bac7-9535720d2af6
|
Author:aramesha-High-64156-Verify IPFIX-exporter [Serial]
|
['"encoding/json"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_exporters.go
|
// Verifies that flowlogs are exported to an in-cluster IPFIX collector when
// Loki is disabled: a collector is deployed in a dedicated "ipfix" namespace,
// the flowcollector is configured with an IPFIX exporter, and the collector
// pod logs are checked for IPFIX-typed records.
g.It("Author:aramesha-High-64156-Verify IPFIX-exporter [Serial]", func() {
	namespace := oc.Namespace()
	g.By("Create IPFIX namespace")
	ipfixCollectorTemplatePath := filePath.Join(baseDir, "exporters", "ipfix-collector.yaml")
	IPFIXns := "ipfix"
	defer oc.DeleteSpecifiedNamespaceAsAdmin(IPFIXns)
	oc.CreateSpecifiedNamespaceAsAdmin(IPFIXns)
	exutil.SetNamespacePrivileged(oc, IPFIXns)
	g.By("Deploy IPFIX collector")
	createResourceFromFile(oc, IPFIXns, ipfixCollectorTemplatePath)
	WaitForPodsReadyWithLabel(oc, IPFIXns, "app=flowlogs-pipeline")
	// Exporter spec is serialized to JSON and injected into the
	// flowcollector template as a single string parameter.
	IPFIXconfig := map[string]interface{}{
		"ipfix": map[string]interface{}{
			"targetHost": "flowlogs-pipeline.ipfix.svc.cluster.local",
			"targetPort": 2055,
			"transport":  "UDP"},
		"type": "IPFIX",
	}
	config, err := json.Marshal(IPFIXconfig)
	o.Expect(err).ToNot(o.HaveOccurred())
	IPFIXexporter := string(config)
	g.By("Deploy FlowCollector with Loki disabled")
	flow := Flowcollector{
		Namespace:     namespace,
		Template:      flowFixturePath,
		LokiEnable:    "false",
		LokiNamespace: namespace,
		Exporters:     []string{IPFIXexporter},
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)
	g.By("Verify flowcollector is deployed with IPFIX exporter")
	flowPatch, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.exporters[0].type}'").Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(flowPatch).To(o.Equal(`'IPFIX'`))
	FLPconsumerPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "ipfix", "-l", "app=flowlogs-pipeline", "-o=jsonpath={.items[0].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By("Verify flowlogs are seen in IPFIX consumer pod logs")
	_, err = exutil.WaitAndGetSpecificPodLogs(oc, IPFIXns, "", FLPconsumerPod, `"Type:IPFIX"`)
	exutil.AssertWaitPollNoErr(err, "Did not find Type IPFIX in ipfix-collector pod logs")
})
| |||||
test case
|
openshift/openshift-tests-private
|
43a6a450-8e50-41d5-8d74-4d5d9569f21b
|
Author:memodi-High-74977-Verify OTEL exporter [Serial]
|
['"encoding/json"', '"fmt"', '"os/exec"', '"strconv"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_exporters.go
|
// Verifies export of flowlogs and metrics to an OpenTelemetry collector:
// logs must appear in the collector pod (and contain no INVALID entries) and
// flow metrics must be scrapable from the collector's Prometheus endpoint.
g.It("Author:memodi-High-74977-Verify OTEL exporter [Serial]", func() {
	namespace := oc.Namespace()
	// don't delete the OTEL Operator at the end of the test
	g.By("Subscribe to OTEL Operator")
	OtelNS.DeployOperatorNamespace(oc)
	OTEL.SubscribeOperator(oc)
	WaitForPodsReadyWithLabel(oc, OTEL.Namespace, "app.kubernetes.io/name="+OTEL.OperatorName)
	OTELStatus := CheckOperatorStatus(oc, OTEL.Namespace, OTEL.PackageName)
	o.Expect((OTELStatus)).To(o.BeTrue())
	g.By("Create OTEL Collector")
	otelCollectorTemplatePath := filePath.Join(baseDir, "exporters", "otel-collector.yaml")
	otlpEndpoint := 4317
	promEndpoint := "8889"
	collectorname := "otel"
	exutil.ApplyNsResourceFromTemplate(oc, namespace, "-f", otelCollectorTemplatePath, "-p", "NAME="+collectorname, "OTLP_GRPC_ENDPOINT="+strconv.Itoa(otlpEndpoint), "OTLP_PROM_PORT="+promEndpoint)
	otelPodLabel := "app.kubernetes.io/component=opentelemetry-collector"
	defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("opentelemetrycollector", collectorname, "-n", namespace).Execute()
	WaitForPodsReadyWithLabel(oc, namespace, otelPodLabel)
	targetHost := fmt.Sprintf("otel-collector-headless.%s.svc", namespace)
	// Exporter spec is serialized to JSON and injected into the
	// flowcollector template as a single string parameter.
	otel_config := map[string]interface{}{
		"openTelemetry": map[string]interface{}{
			"logs": map[string]bool{"enable": true},
			"metrics": map[string]interface{}{"enable": true,
				"pushTimeInterval": "20s"},
			"targetHost": targetHost,
			"targetPort": otlpEndpoint,
		},
		"type": "OpenTelemetry",
	}
	config, err := json.Marshal(otel_config)
	o.Expect(err).NotTo(o.HaveOccurred())
	config_str := string(config)
	g.By("Deploy FlowCollector with Loki disabled")
	flow := Flowcollector{
		Namespace:     namespace,
		Template:      flowFixturePath,
		LokiEnable:    "false",
		LokiNamespace: namespace,
		Exporters:     []string{config_str},
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)
	g.By("Verify OTEL pods are receiving the logs")
	otelCollectorPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", otelPodLabel, "-o=jsonpath={.items[0].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	// wait for 60 seconds to ensure we collected enough logs to grep from
	time.Sleep(60 * time.Second)
	g.By("Verify OTEL flowlogs are seen in collector pod logs")
	textToExist := "Attributes:"
	textToNotExist := "INVALID"
	podLogs, err := getPodLogs(oc, namespace, otelCollectorPod)
	o.Expect(err).ToNot(o.HaveOccurred())
	grepCmd := fmt.Sprintf("grep %s %s", textToExist, podLogs)
	textToExistLogs, err := exec.Command("bash", "-c", grepCmd).Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(len(textToExistLogs)).To(o.BeNumerically(">", 0))
	// "|| true" keeps grep's exit status 0 when there are (correctly) no matches.
	grepCmd = fmt.Sprintf("grep %s %s || true", textToNotExist, podLogs)
	textToNotExistLogs, err := exec.Command("bash", "-c", grepCmd).Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(len(textToNotExistLogs)).To(o.BeNumerically("==", 0), string(textToNotExistLogs))
	g.By("Verify OTEL prometheus has metrics")
	command := fmt.Sprintf("curl -s localhost:%s/metrics | grep 'netobserv_workload_flows_total{' | head -1 | awk '{print $2}'", promEndpoint)
	cmd := []string{"-n", namespace, otelCollectorPod, "--", "/bin/sh", "-c", command}
	count, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	nCount, err := strconv.Atoi(strings.Trim(count, "\n"))
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(nCount).To(o.BeNumerically(">", 0))
})
| |||||
test
|
openshift/openshift-tests-private
|
b6bdb182-6733-4669-b360-91901a49f12b
|
test_flowcollector
|
import (
"encoding/json"
"fmt"
"os"
"os/exec"
filePath "path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
package netobserv
import (
"encoding/json"
"fmt"
"os"
"os/exec"
filePath "path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-netobserv] Network_Observability", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("netobserv", exutil.KubeConfigPath())
// NetObserv Operator variables
netobservNS = "openshift-netobserv-operator"
NOPackageName = "netobserv-operator"
NOcatSrc = Resource{"catsrc", "netobserv-konflux-fbc", "openshift-marketplace"}
NOSource = CatalogSourceObjects{"stable", NOcatSrc.Name, NOcatSrc.Namespace}
// Template directories
baseDir = exutil.FixturePath("testdata", "netobserv")
lokiDir = exutil.FixturePath("testdata", "netobserv", "loki")
networkingDir = exutil.FixturePath("testdata", "netobserv", "networking")
subscriptionDir = exutil.FixturePath("testdata", "netobserv", "subscription")
flowFixturePath = filePath.Join(baseDir, "flowcollector_v1beta2_template.yaml")
// Operator namespace object
OperatorNS = OperatorNamespace{
Name: netobservNS,
NamespaceTemplate: filePath.Join(subscriptionDir, "namespace.yaml"),
}
NO = SubscriptionObjects{
OperatorName: "netobserv-operator",
Namespace: netobservNS,
PackageName: NOPackageName,
Subscription: filePath.Join(subscriptionDir, "sub-template.yaml"),
OperatorGroup: filePath.Join(subscriptionDir, "allnamespace-og.yaml"),
CatalogSource: &NOSource,
}
// Loki Operator variables
lokiNS = "openshift-operators-redhat"
lokiPackageName = "loki-operator"
lokiSource CatalogSourceObjects
ls *lokiStack
Lokiexisting = false
LO = SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: lokiNS,
PackageName: lokiPackageName,
Subscription: filePath.Join(subscriptionDir, "sub-template.yaml"),
OperatorGroup: filePath.Join(subscriptionDir, "allnamespace-og.yaml"),
CatalogSource: &lokiSource,
}
)
// Per-spec setup: install/verify the NetObserv operator, the Loki operator
// and a ready LokiStack in the test namespace. Unmet preconditions
// (disconnected profile, missing Loki channel, missing storage class or
// object storage, undersized cluster) cause g.Skip rather than a failure.
g.BeforeEach(func() {
// check if test triggered as level0
testImportance := os.Getenv("TEST_IMPORTANCE")
if testImportance == "LEVEL0" {
g.By("Tests triggered as Level0; Use redhat-operators catSrc")
NOcatSrc.Name = "redhat-operators"
NOSource.SourceName = NOcatSrc.Name
} else {
if strings.Contains(os.Getenv("E2E_RUN_TAGS"), "disconnected") {
g.Skip("Skipping tests for disconnected profiles")
}
g.By("Deploy konflux FBC and ImageDigestMirrorSet")
imageDigest := filePath.Join(subscriptionDir, "image-digest-mirror-set.yaml")
catSrcTemplate := filePath.Join(subscriptionDir, "catalog-source.yaml")
catsrcErr := NOcatSrc.applyFromTemplate(oc, "-n", NOcatSrc.Namespace, "-f", catSrcTemplate)
o.Expect(catsrcErr).NotTo(o.HaveOccurred())
WaitUntilCatSrcReady(oc, NOcatSrc.Name)
ApplyResourceFromFile(oc, netobservNS, imageDigest)
}
ipStackType := checkIPStackType(oc)
g.By(fmt.Sprintf("Subscribe operators to %s channel", NOSource.Channel))
// check if Network Observability Operator is already present
NOexisting := CheckOperatorStatus(oc, NO.Namespace, NO.PackageName)
// create operatorNS and deploy operator if not present
if !NOexisting {
OperatorNS.DeployOperatorNamespace(oc)
NO.SubscribeOperator(oc)
// check if NO operator is deployed
WaitForPodsReadyWithLabel(oc, NO.Namespace, "app="+NO.OperatorName)
NOStatus := CheckOperatorStatus(oc, NO.Namespace, NO.PackageName)
o.Expect((NOStatus)).To(o.BeTrue())
// check if flowcollector API exists
flowcollectorAPIExists, err := isFlowCollectorAPIExists(oc)
o.Expect((flowcollectorAPIExists)).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
}
// LokiStack needs a minimum resource footprint; skip on undersized clusters.
if !validateInfraAndResourcesForLoki(oc, "10Gi", "6") {
g.Skip("Current platform does not have enough resources available for this test!")
}
g.By("Deploy loki operator")
// check if Loki Operator exists
namespace := oc.Namespace()
Lokiexisting = CheckOperatorStatus(oc, LO.Namespace, LO.PackageName)
lokiChannel, err := getLokiChannel(oc, "redhat-operators")
if err != nil || lokiChannel == "" {
g.Skip("Loki channel not found, skip this case")
}
lokiSource = CatalogSourceObjects{lokiChannel, "redhat-operators", "openshift-marketplace"}
// Don't delete if Loki Operator existed already before NetObserv
// unless it is not using the 'stable' operator
// If Loki Operator was installed by NetObserv tests,
// it will install and uninstall after each spec/test.
if !Lokiexisting {
LO.SubscribeOperator(oc)
WaitForPodsReadyWithLabel(oc, LO.Namespace, "name="+LO.OperatorName)
} else {
channelName, err := checkOperatorChannel(oc, LO.Namespace, LO.PackageName)
o.Expect(err).NotTo(o.HaveOccurred())
// A pre-existing operator on the wrong channel is replaced and from then on
// treated as test-owned (Lokiexisting=false) so AfterEach uninstalls it.
if channelName != lokiChannel {
e2e.Logf("found %s channel for loki operator, removing and reinstalling with %s channel instead", channelName, lokiSource.Channel)
LO.uninstallOperator(oc)
LO.SubscribeOperator(oc)
WaitForPodsReadyWithLabel(oc, LO.Namespace, "name="+LO.OperatorName)
Lokiexisting = false
}
}
g.By("Deploy lokiStack")
// get storageClass Name
sc, err := getStorageClassName(oc)
if err != nil || len(sc) == 0 {
g.Skip("StorageClass not found in cluster, skip this case")
}
lokiTenant := "openshift-network"
lokiStackTemplate := filePath.Join(lokiDir, "lokistack-simple.yaml")
objectStorageType := getStorageType(oc)
// IPv6-only clusters are exempted here because they get a hard-coded s3
// storage type below instead of the auto-detected one.
if len(objectStorageType) == 0 && ipStackType != "ipv6single" {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
ls = &lokiStack{
Name: "lokistack",
Namespace: namespace,
TSize: "1x.demo",
StorageType: objectStorageType,
StorageSecret: "objectstore-secret",
StorageClass: sc,
BucketName: "netobserv-loki-" + getInfrastructureName(oc),
Tenant: lokiTenant,
Template: lokiStackTemplate,
}
if ipStackType == "ipv6single" {
e2e.Logf("running IPv6 test")
ls.EnableIPV6 = "true"
ls.StorageType = "s3"
}
// Each deployment step skips (not fails) the spec if it cannot complete.
err = ls.prepareResourcesForLokiStack(oc)
if err != nil {
g.Skip("Skipping test since LokiStack resources were not deployed")
}
err = ls.deployLokiStack(oc)
if err != nil {
g.Skip("Skipping test since LokiStack was not deployed")
}
lokiStackResource := Resource{"lokistack", ls.Name, ls.Namespace}
err = lokiStackResource.waitForResourceToAppear(oc)
if err != nil {
g.Skip("Skipping test since LokiStack did not become ready")
}
err = ls.waitForLokiStackToBeReady(oc)
if err != nil {
g.Skip("Skipping test since LokiStack is not ready")
}
// Gateway route used by the specs to query Loki directly.
ls.Route = "https://" + getRouteAddress(oc, ls.Namespace, ls.Name)
})
// Per-spec teardown: remove the LokiStack and its object storage, and
// uninstall the Loki operator only when this suite installed it
// (Lokiexisting is false in that case).
g.AfterEach(func() {
// ls stays nil when BeforeEach skipped the spec before constructing the
// LokiStack; guard so cleanup of a skipped spec does not nil-panic.
if ls != nil {
ls.removeLokiStack(oc)
ls.removeObjectStorage(oc)
}
if !Lokiexisting {
LO.uninstallOperator(oc)
}
})
// Metrics specs: exercise the FLP, eBPF-agent and Console plugin metrics
// endpoints with metrics-server TLS disabled (plain http scrape) and with
// TLS in "Auto" mode (https scrape with an operator-provided serving cert).
g.Context("FLP, eBPF and Console metrics:", func() {
g.When("processor.metrics.TLS == Disabled and agent.ebpf.metrics.TLS == Disabled", func() {
g.It("Author:aramesha-LEVEL0-Critical-50504-Critical-72959-Verify flowlogs-pipeline and eBPF metrics and health [Serial]", func() {
var (
flpPromSM = "flowlogs-pipeline-monitor"
namespace = oc.Namespace()
eBPFPromSM = "ebpf-agent-svc-monitor"
curlLive = "http://localhost:8080/live"
)
g.By("Deploy flowcollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
FLPMetricServerTLSType: "Disabled",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify flowlogs-pipeline metrics")
FLPpods, err := exutil.GetAllPodsWithLabel(oc, namespace, "app=flowlogs-pipeline")
o.Expect(err).NotTo(o.HaveOccurred())
// The liveness endpoint answers "{}" when the pod is healthy.
for _, pod := range FLPpods {
command := []string{"exec", "-n", namespace, pod, "--", "curl", "-s", curlLive}
output, err := oc.AsAdmin().WithoutNamespace().Run(command...).Args().Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.Equal("{}"))
}
// With TLS disabled the ServiceMonitor must scrape over plain http.
FLPtlsScheme, err := getMetricsScheme(oc, flpPromSM, flow.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
FLPtlsScheme = strings.Trim(FLPtlsScheme, "'")
o.Expect(FLPtlsScheme).To(o.Equal("http"))
g.By("Wait for a min before scraping metrics")
time.Sleep(60 * time.Second)
g.By("Verify prometheus is able to scrape FLP metrics")
verifyFLPMetrics(oc)
g.By("Verify eBPF agent metrics")
eBPFpods, err := exutil.GetAllPodsWithLabel(oc, namespace, "app=netobserv-ebpf-agent")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range eBPFpods {
command := []string{"exec", "-n", namespace, pod, "--", "curl", "-s", curlLive}
output, err := oc.AsAdmin().WithoutNamespace().Run(command...).Args().Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.Equal("{}"))
}
// eBPF agent resources live in the "<namespace>-privileged" companion namespace.
eBPFtlsScheme, err := getMetricsScheme(oc, eBPFPromSM, flow.Namespace+"-privileged")
o.Expect(err).NotTo(o.HaveOccurred())
eBPFtlsScheme = strings.Trim(eBPFtlsScheme, "'")
o.Expect(eBPFtlsScheme).To(o.Equal("http"))
g.By("Wait for a min before scraping metrics")
time.Sleep(60 * time.Second)
g.By("Verify prometheus is able to scrape eBPF metrics")
verifyEBPFMetrics(oc)
})
})
g.When("processor.metrics.TLS == Auto and ebpf.agent.metrics.TLS == Auto", func() {
g.It("Author:aramesha-LEVEL0-Critical-54043-Critical-66031-Critical-72959-Verify flowlogs-pipeline, eBPF and Console metrics [Serial]", func() {
var (
flpPromSM = "flowlogs-pipeline-monitor"
flpPromSA = "flowlogs-pipeline-prom"
eBPFPromSM = "ebpf-agent-svc-monitor"
eBPFPromSA = "ebpf-agent-svc-prom"
namespace = oc.Namespace()
)
// NOTE(review): only the eBPF metrics TLS type is set explicitly here even
// though the spec title covers processor TLS too — presumably the template
// defaults FLP metrics TLS to "Auto"; confirm against flowFixturePath.
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
EBPFMetricServerTLSType: "Auto",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify flowlogs-pipeline metrics")
FLPtlsScheme, err := getMetricsScheme(oc, flpPromSM, flow.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
FLPtlsScheme = strings.Trim(FLPtlsScheme, "'")
o.Expect(FLPtlsScheme).To(o.Equal("https"))
// The scrape config must pin the serving cert name "<service>.<ns>.svc".
FLPserverName, err := getMetricsServerName(oc, flpPromSM, flow.Namespace)
FLPserverName = strings.Trim(FLPserverName, "'")
o.Expect(err).NotTo(o.HaveOccurred())
FLPexpectedServerName := fmt.Sprintf("%s.%s.svc", flpPromSA, namespace)
o.Expect(FLPserverName).To(o.Equal(FLPexpectedServerName))
g.By("Wait for a min before scraping metrics")
time.Sleep(60 * time.Second)
g.By("Verify prometheus is able to scrape FLP and Console metrics")
verifyFLPMetrics(oc)
query := fmt.Sprintf("process_start_time_seconds{namespace=\"%s\", job=\"netobserv-plugin-metrics\"}", namespace)
metrics, err := getMetric(oc, query)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(popMetricValue(metrics)).Should(o.BeNumerically(">", 0))
g.By("Verify eBPF metrics")
eBPFtlsScheme, err := getMetricsScheme(oc, eBPFPromSM, flow.Namespace+"-privileged")
o.Expect(err).NotTo(o.HaveOccurred())
eBPFtlsScheme = strings.Trim(eBPFtlsScheme, "'")
o.Expect(eBPFtlsScheme).To(o.Equal("https"))
eBPFserverName, err := getMetricsServerName(oc, eBPFPromSM, flow.Namespace+"-privileged")
eBPFserverName = strings.Trim(eBPFserverName, "'")
o.Expect(err).NotTo(o.HaveOccurred())
eBPFexpectedServerName := fmt.Sprintf("%s.%s.svc", eBPFPromSA, namespace+"-privileged")
o.Expect(eBPFserverName).To(o.Equal(eBPFexpectedServerName))
g.By("Verify prometheus is able to scrape eBPF agent metrics")
verifyEBPFMetrics(oc)
})
})
})
// Flow correctness and metrics: deploys an nginx server plus a client that
// repeatedly fetches objects of a known size (100K), then validates the Loki
// flow records for that traffic and the derived workload ingress-bytes rate
// exposed to Prometheus.
g.It("Author:memodi-High-53595-High-49107-High-45304-High-54929-High-54840-High-68310-Verify flow correctness and metrics [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-54929",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-54929",
ObjectSize: "100K",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
startTime := time.Now()
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("get flowlogs from loki")
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testServerTemplate.ServerNS,
DstK8S_Namespace: testClientTemplate.ClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
g.By("Wait for 2 mins before logs gets collected and written to loki")
time.Sleep(120 * time.Second)
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords > 0")
// verify flow correctness
verifyFlowCorrectness(testClientTemplate.ObjectSize, flowRecords)
// verify inner metrics
query := fmt.Sprintf(`sum(rate(netobserv_workload_ingress_bytes_total{SrcK8S_Namespace="%s"}[1m]))`, testClientTemplate.ClientNS)
metrics := pollMetrics(oc, query)
// verify metric is between 270 and 330.
// Gomega's BeNumerically("~", expected, threshold) asserts |metrics-expected|
// <= threshold; the previous ("~", 330, 270) accepted the far wider range
// [60, 600], which contradicted the stated intent above.
o.Expect(metrics).Should(o.BeNumerically("~", 300, 30))
})
// Connection tracking: validates the conversation record types written to
// Loki by the conntrack feature — first with LogType "EndedConversations"
// (endConnection records only), then with LogType "Conversations"
// (newConnection, heartbeat and endConnection records).
g.It("Author:aramesha-NonPreRelease-Longduration-High-60701-Verify connection tracking [Serial]", func() {
namespace := oc.Namespace()
startTime := time.Now()
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-60701",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-60701",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Deploy FlowCollector with endConversations LogType")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LogType: "EndedConversations",
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
// Conversation records are queried client->server; RecordType selects the
// conntrack record kind rather than raw flows.
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ClientNS,
DstK8S_Namespace: testClientTemplate.ServerNS,
RecordType: "endConnection",
DstK8S_OwnerName: "nginx-service",
}
g.By("Verify endConnection Records from loki")
endConnectionRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(endConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of endConnectionRecords > 0")
verifyConversationRecordTime(endConnectionRecords)
// Second phase: recreate the FlowCollector with the full "Conversations"
// log type and verify all three conversation record kinds appear.
g.By("Deploy FlowCollector with Conversations LogType")
flow.DeleteFlowcollector(oc)
flow.LogType = "Conversations"
flow.CreateFlowcollector(oc)
g.By("Ensure flows are observed and all pods are running")
flow.WaitForFlowcollectorReady(oc)
g.By("Escalate SA to cluster admin")
bearerToken = getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime = time.Now()
time.Sleep(60 * time.Second)
g.By("Verify NewConnection Records from loki")
lokilabels.RecordType = "newConnection"
newConnectionRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(newConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of newConnectionRecords > 0")
verifyConversationRecordTime(newConnectionRecords)
g.By("Verify HeartbeatConnection Records from loki")
lokilabels.RecordType = "heartbeat"
heartbeatConnectionRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(heartbeatConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of heartbeatConnectionRecords > 0")
verifyConversationRecordTime(heartbeatConnectionRecords)
g.By("Verify EndConnection Records from loki")
lokilabels.RecordType = "endConnection"
endConnectionRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(endConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of endConnectionRecords > 0")
verifyConversationRecordTime(endConnectionRecords)
})
// Multi-tenancy: a regular user granted reader access must only be able to
// query Loki flows for namespaces they own. Two users are created; user[0]
// gets template + reader permissions, deploys its own client/server traffic,
// and its token is then used to query both its own namespaces (records
// expected) and the admin-owned test namespaces (no records expected).
g.It("Author:memodi-NonPreRelease-Longduration-High-63839-Verify-multi-tenancy [Disruptive][Slow]", func() {
namespace := oc.Namespace()
users, usersHTpassFile, htPassSecret := getNewUser(oc, 2)
defer userCleanup(oc, users, usersHTpassFile, htPassSecret)
g.By("Creating client server template and template CRBs for testusers")
// create templates for testuser to be used later
testUserstemplate := filePath.Join(baseDir, "testuser-client-server_template.yaml")
stdout, stderr, err := oc.AsAdmin().Run("apply").Args("-f", testUserstemplate).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stderr).To(o.BeEmpty())
// "oc apply" prints "<kind>/<name> ..."; extract the template name.
templateResource := strings.Split(stdout, " ")[0]
templateName := strings.Split(templateResource, "/")[1]
defer removeTemplatePermissions(oc, users[0].Username)
addTemplatePermissions(oc, users[0].Username)
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-63839",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err = testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-63839",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
// Check the creation error before waiting on pods, matching the server path
// above (previously the error was only inspected after AssertAllPodsToBeReady).
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
// save original context
origContxt, contxtErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
e2e.Logf("orginal context is %v", origContxt)
defer removeUserAsReader(oc, users[0].Username)
addUserAsReader(oc, users[0].Username)
origUser := oc.Username()
e2e.Logf("current user is %s", origUser)
// Restore the admin context/user regardless of how the spec ends.
defer oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", origContxt).Execute()
defer oc.ChangeUser(origUser)
oc.ChangeUser(users[0].Username)
curUser := oc.Username()
e2e.Logf("current user is %s", curUser)
user0Contxt, contxtErr := oc.WithoutNamespace().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
e2e.Logf("user0 context is %v", user0Contxt)
g.By("Deploying test server and client pods as user0")
var (
testUserServerNS = fmt.Sprintf("%s-server", users[0].Username)
testUserClientNS = fmt.Sprintf("%s-client", users[0].Username)
)
defer oc.DeleteSpecifiedNamespaceAsAdmin(testUserClientNS)
defer oc.DeleteSpecifiedNamespaceAsAdmin(testUserServerNS)
configFile := exutil.ProcessTemplate(oc, "--ignore-unknown-parameters=true", templateName, "-p", "SERVER_NS="+testUserServerNS, "-p", "CLIENT_NS="+testUserClientNS)
err = oc.WithoutNamespace().Run("create").Args("-f", configFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// only required to getFlowLogs
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testUserServerNS,
DstK8S_Namespace: testUserClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
// Check the token retrieval error before logging/using the token
// (previously the token was logged before err was examined).
user0token, err := oc.WithoutNamespace().Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("token is %s", user0token)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("get flowlogs from loki")
flowRecords, err := lokilabels.getLokiFlowLogs(user0token, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords > 0")
g.By("verify no logs are fetched from an NS that user is not admin for")
lokilabels = Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ServerNS,
DstK8S_Namespace: testClientTemplate.ClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
flowRecords, err = lokilabels.getLokiFlowLogs(user0token, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).NotTo(o.BeNumerically(">", 0), "expected number of flowRecords to be equal to 0")
})
// Upgrade: install the GA operator from the redhat-operators "stable"
// channel, record the operator and component versions from the operator pod
// env, switch the subscription source to the konflux FBC catalog, then verify
// every version changed and flows still reach Loki after the upgrade.
g.It("Author:aramesha-NonPreRelease-High-59746-NetObserv upgrade testing [Serial]", func() {
namespace := oc.Namespace()
g.By("Uninstall operator deployed by BeforeEach and delete operator NS")
NO.uninstallOperator(oc)
oc.DeleteSpecifiedNamespaceAsAdmin(netobservNS)
g.By("Deploy older version of netobserv operator")
NOcatSrc = Resource{"catsrc", "redhat-operators", "openshift-marketplace"}
NOSource = CatalogSourceObjects{"stable", NOcatSrc.Name, NOcatSrc.Namespace}
NO.CatalogSource = &NOSource
g.By(fmt.Sprintf("Subscribe operators to %s channel", NOSource.Channel))
OperatorNS.DeployOperatorNamespace(oc)
NO.SubscribeOperator(oc)
// check if NO operator is deployed
WaitForPodsReadyWithLabel(oc, netobservNS, "app="+NO.OperatorName)
NOStatus := CheckOperatorStatus(oc, netobservNS, NOPackageName)
o.Expect((NOStatus)).To(o.BeTrue())
// check if flowcollector API exists
flowcollectorAPIExists, err := isFlowCollectorAPIExists(oc)
o.Expect((flowcollectorAPIExists)).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Get NetObserv and components versions")
// NOTE(review): the Split calls below assume OPERATOR_CONDITION_NAME always
// contains ".v" and the component image env values contain ":"; they index
// [1] unconditionally and would panic otherwise — confirm upstream format.
NOCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradeNOVersion := strings.Split(NOCSV, ".v")[1]
preUpgradeEBPFVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[0].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradeEBPFVersion = strings.Split(preUpgradeEBPFVersion, ":")[1]
preUpgradeFLPVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[1].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradeFLPVersion = strings.Split(preUpgradeFLPVersion, ":")[1]
preUpgradePluginVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[2].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradePluginVersion = strings.Split(preUpgradePluginVersion, ":")[1]
g.By("Upgrade NetObserv to latest version")
// Fail fast if the subscription patch is rejected; previously the result of
// this call was discarded, so a failed patch only surfaced as a confusing
// version-comparison failure a minute later.
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("subscription", "netobserv-operator", "-n", netobservNS, "-p", `[{"op": "replace", "path": "/spec/source", "value": "netobserv-konflux-fbc"}]`, "--type=json").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for a min for operator upgrade")
time.Sleep(60 * time.Second)
WaitForPodsReadyWithLabel(oc, netobservNS, "app=netobserv-operator")
NOStatus = CheckOperatorStatus(oc, netobservNS, NOPackageName)
o.Expect((NOStatus)).To(o.BeTrue())
g.By("Get NetObserv operator and components versions")
NOCSV, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradeNOVersion := strings.Split(NOCSV, ".v")[1]
postUpgradeEBPFVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[0].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradeEBPFVersion = strings.Split(postUpgradeEBPFVersion, ":")[1]
postUpgradeFLPVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[1].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradeFLPVersion = strings.Split(postUpgradeFLPVersion, ":")[1]
postUpgradePluginVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[2].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradePluginVersion = strings.Split(postUpgradePluginVersion, ":")[1]
g.By("Verify versions are updated")
o.Expect(preUpgradeNOVersion).NotTo(o.Equal(postUpgradeNOVersion))
o.Expect(preUpgradeEBPFVersion).NotTo(o.Equal(postUpgradeEBPFVersion))
o.Expect(preUpgradeFLPVersion).NotTo(o.Equal(postUpgradeFLPVersion))
o.Expect(preUpgradePluginVersion).NotTo(o.Equal(postUpgradePluginVersion))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
})
// SCTP/ICMP/ICMPv6: generates SCTP traffic between privileged client/server
// pods (ncat on port 30102) plus ICMP pings, then verifies Loki records with
// the matching protocol numbers (132 = SCTP, 1 = ICMP, 58 = ICMPv6).
g.It("Author:aramesha-NonPreRelease-High-62989-Verify SCTP, ICMP, ICMPv6 traffic is observed [Disruptive]", func() {
namespace := oc.Namespace()
var (
sctpClientPodTemplatePath = filePath.Join(networkingDir, "sctpclient.yaml")
sctpServerPodTemplatePath = filePath.Join(networkingDir, "sctpserver.yaml")
sctpServerPodname = "sctpserver"
sctpClientPodname = "sctpclient"
)
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc)
g.By("Create netobserv-sctp NS")
SCTPns := "netobserv-sctp-62989"
defer oc.DeleteSpecifiedNamespaceAsAdmin(SCTPns)
oc.CreateSpecifiedNamespaceAsAdmin(SCTPns)
exutil.SetNamespacePrivileged(oc, SCTPns)
g.By("create sctpClientPod")
createResourceFromFile(oc, SCTPns, sctpClientPodTemplatePath)
WaitForPodsReadyWithLabel(oc, SCTPns, "name=sctpclient")
g.By("create sctpServerPod")
createResourceFromFile(oc, SCTPns, sctpServerPodTemplatePath)
WaitForPodsReadyWithLabel(oc, SCTPns, "name=sctpserver")
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
ipStackType := checkIPStackType(oc)
var sctpServerPodIP string
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP = getPodIPv4(oc, SCTPns, sctpServerPodname)
}
// On dualstack the IPv6 address below overwrites the IPv4 one, so the
// traffic in this spec runs over IPv6 on dualstack clusters.
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP = getPodIPv6(oc, SCTPns, sctpServerPodname, ipStackType)
}
g.By("sctpserver pod start to wait for sctp traffic")
// Check the Background error before touching cmd.Process: previously all
// return values were discarded, so a failed start would nil-panic in the
// deferred Kill instead of reporting the actual failure.
cmd, _, _, err := oc.AsAdmin().Run("exec").Args("-n", SCTPns, sctpServerPodname, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
defer cmd.Process.Kill()
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(SCTPns, sctpServerPodname, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
startTime := time.Now()
// Best-effort: the client command's output/error are intentionally ignored;
// success is validated below by the server process exiting.
e2eoutput.RunHostCmd(SCTPns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(SCTPns, sctpServerPodname, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
// Scenario1: Verify SCTP traffic
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: SCTPns,
DstK8S_Namespace: SCTPns,
}
g.By("Verify SCTP flows are seen on loki")
parameters := []string{"Proto=\"132\"", "DstPort=\"30102\""}
SCTPflows, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(SCTPflows)).Should(o.BeNumerically(">", 0), "expected number of SCTP flows > 0")
// Scenario2: Verify ICMP traffic
g.By("sctpclient ping sctpserver")
e2eoutput.RunHostCmd(SCTPns, sctpClientPodname, "ping -c 10 "+sctpServerPodIP)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
parameters = []string{"Proto=\"1\""}
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
parameters = []string{"Proto=\"58\""}
}
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
ICMPflows, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(ICMPflows)).Should(o.BeNumerically(">", 0), "expected number of ICMP flows > 0")
// Count echo request (type 8) / echo reply (type 0) records.
nICMPFlows := 0
for _, r := range ICMPflows {
if r.Flowlog.IcmpType == 8 || r.Flowlog.IcmpType == 0 {
nICMPFlows++
}
}
o.Expect(nICMPFlows).Should(o.BeNumerically(">", 0), "expected number of ICMP flows of type 8 or 0 (echo request or reply) > 0")
})
// DSCP: verifies the Dscp field of flow records in three scenarios —
// default traffic (DSCP 0), OVN egressQoS-marked traffic (DSCP 59), and a
// ping with an explicit TOS byte (-Q 0x80, i.e. DSCP 32).
g.It("Author:aramesha-NonPreRelease-LEVEL0-High-68125-Verify DSCP with NetObserv [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-68125",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-68125",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
exutil.By("Check cluster network type")
networkType := exutil.CheckNetworkType(oc)
o.Expect(networkType).NotTo(o.BeEmpty())
// egressQoS is an OVN-Kubernetes feature; skip its setup on other CNIs.
if networkType == "ovnkubernetes" {
g.By("Deploy egressQoS for OVN CNI")
clientDSCPPath := filePath.Join(networkingDir, "test-client-DSCP.yaml")
egressQoSPath := filePath.Join(networkingDir, "egressQoS.yaml")
g.By("Deploy nginx client pod and egressQoS")
createResourceFromFile(oc, testClientTemplate.ClientNS, clientDSCPPath)
createResourceFromFile(oc, testClientTemplate.ClientNS, egressQoSPath)
}
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
// Scenario1: Verify default DSCP value=0
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ClientNS,
DstK8S_Namespace: testClientTemplate.ServerNS,
}
parameters := []string{"SrcK8S_Name=\"client\""}
g.By("Verify DSCP value=0")
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.Dscp).To(o.Equal(0))
}
// Scenario2: Verify egress QoS feature for OVN CNI
if networkType == "ovnkubernetes" {
// The egressQoS.yaml policy marks traffic from the "client-dscp" pod;
// presumably it sets DSCP 59 — confirm against the fixture if it changes.
parameters = []string{"SrcK8S_Name=\"client-dscp\", Dscp=\"59\""}
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("Verify DSCP value=59 for flows from DSCP client pod")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows with DSCP value 59 should be > 0")
g.By("Verify DSCP value=0 for flows from pods other than DSCP client pod in test-client namespace")
parameters = []string{"SrcK8S_Name=\"client\", Dscp=\"0\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows with DSCP value 0 should be > 0")
}
// Scenario3: Explicitly passing QoS value in ping command
ipStackType := checkIPStackType(oc)
var destinationIP string
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
destinationIP = "1.1.1.1"
// NOTE(review): "dualstack" is already matched by the branch above, so the
// dualstack half of this condition is unreachable; dualstack clusters always
// use the IPv4 destination here.
} else if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
destinationIP = "::1"
}
g.By("Ping loopback address with custom QoS from client pod")
startTime = time.Now()
// "-Q 0x80" sets the IP TOS byte to 0x80, whose upper six bits are DSCP 32.
// The ping result is intentionally ignored; only the emitted flows matter.
e2eoutput.RunHostCmd(testClientTemplate.ClientNS, "client", "ping -c 10 -Q 0x80 "+destinationIP)
lokilabels = Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ClientNS,
}
parameters = []string{"DstAddr=\"" + destinationIP + "\""}
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("Verify DSCP value=32")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.Dscp).To(o.Equal(32))
}
})
// OCP-69218 / OCP-71291: with multiCluster and zone enrichment enabled, every
// flow must carry the cluster ID in K8S_ClusterName, and node-to-node flows
// must carry the source/destination availability zones.
g.It("Author:aramesha-NonPreRelease-High-69218-High-71291-Verify cluster ID and zone in multiCluster deployment [Serial]", func() {
	namespace := oc.Namespace()

	g.By("Get clusterID of the cluster")
	clusterID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[].spec.clusterID}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Cluster ID is %s", clusterID)

	g.By("Deploy FlowCollector with multiCluster and addZone enabled")
	flow := Flowcollector{
		Namespace:              namespace,
		MultiClusterDeployment: "true",
		AddZone:                "true",
		Template:               flowFixturePath,
		LokiNamespace:          namespace,
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)

	// The plugin service account needs cluster-admin to query Loki; the
	// escalation is reverted on exit.
	g.By("Escalate SA to cluster admin")
	defer func() {
		g.By("Remove cluster role")
		err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
		o.Expect(err).NotTo(o.HaveOccurred())
	}()
	err = addSAToAdmin(oc, "netobserv-plugin", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	bearerToken := getSAToken(oc, "netobserv-plugin", namespace)

	g.By("Wait for a min before logs gets collected and written to loki")
	startTime := time.Now()
	time.Sleep(60 * time.Second)

	g.By("Verify K8S_ClusterName = Cluster ID")
	clusteridlabels := Lokilabels{
		App:             "netobserv-flowcollector",
		K8S_ClusterName: clusterID,
	}
	clusterIdFlowRecords, err := clusteridlabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(clusterIdFlowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows > 0")

	g.By("Verify SrcK8S_Zone and DstK8S_Zone are present and have expected values")
	zonelabels := Lokilabels{
		App:         "netobserv-flowcollector",
		SrcK8S_Type: "Node",
		DstK8S_Type: "Node",
	}
	zoneFlowRecords, err := zonelabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
	o.Expect(err).NotTo(o.HaveOccurred())
	// Guard against a vacuous pass: the zone comparisons below only execute
	// when at least one node-to-node flow was returned.
	o.Expect(len(zoneFlowRecords)).Should(o.BeNumerically(">", 0), "expected number of node flows > 0")
	for _, r := range zoneFlowRecords {
		// Each flow's zones must match the topology.kubernetes.io/zone label
		// of the corresponding source/destination node.
		expectedSrcK8SZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", r.Flowlog.SrcK8S_HostName, "-o=jsonpath={.metadata.labels.topology\\.kubernetes\\.io/zone}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(r.Flowlog.SrcK8S_Zone).To(o.Equal(expectedSrcK8SZone))
		expectedDstK8SZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", r.Flowlog.DstK8S_HostName, "-o=jsonpath={.metadata.labels.topology\\.kubernetes\\.io/zone}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(r.Flowlog.DstK8S_Zone).To(o.Equal(expectedDstK8SZone))
	}
})
// OCP-60664 / OCP-61482: verify that the FLP and eBPF-agent PrometheusRules
// contain the expected alerts, that an alert can be disabled/re-enabled via
// spec.processor.metrics.disableAlerts, and that NetObservLokiError actually
// fires when the Loki URL is broken.
g.It("Author:memodi-NonPreRelease-Longduration-Medium-60664-Medium-61482-Alerts-with-NetObserv [Serial][Slow]", func() {
	namespace := oc.Namespace()
	flow := Flowcollector{
		Namespace:     namespace,
		Template:      flowFixturePath,
		LokiNamespace: namespace,
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)

	// verify configured alerts for flp
	g.By("Get FLP Alert name and Alert Rules")
	FLPAlertRuleName := "flowlogs-pipeline-alert"
	rules, err := getConfiguredAlertRules(oc, FLPAlertRuleName, namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(rules).To(o.ContainSubstring("NetObservNoFlows"))
	o.Expect(rules).To(o.ContainSubstring("NetObservLokiError"))

	// verify configured alerts for ebpf-agent (rule lives in the privileged namespace)
	g.By("Get EBPF Alert name and Alert Rules")
	ebpfAlertRuleName := "ebpf-agent-prom-alert"
	ebpfRules, err := getConfiguredAlertRules(oc, ebpfAlertRuleName, namespace+"-privileged")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(ebpfRules).To(o.ContainSubstring("NetObservDroppedFlows"))

	// verify disable alerts feature
	g.By("Verify alerts can be disabled")
	// Record the PrometheusRule generation so we can wait for the operator to
	// reconcile it after each FlowCollector patch. Use FLPAlertRuleName
	// consistently instead of repeating the literal.
	gen, err := getResourceGeneration(oc, "prometheusRule", FLPAlertRuleName, namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	disableAlertPatchTemp := `[{"op": "$op", "path": "/spec/processor/metrics/disableAlerts", "value": ["NetObservLokiError"]}]`
	disableAlertPatch := strings.Replace(disableAlertPatchTemp, "$op", "add", 1)
	out, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "--type=json", "-p", disableAlertPatch).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(out).To(o.ContainSubstring("patched"))
	waitForResourceGenerationUpdate(oc, "prometheusRule", FLPAlertRuleName, "generation", gen, namespace)
	rules, err = getConfiguredAlertRules(oc, FLPAlertRuleName, namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(rules).To(o.ContainSubstring("NetObservNoFlows"))
	o.Expect(rules).ToNot(o.ContainSubstring("NetObservLokiError"))

	// re-enable the alert by removing the disableAlerts entry again
	gen, err = getResourceGeneration(oc, "prometheusRule", FLPAlertRuleName, namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	disableAlertPatch = strings.Replace(disableAlertPatchTemp, "$op", "remove", 1)
	out, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "--type=json", "-p", disableAlertPatch).Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(out).To(o.ContainSubstring("patched"))
	waitForResourceGenerationUpdate(oc, "prometheusRule", FLPAlertRuleName, "generation", gen, namespace)
	rules, err = getConfiguredAlertRules(oc, FLPAlertRuleName, namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(rules).To(o.ContainSubstring("NetObservNoFlows"))
	o.Expect(rules).To(o.ContainSubstring("NetObservLokiError"))

	g.By("delete flowcollector")
	flow.DeleteFlowcollector(oc)

	// verify alert firing.
	// configure flowcollector with incorrect loki URL
	// configure very low CacheMaxFlows to have ebpf alert fired.
	flow = Flowcollector{
		Namespace:         namespace,
		Template:          flowFixturePath,
		CacheMaxFlows:     "100",
		LokiMode:          "Monolithic",
		MonolithicLokiURL: "http://loki.no-ns.svc:3100",
	}
	g.By("Deploy flowcollector with incorrect loki URL and lower cacheMaxFlows value")
	flow.CreateFlowcollector(oc)
	flow.WaitForFlowcollectorReady(oc)
	g.By("Wait for alerts to be active")
	waitForAlertToBeActive(oc, "NetObservLokiError")
})
// OCP-72875: verify that eBPF agent pods honour scheduling.tolerations and
// scheduling.nodeSelector configured under spec.agent.ebpf.advanced.
g.It("Author:aramesha-NonPreRelease-Medium-72875-Verify nodeSelector and tolerations with netobserv components [Serial]", func() {
	namespace := oc.Namespace()

	// verify tolerations
	g.By("Get worker node of the cluster")
	workerNode, err := exutil.GetFirstWorkerNode(oc)
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("Taint worker node")
	// Remove the taint on exit. The trailing '-' is the taint-removal syntax;
	// the previous form ("netobserv-agent" with no effect) was not a valid
	// taint spec and silently failed to clean up.
	defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", workerNode, "netobserv-agent-").Execute()
	err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", workerNode, "netobserv-agent=true:NoSchedule", "--overwrite").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("Deploy FlowCollector")
	flow := Flowcollector{
		Namespace:     namespace,
		Template:      flowFixturePath,
		LokiNamespace: namespace,
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)

	g.By("Add wrong toleration for eBPF spec for the taint netobserv-agent=false:NoSchedule")
	patchValue := `{"scheduling":{"tolerations":[{"effect": "NoSchedule", "key": "netobserv-agent", "value": "false", "operator": "Equal"}]}}`
	oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/advanced", "value": `+patchValue+`}]`, "--type=json").Output()
	g.By("Ensure flowcollector is ready")
	flow.WaitForFlowcollectorReady(oc)
	g.By(fmt.Sprintf("Verify eBPF pod is not scheduled on the %s", workerNode))
	// A toleration that does not match the taint value must keep the agent
	// off the tainted node.
	eBPFPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", flow.Namespace+"-privileged", "pods", "--field-selector", "spec.nodeName="+workerNode+"", "-o", "name").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(eBPFPod).Should(o.BeEmpty())

	g.By("Add correct toleration for eBPF spec for the taint netobserv-agent=true:NoSchedule")
	flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)
	patchValue = `{"scheduling":{"tolerations":[{"effect": "NoSchedule", "key": "netobserv-agent", "value": "true", "operator": "Equal"}]}}`
	oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/advanced", "value": `+patchValue+`}]`, "--type=json").Output()
	g.By("Ensure flowcollector is ready")
	flow.WaitForFlowcollectorReady(oc)
	g.By(fmt.Sprintf("Verify eBPF pod is scheduled on the node %s after applying toleration for taint netobserv-agent=true:NoSchedule", workerNode))
	eBPFPod, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", flow.Namespace+"-privileged", "pods", "--field-selector", "spec.nodeName="+workerNode+"", "-o", "name").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(eBPFPod).NotTo(o.BeEmpty())

	// verify nodeSelector
	g.By("Add netobserv label to above worker node")
	// Delete the same label key we add below; the previous code removed a
	// non-existent "test" label and leaked "netobserv-agent" on the node.
	defer exutil.DeleteLabelFromNode(oc, workerNode, "netobserv-agent")
	exutil.AddLabelToNode(oc, workerNode, "netobserv-agent", "true")

	g.By("Patch flowcollector with nodeSelector for eBPF pods")
	flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)
	patchValue = `{"scheduling":{"nodeSelector":{"netobserv-agent": "true"}}}`
	oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/advanced", "value": `+patchValue+`}]`, "--type=json").Output()
	g.By("Ensure flowcollector is ready")
	flow.WaitForFlowcollectorReady(oc)

	g.By("Verify all eBPF pods are deployed on the above worker node")
	eBPFpods, err := exutil.GetAllPodsWithLabel(oc, flow.Namespace+"-privileged", "app=netobserv-ebpf-agent")
	o.Expect(err).NotTo(o.HaveOccurred())
	for _, pod := range eBPFpods {
		nodeName, err := exutil.GetPodNodeName(oc, flow.Namespace+"-privileged", pod)
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(nodeName).To(o.Equal(workerNode))
	}
})
// OCP-63185: run the NetObserv must-gather image and verify it scrapes the
// operator, flowlogs-pipeline and eBPF agent pod logs.
g.It("Author:memodi-Medium-63185-Verify NetOberv must-gather plugin [Serial]", func() {
	namespace := oc.Namespace()

	g.By("Deploy FlowCollector")
	flow := Flowcollector{
		Namespace:     namespace,
		Template:      flowFixturePath,
		LokiNamespace: namespace,
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)

	g.By("Run must-gather command")
	mustGatherDir := "/tmp/must-gather-63185"
	// defer arguments are evaluated here, so the base directory is removed
	// even though mustGatherDir is reassigned below.
	defer exec.Command("bash", "-c", "rm -rf "+mustGatherDir).Output()
	// must-gather can exit non-zero for benign reasons; only assert that no
	// error text appears in its output.
	output, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--image", "quay.io/netobserv/must-gather", "--dest-dir="+mustGatherDir).Output()
	o.Expect(output).NotTo(o.ContainSubstring("error"))

	g.By("Verify operator namespace logs are scraped")
	// must-gather nests its output under an image-derived directory.
	mustGatherDir = mustGatherDir + "/quay-io-netobserv-must-gather-*"
	operatorlogs, err := filePath.Glob(fmt.Sprintf("%s/namespaces/openshift-netobserv-operator/pods/netobserv-controller-manager-*/manager/manager/logs/current.log", mustGatherDir))
	o.Expect(err).NotTo(o.HaveOccurred())
	// Guard before indexing: an empty glob result would otherwise panic with
	// an index-out-of-range instead of a clean test failure.
	o.Expect(operatorlogs).NotTo(o.BeEmpty())
	_, err = os.Stat(operatorlogs[0])
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("Verify flowlogs-pipeline pod logs are scraped")
	pods, err := exutil.GetAllPods(oc, namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(pods).NotTo(o.BeEmpty())
	podlogs, err := filePath.Glob(fmt.Sprintf("%s/namespaces/%s/pods/%s/flowlogs-pipeline/flowlogs-pipeline/logs/current.log", mustGatherDir, namespace, pods[0]))
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(podlogs).NotTo(o.BeEmpty())
	_, err = os.Stat(podlogs[0])
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("Verify eBPF agent pod logs are scraped")
	ebpfPods, err := exutil.GetAllPods(oc, namespace+"-privileged")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(ebpfPods).NotTo(o.BeEmpty())
	ebpflogs, err := filePath.Glob(fmt.Sprintf("%s/namespaces/%s/pods/%s/netobserv-ebpf-agent/netobserv-ebpf-agent/logs/current.log", mustGatherDir, namespace+"-privileged", ebpfPods[0]))
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(ebpflogs).NotTo(o.BeEmpty())
	_, err = os.Stat(ebpflogs[0])
	o.Expect(err).NotTo(o.HaveOccurred())
	// TODO: once supported add a check for flowcollector dumped file.
})
// OCP-73175: verify eBPF agent flow filtering. Scenario 1 rejects UDP flows
// with source port 53 and expects none in Loki; scenario 2 flips the action
// to Accept and expects only those flows. Also checks the filter metrics.
g.It("Author:aramesha-NonPreRelease-High-73175-Verify eBPF agent filtering [Serial]", func() {
	namespace := oc.Namespace()

	g.By("Deploy FlowCollector with eBPF agent flowFilter to Reject flows with SrcPort 53 and UDP protocol")
	flow := Flowcollector{
		Namespace:     namespace,
		Template:      flowFixturePath,
		LokiNamespace: namespace,
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)

	// Scenario1: With REJECT action
	g.By("Patch flowcollector with eBPF agent flowFilter to Reject flows with SrcPort 53 and UDP Protocol")
	action := "Reject"
	patchValue := `{"action": "` + action + `", "cidr": "0.0.0.0/0", "protocol": "UDP", "sourcePorts": "53", "enable": true}`
	oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/flowFilter", "value": `+patchValue+`}]`, "--type=json").Output()
	g.By("Ensure flowcollector is ready with Reject flowFilter")
	flow.WaitForFlowcollectorReady(oc)
	// check if patch is successful (jsonpath output is wrapped in single quotes)
	flowPatch, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.agent.ebpf.flowFilter.action}'").Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(flowPatch).To(o.Equal(`'Reject'`))

	// verify logs
	g.By("Escalate SA to cluster admin")
	defer func() {
		g.By("Remove cluster role")
		err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
		o.Expect(err).NotTo(o.HaveOccurred())
	}()
	err = addSAToAdmin(oc, "netobserv-plugin", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	bearerToken := getSAToken(oc, "netobserv-plugin", namespace)

	g.By("Wait for a min before logs gets collected and written to loki")
	startTime := time.Now()
	time.Sleep(60 * time.Second)
	lokilabels := Lokilabels{
		App: "netobserv-flowcollector",
	}

	g.By("Verify number of flows with on UDP Protcol with SrcPort 53 = 0")
	parameters := []string{"Proto=\"17\"", "SrcPort=\"53\""}
	flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(flowRecords)).Should(o.BeNumerically("==", 0), "expected number of flows on UDP with SrcPort 53 = 0")

	g.By("Verify number of flows on TCP Protocol > 0")
	parameters = []string{"Proto=\"6\""}
	flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows on TCP > 0")

	// Scenario2: With ACCEPT action
	g.By("Patch flowcollector with eBPF agent flowFilter to Accept flows with SrcPort 53")
	action = "Accept"
	// Quote the string value so the patch payload is valid JSON; the
	// previous bare token only parsed because kubectl tolerates YAML.
	oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/flowFilter/action", "value": "`+action+`"}]`, "--type=json").Output()
	g.By("Ensure flowcollector is ready with Accept flowFilter")
	flow.WaitForFlowcollectorReady(oc)
	// check if patch is successful
	flowPatch, err = oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.agent.ebpf.flowFilter.action}'").Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(flowPatch).To(o.Equal(`'Accept'`))

	g.By("Wait for a min before logs gets collected and written to loki")
	startTime = time.Now()
	time.Sleep(60 * time.Second)

	g.By("Verify number of flows on UDP Protocol with SrcPort 53 > 0")
	parameters = []string{"Proto=\"17\"", "SrcPort=\"53\""}
	flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows on UDP with SrcPort 53 > 0")

	g.By("Verify number of flows on TCP Protocol = 0")
	parameters = []string{"Proto=\"6\""}
	flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(flowRecords)).Should(o.BeNumerically("==", 0), "expected number of flows on TCP = 0")

	g.By("Verify prometheus is able to scrape eBPF metrics")
	verifyEBPFFilterMetrics(oc)
})
// OCP-53844: smoke test — deploy a FlowCollector and confirm that flow
// records reach Loki.
g.It("Author:memodi-Medium-53844-Sanity Test NetObserv [Serial]", func() {
	ns := oc.Namespace()

	g.By("Deploy FlowCollector")
	fc := Flowcollector{
		Namespace:     ns,
		Template:      flowFixturePath,
		LokiNamespace: ns,
	}
	defer fc.DeleteFlowcollector(oc)
	fc.CreateFlowcollector(oc)

	// The plugin SA needs cluster-admin to query Loki; revert on exit.
	g.By("Escalate SA to cluster admin")
	defer func() {
		g.By("Remove cluster role")
		o.Expect(removeSAFromAdmin(oc, "netobserv-plugin", ns)).NotTo(o.HaveOccurred())
	}()
	o.Expect(addSAToAdmin(oc, "netobserv-plugin", ns)).NotTo(o.HaveOccurred())
	token := getSAToken(oc, "netobserv-plugin", ns)

	g.By("Wait for a min before logs gets collected and written to loki")
	queryStart := time.Now()
	time.Sleep(60 * time.Second)

	g.By("Verify flows are written to loki")
	labels := Lokilabels{App: "netobserv-flowcollector"}
	records, err := labels.getLokiFlowLogs(token, ls.Route, queryStart)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(records)).Should(o.BeNumerically(">", 0), "expected number of flows written to loki > 0")
})
// OCP-67782: download a large (100M) object through an nginx test server and
// verify the enriched egress flow logs exist and reflect the transfer.
g.It("Author:aramesha-High-67782-Verify large volume downloads [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploy FlowCollector")
// EBPFCacheActiveTimeout raised to 30s so the long transfer is aggregated
// into fewer, larger flow records before being flushed.
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
EBPFCacheActiveTimeout: "30s",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// The netobserv-plugin SA needs cluster-admin to query Loki; revert on exit.
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err := removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err := addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Deploy test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
// LargeBlob makes the nginx server expose a large downloadable object.
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-67782",
Template: serverTemplate,
LargeBlob: "yes",
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err = testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
// The client downloads a 100M object from the server namespace.
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-67782",
ObjectSize: "100M",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Wait for 2 mins before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(120 * time.Second)
// Select only FlowDirection "0" flows from the nginx service towards the
// client namespace.
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ServerNS,
DstK8S_Namespace: testClientTemplate.ClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
g.By("Verify flows are written to loki")
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows written to loki > 0")
g.By("Verify flow correctness")
// Cross-checks the reported flow volumes against the 100M object size.
verifyFlowCorrectness(testClientTemplate.ObjectSize, flowRecords)
})
// OCP-75656: verify TCP-flag handling — the eBPF flowFilter rejects SYN-ACK
// flows, custom FLP metrics detect a SYN flood, and the corresponding
// alerting rules fire.
g.It("Author:aramesha-High-75656-Verify TCP flags [Disruptive]", func() {
	namespace := oc.Namespace()
	SYNFloodMetricsPath := filePath.Join(baseDir, "SYN_flood_metrics_template.yaml")
	SYNFloodAlertsPath := filePath.Join(baseDir, "SYN_flood_alert_template.yaml")

	g.By("Get kubeadmin token")
	kubeAdminPasswd := os.Getenv("QE_KUBEADMIN_PASSWORD")
	if kubeAdminPasswd == "" {
		g.Skip("no kubeAdminPasswd is provided in this profile, skip it")
	}
	serverUrl, serverUrlErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-server").Output()
	o.Expect(serverUrlErr).NotTo(o.HaveOccurred())
	currentContext, currentContextErr := oc.WithoutNamespace().Run("config").Args("current-context").Output()
	o.Expect(currentContextErr).NotTo(o.HaveOccurred())
	// Logging in as kubeadmin switches the kubeconfig context; restore it.
	defer func() {
		rollbackCtxErr := oc.WithoutNamespace().Run("config").Args("set", "current-context", currentContext).Execute()
		o.Expect(rollbackCtxErr).NotTo(o.HaveOccurred())
	}()
	kubeadminToken := getKubeAdminToken(oc, kubeAdminPasswd, serverUrl, currentContext)
	o.Expect(kubeadminToken).NotTo(o.BeEmpty())

	g.By("Deploy FlowCollector")
	flow := Flowcollector{
		Namespace:     namespace,
		Template:      flowFixturePath,
		LokiNamespace: namespace,
	}
	defer flow.DeleteFlowcollector(oc)
	flow.CreateFlowcollector(oc)

	g.By("Patch flowcollector with eBPF agent flowFilter to Reject flows with tcpFlags SYN-ACK and TCP Protocol")
	patchValue := `{"action": "Reject", "cidr": "0.0.0.0/0", "protocol": "TCP", "tcpFlags": "SYN-ACK", "enable": true}`
	oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/flowFilter", "value": `+patchValue+`}]`, "--type=json").Output()
	g.By("Ensure flowcollector is ready with Reject flowFilter")
	flow.WaitForFlowcollectorReady(oc)
	// check if patch is successful
	flowPatch, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.agent.ebpf.flowFilter.action}'").Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(flowPatch).To(o.Equal(`'Reject'`))

	g.By("Deploy custom metrics to detect SYN flooding")
	customMetrics := CustomMetrics{
		Namespace: namespace,
		Template:  SYNFloodMetricsPath,
	}
	// Track the dynamic FLP config's resourceVersion so we can wait until the
	// new metrics are reconciled into it.
	curv, err := getResourceVersion(oc, "cm", "flowlogs-pipeline-config-dynamic", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	customMetrics.createCustomMetrics(oc)
	waitForResourceGenerationUpdate(oc, "cm", "flowlogs-pipeline-config-dynamic", "resourceVersion", curv, namespace)

	g.By("Deploy SYN flooding alert rule")
	// .Execute() is required — without it the deferred statement only builds
	// the command and the alerting rule is never deleted.
	defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertingrule.monitoring.openshift.io", "netobserv-syn-alerts", "-n", "openshift-monitoring").Execute()
	configFile := exutil.ProcessTemplate(oc, "--ignore-unknown-parameters=true", "-f", SYNFloodAlertsPath, "-p", "Namespace=openshift-monitoring")
	err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile).Execute()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Deploy test client pod to induce SYN flooding")
	template := filePath.Join(baseDir, "test-SYN-flood-client_template.yaml")
	testTemplate := TestClientTemplate{
		ClientNS: "test-client-75656",
		Template: template,
	}
	defer oc.DeleteSpecifiedNamespaceAsAdmin(testTemplate.ClientNS)
	configFile = exutil.ProcessTemplate(oc, "--ignore-unknown-parameters=true", "-f", testTemplate.Template, "-p", "CLIENT_NS="+testTemplate.ClientNS)
	err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile).Execute()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Wait for a min before logs gets collected and written to loki")
	startTime := time.Now()
	time.Sleep(60 * time.Second)
	lokilabels := Lokilabels{
		App: "netobserv-flowcollector",
	}

	g.By("Verify no flows with SYN_ACK TCP flag")
	parameters := []string{"Flags=\"SYN_ACK\""}
	flowRecords, err := lokilabels.getLokiFlowLogs(kubeadminToken, ls.Route, startTime, parameters...)
	o.Expect(err).NotTo(o.HaveOccurred())
	// The Loki query also matches flows carrying SYN and ACK as separate
	// flags, so inspect each record's flag list explicitly. (The previous
	// `count` variable was never incremented, making its ==0 check vacuous.)
	for _, r := range flowRecords {
		for _, f := range r.Flowlog.Flags {
			o.Expect(f).ToNot(o.Equal("SYN_ACK"))
		}
	}

	g.By("Verify SYN flooding flows")
	parameters = []string{"Flags=\"SYN\"", "DstAddr=\"192.168.1.159\""}
	flowRecords, err = lokilabels.getLokiFlowLogs(kubeadminToken, ls.Route, startTime, parameters...)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of SYN flows > 0")
	for _, r := range flowRecords {
		// A bare SYN segment is 54 bytes on the wire (Ethernet + IPv4 + TCP
		// headers with no payload).
		o.Expect(r.Flowlog.Bytes).Should(o.BeNumerically("==", 54))
	}

	g.By("Wait for alerts to be active")
	waitForAlertToBeActive(oc, "NetObserv-SYNFlood-out")
	waitForAlertToBeActive(oc, "NetObserv-SYNFlood-in")
})
// OCP-76537: verify flow enrichment for VM secondary (layer-2 NAD) interfaces.
// Requires baremetal workers and a kubeadmin password; deploys OpenShift
// Virtualization, a NetworkAttachmentDefinition and two VMs, then checks that
// flows between the VMs on the secondary network are enriched with the
// virt-launcher pod names, owner type and network name.
g.It("Author:aramesha-NonPreRelease-Longduration-High-76537-Verify flow enrichment for VM's secondary interfaces [Disruptive][Slow]", func() {
namespace := oc.Namespace()
testNS := "test-76537"
virtOperatorNS := "openshift-cnv"
if !hasMetalWorkerNodes(oc) {
g.Skip("Cluster does not have baremetal workers. Skip this test!")
}
g.By("Get kubeadmin token")
kubeAdminPasswd := os.Getenv("QE_KUBEADMIN_PASSWORD")
if kubeAdminPasswd == "" {
g.Skip("no kubeAdminPasswd is provided in this profile, skip it")
}
serverUrl, serverUrlErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-server").Output()
o.Expect(serverUrlErr).NotTo(o.HaveOccurred())
currentContext, currentContextErr := oc.WithoutNamespace().Run("config").Args("current-context").Output()
o.Expect(currentContextErr).NotTo(o.HaveOccurred())
// Logging in as kubeadmin switches the kubeconfig context; restore it on exit.
defer func() {
rollbackCtxErr := oc.WithoutNamespace().Run("config").Args("set", "current-context", currentContext).Execute()
o.Expect(rollbackCtxErr).NotTo(o.HaveOccurred())
}()
kubeadminToken := getKubeAdminToken(oc, kubeAdminPasswd, serverUrl, currentContext)
o.Expect(kubeadminToken).NotTo(o.BeEmpty())
virtualizationDir := exutil.FixturePath("testdata", "netobserv", "virtualization")
kubevirtHyperconvergedPath := filePath.Join(virtualizationDir, "kubevirt-hyperconverged.yaml")
layer2NadPath := filePath.Join(virtualizationDir, "layer2-nad.yaml")
testVM1 := filePath.Join(virtualizationDir, "test-vm1.yaml")
testVM2 := filePath.Join(virtualizationDir, "test-vm2.yaml")
g.By("Deploy openshift-cnv namespace")
OperatorNS.Name = virtOperatorNS
OperatorNS.DeployOperatorNamespace(oc)
g.By("Deploy Openshift Virtualization operator")
virtCatsrc := Resource{"catsrc", "redhat-operators", "openshift-marketplace"}
virtPackageName := "kubevirt-hyperconverged"
virtSource := CatalogSourceObjects{"stable", virtCatsrc.Name, virtCatsrc.Namespace}
VO := SubscriptionObjects{
OperatorName: "kubevirt-hyperconverged",
Namespace: virtOperatorNS,
PackageName: virtPackageName,
Subscription: filePath.Join(subscriptionDir, "sub-template.yaml"),
OperatorGroup: filePath.Join(subscriptionDir, "singlenamespace-og.yaml"),
CatalogSource: &virtSource,
}
defer VO.uninstallOperator(oc)
VO.SubscribeOperator(oc)
WaitForPodsReadyWithLabel(oc, VO.Namespace, "name=virt-operator")
g.By("Deploy OpenShift Virtualization Deployment CR")
defer deleteResource(oc, "hyperconverged", "kubevirt-hyperconverged", virtOperatorNS)
_, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", kubevirtHyperconvergedPath).Output()
o.Expect(err).ToNot(o.HaveOccurred())
// Wait a min for hyperconverged pods to come up
time.Sleep(60 * time.Second)
waitUntilHyperConvergedReady(oc, "kubevirt-hyperconverged", virtOperatorNS)
WaitForPodsReadyWithLabel(oc, virtOperatorNS, "app.kubernetes.io/managed-by=virt-operator")
g.By("Deploy Network Attachment Definition in test-76537 namespace")
defer deleteNamespace(oc, testNS)
defer deleteResource(oc, "net-attach-def", "l2-network", testNS)
_, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", layer2NadPath).Output()
o.Expect(err).ToNot(o.HaveOccurred())
// Wait a min for NAD to come up
time.Sleep(60 * time.Second)
checkNAD(oc, "l2-network", testNS)
g.By("Deploy test VM1")
defer deleteResource(oc, "vm", "test-vm1", testNS)
_, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", testVM1, "-n", testNS).Output()
o.Expect(err).ToNot(o.HaveOccurred())
waitUntilVMReady(oc, "test-vm1", testNS)
// The Loki queries below only need to cover traffic from this point on.
startTime := time.Now()
g.By("Deploy test VM2")
defer deleteResource(oc, "vm", "test-vm2", testNS)
_, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", testVM2, "-n", testNS).Output()
o.Expect(err).ToNot(o.HaveOccurred())
waitUntilVMReady(oc, "test-vm2", testNS)
// Index the secondary network by MAC so the processor can enrich its flows
// with pod metadata.
secondaryNetworkConfig := map[string]interface{}{
"index": []interface{}{"MAC"},
"name": "test-76537/l2-network",
}
config, err := json.Marshal(secondaryNetworkConfig)
o.Expect(err).ToNot(o.HaveOccurred())
secNetConfig := string(config)
g.By("Deploy FlowCollector with secondary Network config")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
EBPFPrivileged: "true",
SecondayNetworks: []string{secNetConfig},
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify flowcollector is deployed with Secondary Network config")
secondaryNetworkName, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.processor.advanced.secondaryNetworks[0].name}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secondaryNetworkName).To(o.Equal(`'test-76537/l2-network'`))
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
// Query VM2 -> VM1 traffic on the layer-2 network; the 10.10.10.x addresses
// presumably come from the VM manifests — confirm against the fixtures.
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testNS,
SrcK8S_OwnerName: "test-vm2",
DstK8S_Namespace: testNS,
DstK8S_OwnerName: "test-vm1",
}
parameters := []string{"DstAddr=\"10.10.10.15\"", "SrcAddr=\"10.10.10.14\""}
g.By("Verify flows are written to loki")
flowRecords, err := lokilabels.getLokiFlowLogs(kubeadminToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows written to loki > 0")
g.By("Verify flow logs are enriched")
// Get VM1 pod name and node
vm1podname, err := exutil.GetAllPodsWithLabel(oc, testNS, "vm.kubevirt.io/name=test-vm1")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm1podname).NotTo(o.BeEmpty())
vm1node, err := exutil.GetPodNodeName(oc, testNS, vm1podname[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm1node).NotTo(o.BeEmpty())
// Get vm2 pod name and node
vm2podname, err := exutil.GetAllPodsWithLabel(oc, testNS, "vm.kubevirt.io/name=test-vm2")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm2podname).NotTo(o.BeEmpty())
vm2node, err := exutil.GetPodNodeName(oc, testNS, vm2podname[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm2node).NotTo(o.BeEmpty())
// Every matched flow must be enriched with the VM pod names, the
// VirtualMachineInstance owner type and the secondary network name.
for _, r := range flowRecords {
o.Expect(r.Flowlog.DstK8S_Name).Should(o.ContainSubstring(vm1podname[0]))
o.Expect(r.Flowlog.SrcK8S_Name).Should(o.ContainSubstring(vm2podname[0]))
o.Expect(r.Flowlog.DstK8S_OwnerType).Should(o.ContainSubstring("VirtualMachineInstance"))
o.Expect(r.Flowlog.SrcK8S_OwnerType).Should(o.ContainSubstring("VirtualMachineInstance"))
o.Expect(r.Flowlog.DstK8S_NetworkName).Should(o.ContainSubstring("test-76537/l2-network"))
o.Expect(r.Flowlog.SrcK8S_NetworkName).Should(o.ContainSubstring("test-76537/l2-network"))
}
})
// 78480: run NetObserv with every eBPF feature enabled at once
// (DNSTracking, PacketDrop, FlowRTT, PacketTranslation) and a flow
// sampling rate of 50, then verify that each feature still populates its
// fields in the Loki flow records and in the agent feature metrics.
// FIX: the "Verify PacketTranslation flows" g.By banner was emitted twice
// for the same verification; the duplicate has been removed.
g.It("Author:aramesha-NonPreRelease-Longduration-Medium-78480-NetObserv with sampling 50 [Serial][Slow]", func() {
namespace := oc.Namespace()
// DNS client pods (dnsutils1/dnsutils2) generate the TCP and UDP DNS
// traffic checked further below.
g.By("Deploy DNS pods")
DNSTemplate := filePath.Join(baseDir, "DNS-pods.yaml")
DNSNamespace := "dns-traffic"
defer oc.DeleteSpecifiedNamespaceAsAdmin(DNSNamespace)
ApplyResourceFromFile(oc, DNSNamespace, DNSTemplate)
exutil.AssertAllPodsToBeReady(oc, DNSNamespace)
// nginx server + client pods generate the HTTP traffic used by the
// PacketDrop, FlowRTT and PacketTranslation verifications.
g.By("Deploy test server and client pods")
servertemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-78480",
Template: servertemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-78480",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Deploy FlowCollector with all features enabled with sampling 50")
// Bring in the NetworkEvents feature once its GA. Dont want to skip the whole test-case if the tech-preview flag is not set
flow := Flowcollector{
Namespace: namespace,
EBPFPrivileged: "true",
EBPFeatures: []string{"\"DNSTracking\", \"PacketDrop\", \"FlowRTT\", \"PacketTranslation\""},
Sampling: "50",
LokiNamespace: namespace,
Template: flowFixturePath,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err := removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for 2 mins before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(120 * time.Second)
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
}
g.By("Verify Packet Drop flows")
lokiParams := []string{"PktDropLatestState=\"TCP_INVALID_STATE\"", "Proto=\"6\""}
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of TCP Invalid State flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.PktDropLatestDropCause).NotTo(o.BeEmpty())
o.Expect(r.Flowlog.PktDropBytes).Should(o.BeNumerically(">", 0))
o.Expect(r.Flowlog.PktDropPackets).Should(o.BeNumerically(">", 0))
}
lokiParams = []string{"PktDropLatestDropCause=\"SKB_DROP_REASON_NO_SOCKET\"", "Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of No Socket TCP flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.PktDropLatestState).NotTo(o.BeEmpty())
o.Expect(r.Flowlog.PktDropBytes).Should(o.BeNumerically(">", 0))
o.Expect(r.Flowlog.PktDropPackets).Should(o.BeNumerically(">", 0))
}
g.By("Verify flowRTT flows")
lokiParams = []string{"Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of TCP flows > 0")
for _, r := range flowRecords {
// NOTE(review): ">= 0" is trivially true for an unsigned/has-default
// field; it only guards that the field decodes — confirm whether a
// stricter bound was intended.
o.Expect(r.Flowlog.TimeFlowRttNs).Should(o.BeNumerically(">=", 0))
}
g.By("Verify TCP DNS flows")
lokilabels.DstK8S_Namespace = DNSNamespace
lokiParams = []string{"DnsFlagsResponseCode=\"NoError\"", "SrcPort=\"53\"", "DstK8S_Name=\"dnsutils1\"", "Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of TCP DNS flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.DnsLatencyMs).Should(o.BeNumerically(">=", 0))
}
g.By("Verify UDP DNS flows")
lokiParams = []string{"DnsFlagsResponseCode=\"NoError\"", "SrcPort=\"53\"", "DstK8S_Name=\"dnsutils2\"", "Proto=\"17\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of UDP DNS flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.DnsLatencyMs).Should(o.BeNumerically(">=", 0))
}
g.By("Verify PacketTranslation flows")
// Translated service flows: Service destination plus ZoneId > 0
// (presumably the conntrack zone id — confirm against the flows schema).
lokilabels = Lokilabels{
App: "netobserv-flowcollector",
DstK8S_Type: "Service",
DstK8S_Namespace: testClientTemplate.ServerNS,
SrcK8S_Namespace: testClientTemplate.ClientNS,
}
lokiParams = []string{"ZoneId>0"}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of PacketTranslation flows > 0")
verifyPacketTranslationFlows(oc, testClientTemplate.ServerNS, testClientTemplate.ClientNS, flowRecords)
g.By("Verify eBPF feature metrics")
verifyEBPFFeatureMetrics(oc, "Drops")
verifyEBPFFeatureMetrics(oc, "RTT")
verifyEBPFFeatureMetrics(oc, "DNS")
verifyEBPFFeatureMetrics(oc, "Xlat")
})
// 79015: verify the PacketTranslation eBPF feature in isolation: flows
// towards a ClusterIP Service must carry translation fields (ZoneId > 0)
// that verifyPacketTranslationFlows can correlate back to the backing pods.
g.It("Author:aramesha-NonPreRelease-High-79015-Verify PacketTranslation feature [Serial]", func() {
namespace := oc.Namespace()
// The nginx server is exposed as a ClusterIP Service so client traffic
// goes through service address translation.
g.By("Deploy test server and client pods")
servertemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-79015",
ServiceType: "ClusterIP",
Template: servertemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-79015",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Deploy FlowCollector with PacketTranslation feature enabled")
flow := Flowcollector{
Namespace: namespace,
EBPFeatures: []string{"\"PacketTranslation\""},
LokiNamespace: namespace,
Template: flowFixturePath,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// The netobserv-plugin SA token is used below to query Loki; elevated
// rights are dropped again in the deferred block.
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err := removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for 2 mins before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(120 * time.Second)
// Restrict the query to client->Service flows of this test's namespaces.
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
DstK8S_Type: "Service",
DstK8S_Namespace: testClientTemplate.ServerNS,
SrcK8S_Namespace: testClientTemplate.ClientNS,
}
// ZoneId > 0 selects translated flows (presumably the conntrack zone id
// — confirm against the flows schema).
lokiParams := []string{"ZoneId>0"}
g.By("Verify PacketTranslation flows")
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of PacketTranslation flows > 0")
verifyPacketTranslationFlows(oc, testClientTemplate.ServerNS, testClientTemplate.ClientNS, flowRecords)
})
// NetworkEvents ebpf hook only supported for OCP >= 4.17
// 77894 (TechPreview): NetworkEvents correlation — deploy one server and
// two client namespaces, then apply BANP, NetworkPolicy and ANP in turn
// and verify the flow records carry the matching OVN network events
// (allow-related / drop) for each policy type.
// FIX: the ANP template parameter key was "NAM=" (vs "NAME=" used for the
// netpol template); with --ignore-unknown-parameters=true the typo was
// silently ignored, leaving the template's NAME parameter unsubstituted.
g.It("Author:memodi-NonPreRelease-Medium-77894-TechPreview Network Policies Correlation [Serial]", func() {
// NetworkEvents eBPF hook requires the TechPreviewNoUpgrade feature set.
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("Skipping because the TechPreviewNoUpgrade is not enabled on the cluster.")
}
namespace := oc.Namespace()
g.By("Deploy client-server pods in 2 client NS and one Server NS")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-77894",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
client1Template := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClient1Template := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client1-77894",
Template: client1Template,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClient1Template.ClientNS)
err = testClient1Template.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClient1Template.ClientNS)
testClient2Template := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client2-77894",
Template: client1Template,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClient2Template.ClientNS)
err = testClient2Template.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClient2Template.ClientNS)
// create flowcollector with NWEvents.
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
EBPFeatures: []string{"\"NetworkEvents\""},
EBPFPrivileged: "true",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("get flowlogs from loki")
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
DstK8S_Namespace: testClient1Template.ServerNS,
DstK8S_Type: "Pod",
SrcK8S_Type: "Pod",
}
lokiParams := []string{"FlowDirection!=1"}
lokilabels.SrcK8S_Namespace = testClient1Template.ClientNS
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-2*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
g.By("deploy BANP policy")
banpTemplate := filePath.Join(baseDir, "networking", "baselineadminnetworkPolicy.yaml")
banpParameters := []string{"--ignore-unknown-parameters=true", "-p", "SERVER_NS=" + testClient1Template.ServerNS, "CLIENT1_NS=" + testClient1Template.ClientNS, "CLIENT2_NS=" + testClient2Template.ClientNS, "-f", banpTemplate}
// banp is a cluster scoped resource so passing empty string for NS arg.
defer deleteResource(oc, "banp", "default", "")
err = exutil.ApplyClusterResourceFromTemplateWithError(oc, banpParameters...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("check flows have NW Events")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-2*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "drop", "BaselineAdminNetworkPolicy", "Ingress")
g.By("deploy NetworkPolicy")
netpolTemplate := filePath.Join(baseDir, "networking", "networkPolicy.yaml")
netpolName := "allow-ingress"
netPolParameters := []string{"--ignore-unknown-parameters=true", "-p", "NAME=" + netpolName, "SERVER_NS=" + testClient1Template.ServerNS, "ALLOW_NS=" + testClient1Template.ClientNS, "-f", netpolTemplate}
defer deleteResource(oc, "netpol", netpolName, testClient1Template.ServerNS)
err = exutil.ApplyClusterResourceFromTemplateWithError(oc, netPolParameters...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("check flows from server to client1")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "allow-related", "NetworkPolicy", "Ingress")
g.By("check flows from server to client2")
lokilabels.SrcK8S_Namespace = testClient2Template.ClientNS
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "drop", "NetpolNamespace", "Ingress")
g.By("deploy ANP policy")
anpTemplate := filePath.Join(baseDir, "networking", "adminnetworkPolicy.yaml")
anpName := "server-ns"
// "NAME=" matches the key used for the netpol template above (was "NAM=").
anpParameters := []string{"--ignore-unknown-parameters=true", "-p", "NAME=" + anpName, "SERVER_NS=" + testClient1Template.ServerNS, "ALLOW_NS=" + testClient2Template.ClientNS, "DENY_NS=" + testClient1Template.ClientNS, "-f", anpTemplate}
defer deleteResource(oc, "anp", anpName, "")
err = exutil.ApplyClusterResourceFromTemplateWithError(oc, anpParameters...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("check flows from server to client2")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "allow-related", "AdminNetworkPolicy", "Ingress")
g.By("check flows from server to client1")
lokilabels.SrcK8S_Namespace = testClient1Template.ClientNS
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "drop", "AdminNetworkPolicy", "Ingress")
})
//Add future NetObserv + Loki test-cases here
g.Context("with Kafka", func() {
var (
kafkaDir, kafkaTopicPath string
AMQexisting = false
amq SubscriptionObjects
kafkaMetrics KafkaMetrics
kafka Kafka
kafkaTopic KafkaTopic
kafkaUser KafkaUser
)
// BeforeEach: provision a TLS-enabled Kafka cluster via the AMQ Streams
// operator (subscribing only if it is not already installed), plus the
// "network-flows" topic and the "flp-kafka" user that flowlogs-pipeline
// authenticates with, then wait for everything to be Ready.
g.BeforeEach(func() {
namespace := oc.Namespace()
kafkaDir = exutil.FixturePath("testdata", "netobserv", "kafka")
// Kafka Topic path
kafkaTopicPath = filePath.Join(kafkaDir, "kafka-topic.yaml")
// Kafka TLS Template path
kafkaTLSPath := filePath.Join(kafkaDir, "kafka-tls.yaml")
// Kafka metrics config Template path
kafkaMetricsPath := filePath.Join(kafkaDir, "kafka-metrics-config.yaml")
// Kafka User path
kafkaUserPath := filePath.Join(kafkaDir, "kafka-user.yaml")
g.By("Subscribe to AMQ operator")
kafkaSource := CatalogSourceObjects{"stable", "redhat-operators", "openshift-marketplace"}
amq = SubscriptionObjects{
OperatorName: "amq-streams-cluster-operator",
Namespace: "openshift-operators",
PackageName: "amq-streams",
Subscription: filePath.Join(subscriptionDir, "sub-template.yaml"),
CatalogSource: &kafkaSource,
}
// check if amq Streams Operator is already present
// (AMQexisting is also read by AfterEach to decide whether to uninstall)
AMQexisting = CheckOperatorStatus(oc, amq.Namespace, amq.PackageName)
if !AMQexisting {
amq.SubscribeOperator(oc)
// before creating kafka, check the existence of crd kafkas.kafka.strimzi.io
checkResource(oc, true, true, "kafka.strimzi.io", []string{"crd", "kafkas.kafka.strimzi.io", "-ojsonpath={.spec.group}"})
}
kafkaMetrics = KafkaMetrics{
Namespace: namespace,
Template: kafkaMetricsPath,
}
kafka = Kafka{
Name: "kafka-cluster",
Namespace: namespace,
Template: kafkaTLSPath,
StorageClass: ls.StorageClass,
}
kafkaTopic = KafkaTopic{
TopicName: "network-flows",
Name: kafka.Name,
Namespace: namespace,
Template: kafkaTopicPath,
}
kafkaUser = KafkaUser{
UserName: "flp-kafka",
Name: kafka.Name,
Namespace: namespace,
Template: kafkaUserPath,
}
g.By("Deploy Kafka with TLS")
kafkaMetrics.deployKafkaMetrics(oc)
kafka.deployKafka(oc)
kafkaTopic.deployKafkaTopic(oc)
kafkaUser.deployKafkaUser(oc)
g.By("Check if Kafka and Kafka topic are ready")
// wait for Kafka and KafkaTopic to be ready
waitForKafkaReady(oc, kafka.Name, kafka.Namespace)
waitForKafkaTopicReady(oc, kafkaTopic.TopicName, kafkaTopic.Namespace)
})
// AfterEach: tear down in reverse order of creation (user, topic, then
// cluster), and uninstall the AMQ operator only if this suite installed it.
g.AfterEach(func() {
kafkaUser.deleteKafkaUser(oc)
kafkaTopic.deleteKafkaTopic(oc)
kafka.deleteKafka(oc)
if !AMQexisting {
amq.uninstallOperator(oc)
}
})
// 56362/53597/56326: with DeploymentModel=Kafka and mTLS to the broker,
// verify the Kafka client certs are synced to the privileged namespace,
// the FLP-transformer metrics endpoint is scraped over HTTPS with the
// expected TLS server name, and flows still reach Loki.
g.It("Author:aramesha-NonPreRelease-Longduration-High-56362-High-53597-High-56326-Verify network flows are captured with Kafka with TLS [Serial][Slow]", func() {
namespace := oc.Namespace()
g.By("Deploy FlowCollector with Kafka TLS")
flow := Flowcollector{
Namespace: namespace,
DeploymentModel: "Kafka",
Template: flowFixturePath,
LokiNamespace: namespace,
KafkaAddress: fmt.Sprintf("kafka-cluster-kafka-bootstrap.%s:9093", namespace),
KafkaTLSEnable: "true",
KafkaNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Ensure secrets are synced")
// ensure certs are synced to privileged NS
secrets, err := getSecrets(oc, namespace+"-privileged")
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secrets).To(o.And(o.ContainSubstring(kafkaUser.UserName), o.ContainSubstring(kafka.Name+"-cluster-ca-cert")))
g.By("Verify prometheus is able to scrape metrics for FLP-Kafka")
flpPrpmSM := "flowlogs-pipeline-transformer-monitor"
tlsScheme, err := getMetricsScheme(oc, flpPrpmSM, flow.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
// values come back quoted; strip the surrounding single quotes
tlsScheme = strings.Trim(tlsScheme, "'")
o.Expect(tlsScheme).To(o.Equal("https"))
serverName, err := getMetricsServerName(oc, flpPrpmSM, flow.Namespace)
serverName = strings.Trim(serverName, "'")
o.Expect(err).NotTo(o.HaveOccurred())
flpPromSA := "flowlogs-pipeline-transformer-prom"
expectedServerName := fmt.Sprintf("%s.%s.svc", flpPromSA, namespace)
o.Expect(serverName).To(o.Equal(expectedServerName))
// verify FLP metrics are being populated with Kafka
// Sleep before making any metrics request
g.By("Verify prometheus is able to scrape FLP metrics")
time.Sleep(30 * time.Second)
verifyFLPMetrics(oc)
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
})
// 57397/65116/75340: three scenarios in sequence:
//  1) Kafka deployment model + a second Kafka exporter topic, with Loki
//     enabled and the auto-created network policy (plus an additional
//     allowed namespace) — flows must reach both Loki and the export topic;
//  2) reinstall without Loki (Direct model) — export must keep working;
//  3) reinstall with the console plugin disabled — every pod except the
//     plugin pod must come up.
g.It("Author:aramesha-NonPreRelease-Longduration-High-57397-High-65116-High-75340-Verify network-flows export with Kafka and netobserv installation without Loki and networkPolicy enabled[Serial]", func() {
namespace := oc.Namespace()
kafkaAddress := fmt.Sprintf("kafka-cluster-kafka-bootstrap.%s:9093", namespace)
g.By("Deploy kafka Topic for export")
// deploy kafka topic for export
kafkaTopic2 := KafkaTopic{
TopicName: "network-flows-export",
Name: kafka.Name,
Namespace: namespace,
Template: kafkaTopicPath,
}
defer kafkaTopic2.deleteKafkaTopic(oc)
kafkaTopic2.deployKafkaTopic(oc)
waitForKafkaTopicReady(oc, kafkaTopic2.TopicName, kafkaTopic2.Namespace)
// Exporter spec (mTLS: cluster CA + kafka user cert), marshalled to JSON
// because Flowcollector.Exporters takes raw JSON strings.
kafkaExporterConfig := map[string]interface{}{
"kafka": map[string]interface{}{
"address": kafkaAddress,
"tls": map[string]interface{}{
"caCert": map[string]interface{}{
"certFile": "ca.crt",
"name": "kafka-cluster-cluster-ca-cert",
"namespace": namespace,
"type": "secret"},
"enable": true,
"insecureSkipVerify": false,
"userCert": map[string]interface{}{
"certFile": "user.crt",
"certKey": "user.key",
"name": kafkaUser.UserName,
"namespace": namespace,
"type": "secret"},
},
"topic": kafkaTopic2.TopicName},
"type": "Kafka",
}
config, err := json.Marshal(kafkaExporterConfig)
o.Expect(err).ToNot(o.HaveOccurred())
kafkaConfig := string(config)
// Marshalling the plain string yields a JSON-quoted value for the
// networkPolicy.additionalNamespaces list entry.
networkPolicyAddNamespaces := "openshift-ingress"
config, err = json.Marshal(networkPolicyAddNamespaces)
o.Expect(err).ToNot(o.HaveOccurred())
AdditionalNamespaces := string(config)
g.By("Deploy FlowCollector with Kafka TLS")
flow := Flowcollector{
Namespace: namespace,
DeploymentModel: "Kafka",
Template: flowFixturePath,
LokiNamespace: namespace,
KafkaAddress: kafkaAddress,
KafkaTLSEnable: "true",
KafkaNamespace: namespace,
Exporters: []string{kafkaConfig},
NetworkPolicyEnable: "true",
NetworkPolicyAdditionalNamespaces: []string{AdditionalNamespaces},
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// Scenario1: Verify flows are exported with Kafka DeploymentModel and with Loki enabled
g.By("Verify flowcollector is deployed with KAFKA exporter")
exporterType, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.exporters[0].type}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(exporterType).To(o.Equal(`'Kafka'`))
g.By("Verify flowcollector is deployed with openshift-ingress in additionalNamepsaces section")
addNamespaces, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.networkPolicy.additionalNamespaces[0]}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(addNamespaces).To(o.Equal(`'openshift-ingress'`))
g.By("Ensure flows are observed, all pods are running and secrets are synced and plugin pod is deployed")
flow.WaitForFlowcollectorReady(oc)
// ensure certs are synced to privileged NS
secrets, err := getSecrets(oc, namespace+"-privileged")
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secrets).To(o.And(o.ContainSubstring(kafkaUser.UserName), o.ContainSubstring(kafka.Name+"-cluster-ca-cert")))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy Kafka consumer pod")
// using amq-streams/kafka-34-rhel8:2.5.2 version. Update if imagePull issues are observed
consumerTemplate := filePath.Join(kafkaDir, "topic-consumer-tls.yaml")
consumer := Resource{"job", kafkaTopic2.TopicName + "-consumer", namespace}
defer consumer.clear(oc)
err = consumer.applyFromTemplate(oc, "-n", consumer.Namespace, "-f", consumerTemplate, "-p", "NAME="+consumer.Name, "NAMESPACE="+consumer.Namespace, "KAFKA_TOPIC="+kafkaTopic2.TopicName, "CLUSTER_NAME="+kafka.Name, "KAFKA_USER="+kafkaUser.UserName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForPodsReadyWithLabel(oc, namespace, "job-name="+consumer.Name)
consumerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", "job-name="+consumer.Name, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Verify Kafka consumer pod logs")
// The consumer echoes the exported flow JSON; match on the AgentIP key.
podLogs, err := exutil.WaitAndGetSpecificPodLogs(oc, namespace, "", consumerPodName, `'{"AgentIP":'`)
exutil.AssertWaitPollNoErr(err, "Did not get log for the pod with job-name=network-flows-export-consumer label")
verifyFlowRecordFromLogs(podLogs)
// Scenario2: reinstall without Loki (Direct model); export must still work.
g.By("Verify NetObserv can be installed without Loki")
flow.DeleteFlowcollector(oc)
// Ensure FLP and eBPF pods are deleted
checkPodDeleted(oc, namespace, "app=flowlogs-pipeline", "flowlogs-pipeline")
checkPodDeleted(oc, namespace+"-privileged", "app=netobserv-ebpf-agent", "netobserv-ebpf-agent")
// Ensure network-policy is deleted
checkResourceDeleted(oc, "networkPolicy", "netobserv", flow.Namespace)
flow.DeploymentModel = "Direct"
flow.LokiEnable = "false"
flow.NetworkPolicyEnable = "false"
flow.CreateFlowcollector(oc)
g.By("Verify Kafka consumer pod logs")
podLogs, err = exutil.WaitAndGetSpecificPodLogs(oc, namespace, "", consumerPodName, `'{"AgentIP":'`)
exutil.AssertWaitPollNoErr(err, "Did not get log for the pod with job-name=network-flows-export-consumer label")
verifyFlowRecordFromLogs(podLogs)
g.By("Verify console plugin pod is not deployed when its disabled in flowcollector")
flow.DeleteFlowcollector(oc)
// Ensure FLP and eBPF pods are deleted
checkPodDeleted(oc, namespace, "app=flowlogs-pipeline", "flowlogs-pipeline")
checkPodDeleted(oc, namespace+"-privileged", "app=netobserv-ebpf-agent", "netobserv-ebpf-agent")
flow.PluginEnable = "false"
flow.CreateFlowcollector(oc)
// Scenario3: Verify all pods except plugin pod are present with only Plugin disabled in flowcollector
g.By("Ensure all pods except consolePlugin pod are deployed")
flow.WaitForFlowcollectorReady(oc)
consolePod, err := exutil.GetAllPodsWithLabel(oc, namespace, "app=netobserv-plugin")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(consolePod)).To(o.Equal(0))
g.By("Ensure all pods are running")
flow.WaitForFlowcollectorReady(oc)
})
// 64880/75340: deploy the FlowCollector in a dedicated "netobserv-test"
// namespace while Loki and Kafka live in the test namespace, and verify
// the operator still copies the required secrets into the privileged
// namespace, creates the network policy, and flows reach Loki.
g.It("Author:aramesha-NonPreRelease-High-64880-High-75340-Verify secrets copied for Loki and Kafka when deployed in NS other than flowcollector pods [Serial]", func() {
namespace := oc.Namespace()
g.By("Create a new namespace for flowcollector")
flowNS := "netobserv-test"
defer oc.DeleteSpecifiedNamespaceAsAdmin(flowNS)
oc.CreateSpecifiedNamespaceAsAdmin(flowNS)
g.By("Deploy FlowCollector with Kafka TLS")
// Manual Loki mode: point at the lokistack gateway in the test namespace.
lokiURL := fmt.Sprintf("https://%s-gateway-http.%s.svc.cluster.local:8080/api/logs/v1/network/", ls.Name, namespace)
flow := Flowcollector{
Namespace: flowNS,
DeploymentModel: "Kafka",
LokiMode: "Manual",
Template: flowFixturePath,
LokiURL: lokiURL,
LokiTLSCertName: fmt.Sprintf("%s-gateway-ca-bundle", ls.Name),
LokiNamespace: namespace,
KafkaAddress: fmt.Sprintf("kafka-cluster-kafka-bootstrap.%s:9093", namespace),
KafkaTLSEnable: "true",
KafkaNamespace: namespace,
NetworkPolicyEnable: "true",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify networkPolicy is deployed")
networkPolicy, err := oc.AsAdmin().Run("get").Args("networkPolicy", "netobserv", "-n", flow.Namespace).Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(networkPolicy).NotTo(o.BeEmpty())
// ensure certs are synced to privileged NS
secrets, err := getSecrets(oc, flowNS+"-privileged")
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secrets).To(o.And(o.ContainSubstring(kafkaUser.UserName), o.ContainSubstring(kafka.Name+"-cluster-ca-cert")))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", flowNS)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", flowNS)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", flowNS)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
})
//Add future NetObserv + Loki + Kafka test-cases here
})
})
|
package netobserv
| ||||
test case
|
openshift/openshift-tests-private
|
91b6b66f-22de-4c33-9ca0-ea1aae939a36
|
Author:memodi-High-53595-High-49107-High-45304-High-54929-High-54840-High-68310-Verify flow correctness and metrics [Serial]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// 53595/49107/45304/54929/54840/68310: generate known-size (100K) HTTP
// transfers between client and server pods, then verify flow-record
// correctness against the object size and check the workload ingress
// bytes metric from the internal Prometheus metrics.
g.It("Author:memodi-High-53595-High-49107-High-45304-High-54929-High-54840-High-68310-Verify flow correctness and metrics [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-54929",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-54929",
ObjectSize: "100K",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
startTime := time.Now()
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("get flowlogs from loki")
// Restrict to server->client egress records for the nginx service.
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testServerTemplate.ServerNS,
DstK8S_Namespace: testClientTemplate.ClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
g.By("Wait for 2 mins before logs gets collected and written to loki")
time.Sleep(120 * time.Second)
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords > 0")
// verify flow correctness
verifyFlowCorrectness(testClientTemplate.ObjectSize, flowRecords)
// verify inner metrics
query := fmt.Sprintf(`sum(rate(netobserv_workload_ingress_bytes_total{SrcK8S_Namespace="%s"}[1m]))`, testClientTemplate.ClientNS)
metrics := pollMetrics(oc, query)
// NOTE(review): BeNumerically("~", 330, 270) asserts |metrics-330| <= 270,
// i.e. the accepted range is [60, 600] — not "between 270 and 330" as the
// old comment claimed. Confirm the intended tolerance.
o.Expect(metrics).Should(o.BeNumerically("~", 330, 270))
})
| |||||
test case
|
openshift/openshift-tests-private
|
b6231157-29ed-42ca-b3d9-172e043d8419
|
Author:aramesha-NonPreRelease-Longduration-High-60701-Verify connection tracking [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Covers conversation tracking (OCP-60701): with logType=EndedConversations
// only endConnection records are produced; after switching to
// logType=Conversations, newConnection, heartbeat and endConnection records
// must all be present. Traffic comes from an nginx client/server pair and
// records are verified via Loki queries.
g.It("Author:aramesha-NonPreRelease-Longduration-High-60701-Verify connection tracking [Serial]", func() {
namespace := oc.Namespace()
// startTime bounds the Loki query window to flows produced by this test.
startTime := time.Now()
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-60701",
Template: serverTemplate,
}
// Deferred namespace deletion guarantees cleanup even on assertion failure.
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-60701",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Deploy FlowCollector with endConversations LogType")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LogType: "EndedConversations",
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
// The plugin SA token serves as the bearer token for Loki queries below.
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ClientNS,
DstK8S_Namespace: testClientTemplate.ServerNS,
RecordType: "endConnection",
DstK8S_OwnerName: "nginx-service",
}
g.By("Verify endConnection Records from loki")
endConnectionRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(endConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of endConnectionRecords > 0")
verifyConversationRecordTime(endConnectionRecords)
// Redeploy with the full "Conversations" log type and verify every
// conversation record kind appears.
g.By("Deploy FlowCollector with Conversations LogType")
flow.DeleteFlowcollector(oc)
flow.LogType = "Conversations"
flow.CreateFlowcollector(oc)
g.By("Ensure flows are observed and all pods are running")
flow.WaitForFlowcollectorReady(oc)
g.By("Escalate SA to cluster admin")
// NOTE(review): the SA was already escalated above; only a fresh token is
// fetched here for the post-redeploy queries.
bearerToken = getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime = time.Now()
time.Sleep(60 * time.Second)
g.By("Verify NewConnection Records from loki")
lokilabels.RecordType = "newConnection"
newConnectionRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(newConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of newConnectionRecords > 0")
verifyConversationRecordTime(newConnectionRecords)
g.By("Verify HeartbeatConnection Records from loki")
lokilabels.RecordType = "heartbeat"
heartbeatConnectionRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(heartbeatConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of heartbeatConnectionRecords > 0")
verifyConversationRecordTime(heartbeatConnectionRecords)
g.By("Verify EndConnection Records from loki")
lokilabels.RecordType = "endConnection"
endConnectionRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(endConnectionRecords)).Should(o.BeNumerically(">", 0), "expected number of endConnectionRecords > 0")
verifyConversationRecordTime(endConnectionRecords)
})
| |||||
test case
|
openshift/openshift-tests-private
|
e7c68b92-7678-44a3-8c41-598750e34c6c
|
Author:memodi-NonPreRelease-Longduration-High-63839-Verify-multi-tenancy [Disruptive][Slow]
|
['"fmt"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies Loki multi-tenancy (OCP-63839): a regular user granted the
// netobserv reader role can query flows only for namespaces they administer;
// queries against other namespaces must return no records.
g.It("Author:memodi-NonPreRelease-Longduration-High-63839-Verify-multi-tenancy [Disruptive][Slow]", func() {
namespace := oc.Namespace()
users, usersHTpassFile, htPassSecret := getNewUser(oc, 2)
defer userCleanup(oc, users, usersHTpassFile, htPassSecret)
g.By("Creating client server template and template CRBs for testusers")
// create templates for testuser to be used later
testUserstemplate := filePath.Join(baseDir, "testuser-client-server_template.yaml")
stdout, stderr, err := oc.AsAdmin().Run("apply").Args("-f", testUserstemplate).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stderr).To(o.BeEmpty())
// "oc apply" prints "<kind>/<name> ..."; extract the bare template name.
templateResource := strings.Split(stdout, " ")[0]
templateName := strings.Split(templateResource, "/")[1]
defer removeTemplatePermissions(oc, users[0].Username)
addTemplatePermissions(oc, users[0].Username)
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-63839",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err = testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-63839",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
// Fix: check the creation error BEFORE waiting on pod readiness; the
// original asserted readiness first, which hides the root-cause error.
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
// save original context
origContxt, contxtErr := oc.AsAdmin().WithoutNamespace().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
e2e.Logf("orginal context is %v", origContxt)
defer removeUserAsReader(oc, users[0].Username)
addUserAsReader(oc, users[0].Username)
origUser := oc.Username()
e2e.Logf("current user is %s", origUser)
// Restore both kube context and user identity when the test ends.
defer oc.AsAdmin().WithoutNamespace().Run("config").Args("use-context", origContxt).Execute()
defer oc.ChangeUser(origUser)
oc.ChangeUser(users[0].Username)
curUser := oc.Username()
e2e.Logf("current user is %s", curUser)
// NOTE(review): this re-checks the earlier err; ChangeUser's outcome is not
// captured here — kept for backward compatibility.
o.Expect(err).NotTo(o.HaveOccurred())
user0Contxt, contxtErr := oc.WithoutNamespace().Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
e2e.Logf("user0 context is %v", user0Contxt)
g.By("Deploying test server and client pods as user0")
var (
testUserServerNS = fmt.Sprintf("%s-server", users[0].Username)
testUserClientNS = fmt.Sprintf("%s-client", users[0].Username)
)
defer oc.DeleteSpecifiedNamespaceAsAdmin(testUserClientNS)
defer oc.DeleteSpecifiedNamespaceAsAdmin(testUserServerNS)
configFile := exutil.ProcessTemplate(oc, "--ignore-unknown-parameters=true", templateName, "-p", "SERVER_NS="+testUserServerNS, "-p", "CLIENT_NS="+testUserClientNS)
err = oc.WithoutNamespace().Run("create").Args("-f", configFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// only required to getFlowLogs
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testUserServerNS,
DstK8S_Namespace: testUserClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
user0token, err := oc.WithoutNamespace().Run("whoami").Args("-t").Output()
e2e.Logf("token is %s", user0token)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("get flowlogs from loki")
// Positive case: the user can see flows in namespaces they administer.
flowRecords, err := lokilabels.getLokiFlowLogs(user0token, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords > 0")
g.By("verify no logs are fetched from an NS that user is not admin for")
// Negative case: the same token must see nothing from admin-owned namespaces.
lokilabels = Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ServerNS,
DstK8S_Namespace: testClientTemplate.ClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
flowRecords, err = lokilabels.getLokiFlowLogs(user0token, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).NotTo(o.BeNumerically(">", 0), "expected number of flowRecords to be equal to 0")
})
| |||||
test case
|
openshift/openshift-tests-private
|
06735b32-9592-4db0-983d-8e7cffc1be5c
|
Author:aramesha-NonPreRelease-High-59746-NetObserv upgrade testing [Serial]
|
['"encoding/json"', '"fmt"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Upgrade test (OCP-59746): installs NetObserv from the stable channel of
// redhat-operators, captures operator/eBPF/FLP/plugin versions, switches the
// subscription source to the konflux FBC catalog, then verifies every version
// changed and flows still reach Loki after the upgrade.
g.It("Author:aramesha-NonPreRelease-High-59746-NetObserv upgrade testing [Serial]", func() {
namespace := oc.Namespace()
g.By("Uninstall operator deployed by BeforeEach and delete operator NS")
NO.uninstallOperator(oc)
oc.DeleteSpecifiedNamespaceAsAdmin(netobservNS)
g.By("Deploy older version of netobserv operator")
NOcatSrc = Resource{"catsrc", "redhat-operators", "openshift-marketplace"}
NOSource = CatalogSourceObjects{"stable", NOcatSrc.Name, NOcatSrc.Namespace}
NO.CatalogSource = &NOSource
g.By(fmt.Sprintf("Subscribe operators to %s channel", NOSource.Channel))
OperatorNS.DeployOperatorNamespace(oc)
NO.SubscribeOperator(oc)
// check if NO operator is deployed
WaitForPodsReadyWithLabel(oc, netobservNS, "app="+NO.OperatorName)
NOStatus := CheckOperatorStatus(oc, netobservNS, NOPackageName)
o.Expect((NOStatus)).To(o.BeTrue())
// check if flowcollector API exists
flowcollectorAPIExists, err := isFlowCollectorAPIExists(oc)
o.Expect((flowcollectorAPIExists)).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// Pre-upgrade versions: the CSV condition name has the form
// "<operator>.v<version>"; component image refs are read from operator
// container env entries (index 0 = eBPF, 1 = FLP, 2 = console plugin) and
// the version is the part after the image-tag colon.
g.By("Get NetObserv and components versions")
NOCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradeNOVersion := strings.Split(NOCSV, ".v")[1]
preUpgradeEBPFVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[0].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradeEBPFVersion = strings.Split(preUpgradeEBPFVersion, ":")[1]
preUpgradeFLPVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[1].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradeFLPVersion = strings.Split(preUpgradeFLPVersion, ":")[1]
preUpgradePluginVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[2].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
preUpgradePluginVersion = strings.Split(preUpgradePluginVersion, ":")[1]
g.By("Upgrade NetObserv to latest version")
// NOTE(review): the patch output/error is intentionally not asserted here;
// failure would surface in the version comparison below — confirm intended.
oc.AsAdmin().WithoutNamespace().Run("patch").Args("subscription", "netobserv-operator", "-n", netobservNS, "-p", `[{"op": "replace", "path": "/spec/source", "value": "netobserv-konflux-fbc"}]`, "--type=json").Output()
g.By("Wait for a min for operator upgrade")
time.Sleep(60 * time.Second)
WaitForPodsReadyWithLabel(oc, netobservNS, "app=netobserv-operator")
NOStatus = CheckOperatorStatus(oc, netobservNS, NOPackageName)
o.Expect((NOStatus)).To(o.BeTrue())
// Post-upgrade versions, read the same way as above.
g.By("Get NetObserv operator and components versions")
NOCSV, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[?(@.name=='OPERATOR_CONDITION_NAME')].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradeNOVersion := strings.Split(NOCSV, ".v")[1]
postUpgradeEBPFVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[0].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradeEBPFVersion = strings.Split(postUpgradeEBPFVersion, ":")[1]
postUpgradeFLPVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[1].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradeFLPVersion = strings.Split(postUpgradeFLPVersion, ":")[1]
postUpgradePluginVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=netobserv-operator", "-n", netobservNS, "-o=jsonpath={.items[*].spec.containers[0].env[2].value}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
postUpgradePluginVersion = strings.Split(postUpgradePluginVersion, ":")[1]
g.By("Verify versions are updated")
o.Expect(preUpgradeNOVersion).NotTo(o.Equal(postUpgradeNOVersion))
o.Expect(preUpgradeEBPFVersion).NotTo(o.Equal(postUpgradeEBPFVersion))
o.Expect(preUpgradeFLPVersion).NotTo(o.Equal(postUpgradeFLPVersion))
o.Expect(preUpgradePluginVersion).NotTo(o.Equal(postUpgradePluginVersion))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
20b45fcb-1bc6-4ee9-b422-7d205ac84d7b
|
Author:aramesha-NonPreRelease-High-62989-Verify SCTP, ICMP, ICMPv6 traffic is observed [Disruptive]
|
['"os/exec"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies protocol coverage (OCP-62989): SCTP (proto 132) flows between an
// ncat client/server pair and ICMP (proto 1) / ICMPv6 (proto 58) echo traffic
// are captured and queryable in Loki. Disruptive: loads the SCTP kernel
// module on all workers.
g.It("Author:aramesha-NonPreRelease-High-62989-Verify SCTP, ICMP, ICMPv6 traffic is observed [Disruptive]", func() {
namespace := oc.Namespace()
var (
sctpClientPodTemplatePath = filePath.Join(networkingDir, "sctpclient.yaml")
sctpServerPodTemplatePath = filePath.Join(networkingDir, "sctpserver.yaml")
sctpServerPodname = "sctpserver"
sctpClientPodname = "sctpclient"
)
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc)
g.By("Create netobserv-sctp NS")
SCTPns := "netobserv-sctp-62989"
defer oc.DeleteSpecifiedNamespaceAsAdmin(SCTPns)
oc.CreateSpecifiedNamespaceAsAdmin(SCTPns)
// Privileged namespace is required for the raw-socket SCTP test pods.
exutil.SetNamespacePrivileged(oc, SCTPns)
g.By("create sctpClientPod")
createResourceFromFile(oc, SCTPns, sctpClientPodTemplatePath)
WaitForPodsReadyWithLabel(oc, SCTPns, "name=sctpclient")
g.By("create sctpServerPod")
createResourceFromFile(oc, SCTPns, sctpServerPodTemplatePath)
WaitForPodsReadyWithLabel(oc, SCTPns, "name=sctpserver")
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
ipStackType := checkIPStackType(oc)
// Pick the server address family matching the cluster stack; on dualstack
// the ipv6 branch below overwrites the ipv4 address.
var sctpServerPodIP string
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP = getPodIPv4(oc, SCTPns, sctpServerPodname)
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP = getPodIPv6(oc, SCTPns, sctpServerPodname, ipStackType)
}
g.By("sctpserver pod start to wait for sctp traffic")
// Background listener; killed via defer. Errors from Background are
// ignored here — the ps-based check below verifies the listener is up.
cmd, _, _, _ := oc.AsAdmin().Run("exec").Args("-n", SCTPns, sctpServerPodname, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmd.Process.Kill()
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(SCTPns, sctpServerPodname, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
startTime := time.Now()
// Best-effort send; success is inferred from the listener exiting below.
e2eoutput.RunHostCmd(SCTPns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(SCTPns, sctpServerPodname, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
// Scenario1: Verify SCTP traffic
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: SCTPns,
DstK8S_Namespace: SCTPns,
}
g.By("Verify SCTP flows are seen on loki")
// Proto 132 = SCTP (IANA protocol number).
parameters := []string{"Proto=\"132\"", "DstPort=\"30102\""}
SCTPflows, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(SCTPflows)).Should(o.BeNumerically(">", 0), "expected number of SCTP flows > 0")
// Scenario2: Verify ICMP traffic
g.By("sctpclient ping sctpserver")
e2eoutput.RunHostCmd(SCTPns, sctpClientPodname, "ping -c 10 "+sctpServerPodIP)
// Proto 1 = ICMP, proto 58 = ICMPv6.
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
parameters = []string{"Proto=\"1\""}
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
parameters = []string{"Proto=\"58\""}
}
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
ICMPflows, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(ICMPflows)).Should(o.BeNumerically(">", 0), "expected number of ICMP flows > 0")
// Count echo requests (type 8) and echo replies (type 0) among the flows.
nICMPFlows := 0
for _, r := range ICMPflows {
if r.Flowlog.IcmpType == 8 || r.Flowlog.IcmpType == 0 {
nICMPFlows++
}
}
o.Expect(nICMPFlows).Should(o.BeNumerically(">", 0), "expected number of ICMP flows of type 8 or 0 (echo request or reply) > 0")
})
| |||||
test case
|
openshift/openshift-tests-private
|
bd036ea4-d493-4e70-b06e-d0cceddc0054
|
Author:aramesha-NonPreRelease-LEVEL0-High-68125-Verify DSCP with NetObserv [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies DSCP capture (OCP-68125): default flows carry Dscp=0, the OVN
// EgressQoS feature remarks a dedicated client pod's traffic to DSCP 59, and
// ping with an explicit TOS of 0x80 yields DSCP 32 (TOS >> 2) in flow logs.
g.It("Author:aramesha-NonPreRelease-LEVEL0-High-68125-Verify DSCP with NetObserv [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploying test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-68125",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-68125",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
exutil.By("Check cluster network type")
networkType := exutil.CheckNetworkType(oc)
o.Expect(networkType).NotTo(o.BeEmpty())
// EgressQoS is an OVN-Kubernetes feature; skip its setup on other CNIs.
if networkType == "ovnkubernetes" {
g.By("Deploy egressQoS for OVN CNI")
clientDSCPPath := filePath.Join(networkingDir, "test-client-DSCP.yaml")
egressQoSPath := filePath.Join(networkingDir, "egressQoS.yaml")
g.By("Deploy nginx client pod and egressQoS")
createResourceFromFile(oc, testClientTemplate.ClientNS, clientDSCPPath)
createResourceFromFile(oc, testClientTemplate.ClientNS, egressQoSPath)
}
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
// Scenario1: Verify default DSCP value=0
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ClientNS,
DstK8S_Namespace: testClientTemplate.ServerNS,
}
parameters := []string{"SrcK8S_Name=\"client\""}
g.By("Verify DSCP value=0")
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.Dscp).To(o.Equal(0))
}
// Scenario2: Verify egress QoS feature for OVN CNI
if networkType == "ovnkubernetes" {
// The egressQoS fixture remarks the "client-dscp" pod's traffic to 59.
parameters = []string{"SrcK8S_Name=\"client-dscp\", Dscp=\"59\""}
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("Verify DSCP value=59 for flows from DSCP client pod")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows with DSCP value 59 should be > 0")
g.By("Verify DSCP value=0 for flows from pods other than DSCP client pod in test-client namespace")
parameters = []string{"SrcK8S_Name=\"client\", Dscp=\"0\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows with DSCP value 0 should be > 0")
}
// Scenario3: Explicitly passing QoS value in ping command
ipStackType := checkIPStackType(oc)
var destinationIP string
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
destinationIP = "1.1.1.1"
} else if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
destinationIP = "::1"
}
g.By("Ping loopback address with custom QoS from client pod")
startTime = time.Now()
// ping -Q 0x80 sets TOS 128; the DSCP field is the top 6 bits, i.e. 32.
e2eoutput.RunHostCmd(testClientTemplate.ClientNS, "client", "ping -c 10 -Q 0x80 "+destinationIP)
lokilabels = Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ClientNS,
}
parameters = []string{"DstAddr=\"" + destinationIP + "\""}
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("Verify DSCP value=32")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.Dscp).To(o.Equal(32))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d243c565-26c1-4956-a217-a6809eb6f883
|
Author:aramesha-NonPreRelease-High-69218-High-71291-Verify cluster ID and zone in multiCluster deployment [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies multi-cluster labels (OCP-69218/71291): with multiCluster and
// addZone enabled, K8S_ClusterName equals the cluster's ClusterVersion ID,
// and SrcK8S_Zone/DstK8S_Zone on node-to-node flows match each node's
// topology.kubernetes.io/zone label.
g.It("Author:aramesha-NonPreRelease-High-69218-High-71291-Verify cluster ID and zone in multiCluster deployment [Serial]", func() {
namespace := oc.Namespace()
g.By("Get clusterID of the cluster")
clusterID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[].spec.clusterID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster ID is %s", clusterID)
g.By("Deploy FlowCollector with multiCluster and addZone enabled")
flow := Flowcollector{
Namespace: namespace,
MultiClusterDeployment: "true",
AddZone: "true",
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Verify K8S_ClusterName = Cluster ID")
clusteridlabels := Lokilabels{
App: "netobserv-flowcollector",
K8S_ClusterName: clusterID,
}
clusterIdFlowRecords, err := clusteridlabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(clusterIdFlowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows > 0")
g.By("Verify SrcK8S_Zone and DstK8S_Zone are present and have expected values")
zonelabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Type: "Node",
DstK8S_Type: "Node",
}
zoneFlowRecords, err := zonelabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
// Fix: guard against a vacuous pass — without this assertion the zone
// checks in the loop below run zero times when Loki returns no records.
o.Expect(len(zoneFlowRecords)).Should(o.BeNumerically(">", 0), "expected number of node-to-node flows > 0")
for _, r := range zoneFlowRecords {
// Each record's zones must match the source/destination nodes' labels.
expectedSrcK8SZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", r.Flowlog.SrcK8S_HostName, "-o=jsonpath={.metadata.labels.topology\\.kubernetes\\.io/zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(r.Flowlog.SrcK8S_Zone).To(o.Equal(expectedSrcK8SZone))
expectedDstK8SZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", r.Flowlog.DstK8S_HostName, "-o=jsonpath={.metadata.labels.topology\\.kubernetes\\.io/zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(r.Flowlog.DstK8S_Zone).To(o.Equal(expectedDstK8SZone))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
94e4d999-72fc-4910-a905-d64b394a9f8e
|
Author:memodi-NonPreRelease-Longduration-Medium-60664-Medium-61482-Alerts-with-NetObserv [Serial][Slow]
|
['"encoding/json"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Exercises NetObserv alerting (OCP-60664/61482): the operator provisions
// PrometheusRules for FLP and the eBPF agent, NetObservLokiError can be
// disabled/re-enabled via spec.processor.metrics.disableAlerts, and the
// alert fires when flows cannot be written to Loki.
g.It("Author:memodi-NonPreRelease-Longduration-Medium-60664-Medium-61482-Alerts-with-NetObserv [Serial][Slow]", func() {
ns := oc.Namespace()
fc := Flowcollector{
Namespace: ns,
Template: flowFixturePath,
LokiNamespace: ns,
}
defer fc.DeleteFlowcollector(oc)
fc.CreateFlowcollector(oc)
// The FLP PrometheusRule must carry both built-in alert rules.
g.By("Get FLP Alert name and Alert Rules")
flpRuleName := "flowlogs-pipeline-alert"
flpRules, err := getConfiguredAlertRules(oc, flpRuleName, ns)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(flpRules).To(o.ContainSubstring("NetObservNoFlows"))
o.Expect(flpRules).To(o.ContainSubstring("NetObservLokiError"))
// The eBPF agent rule lives in the privileged companion namespace.
g.By("Get EBPF Alert name and Alert Rules")
agentRuleName := "ebpf-agent-prom-alert"
agentRules, err := getConfiguredAlertRules(oc, agentRuleName, ns+"-privileged")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(agentRules).To(o.ContainSubstring("NetObservDroppedFlows"))
// Disabling NetObservLokiError must remove it from the generated rules.
g.By("Verify alerts can be disabled")
generation, err := getResourceGeneration(oc, "prometheusRule", "flowlogs-pipeline-alert", ns)
o.Expect(err).NotTo(o.HaveOccurred())
patchTemplate := `[{"op": "$op", "path": "/spec/processor/metrics/disableAlerts", "value": ["NetObservLokiError"]}]`
patch := strings.Replace(patchTemplate, "$op", "add", 1)
patchOut, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "--type=json", "-p", patch).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(patchOut).To(o.ContainSubstring("patched"))
waitForResourceGenerationUpdate(oc, "prometheusRule", flpRuleName, "generation", generation, ns)
flpRules, err = getConfiguredAlertRules(oc, flpRuleName, ns)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(flpRules).To(o.ContainSubstring("NetObservNoFlows"))
o.Expect(flpRules).ToNot(o.ContainSubstring("NetObservLokiError"))
// Removing the disableAlerts entry restores the alert rule.
generation, err = getResourceGeneration(oc, "prometheusRule", "flowlogs-pipeline-alert", ns)
o.Expect(err).NotTo(o.HaveOccurred())
patch = strings.Replace(patchTemplate, "$op", "remove", 1)
patchOut, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "--type=json", "-p", patch).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(patchOut).To(o.ContainSubstring("patched"))
waitForResourceGenerationUpdate(oc, "prometheusRule", flpRuleName, "generation", generation, ns)
flpRules, err = getConfiguredAlertRules(oc, flpRuleName, ns)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(flpRules).To(o.ContainSubstring("NetObservNoFlows"))
o.Expect(flpRules).To(o.ContainSubstring("NetObservLokiError"))
g.By("delete flowcollector")
fc.DeleteFlowcollector(oc)
// Redeploy with an unreachable Loki URL and a tiny eBPF cache so the
// Loki-error alert becomes active.
fc = Flowcollector{
Namespace: ns,
Template: flowFixturePath,
CacheMaxFlows: "100",
LokiMode: "Monolithic",
MonolithicLokiURL: "http://loki.no-ns.svc:3100",
}
g.By("Deploy flowcollector with incorrect loki URL and lower cacheMaxFlows value")
fc.CreateFlowcollector(oc)
fc.WaitForFlowcollectorReady(oc)
g.By("Wait for alerts to be active")
waitForAlertToBeActive(oc, "NetObservLokiError")
})
| |||||
test case
|
openshift/openshift-tests-private
|
5f851fc1-dde8-48a5-acd3-2c171e368489
|
Author:aramesha-NonPreRelease-Medium-72875-Verify nodeSelector and tolerations with netobserv components [Serial]
|
['"encoding/json"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies eBPF agent scheduling knobs (OCP-72875): tolerations against a
// tainted worker (wrong toleration keeps the pod off the node, the correct
// one schedules it), then a nodeSelector pinning all agent pods to a
// labeled node.
g.It("Author:aramesha-NonPreRelease-Medium-72875-Verify nodeSelector and tolerations with netobserv components [Serial]", func() {
namespace := oc.Namespace()
// verify tolerations
g.By("Get worker node of the cluster")
workerNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Taint worker node")
// Fix: taint removal requires the trailing "-" form ("netobserv-agent-");
// the previous cleanup ("netobserv-agent --overwrite") is not a valid
// removal spec and left the taint on the node.
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", workerNode, "netobserv-agent-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("taint", "node", workerNode, "netobserv-agent=true:NoSchedule", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Add wrong toleration for eBPF spec for the taint netobserv-agent=false:NoSchedule")
patchValue := `{"scheduling":{"tolerations":[{"effect": "NoSchedule", "key": "netobserv-agent", "value": "false", "operator": "Equal"}]}}`
oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/advanced", "value": `+patchValue+`}]`, "--type=json").Output()
g.By("Ensure flowcollector is ready")
flow.WaitForFlowcollectorReady(oc)
// The non-matching toleration must NOT allow scheduling on the tainted node.
g.By(fmt.Sprintf("Verify eBPF pod is not scheduled on the %s", workerNode))
eBPFPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", flow.Namespace+"-privileged", "pods", "--field-selector", "spec.nodeName="+workerNode+"", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(eBPFPod).Should(o.BeEmpty())
g.By("Add correct toleration for eBPF spec for the taint netobserv-agent=true:NoSchedule")
flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
patchValue = `{"scheduling":{"tolerations":[{"effect": "NoSchedule", "key": "netobserv-agent", "value": "true", "operator": "Equal"}]}}`
oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/advanced", "value": `+patchValue+`}]`, "--type=json").Output()
g.By("Ensure flowcollector is ready")
flow.WaitForFlowcollectorReady(oc)
g.By(fmt.Sprintf("Verify eBPF pod is scheduled on the node %s after applying toleration for taint netobserv-agent=true:NoSchedule", workerNode))
eBPFPod, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", flow.Namespace+"-privileged", "pods", "--field-selector", "spec.nodeName="+workerNode+"", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(eBPFPod).NotTo(o.BeEmpty())
// verify nodeSelector
g.By("Add netobserv label to above worker node")
// Fix: the deferred cleanup must delete the label actually added below
// ("netobserv-agent"), not the unrelated key "test".
defer exutil.DeleteLabelFromNode(oc, workerNode, "netobserv-agent")
exutil.AddLabelToNode(oc, workerNode, "netobserv-agent", "true")
g.By("Patch flowcollector with nodeSelector for eBPF pods")
flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
patchValue = `{"scheduling":{"nodeSelector":{"netobserv-agent": "true"}}}`
oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/advanced", "value": `+patchValue+`}]`, "--type=json").Output()
g.By("Ensure flowcollector is ready")
flow.WaitForFlowcollectorReady(oc)
g.By("Verify all eBPF pods are deployed on the above worker node")
eBPFpods, err := exutil.GetAllPodsWithLabel(oc, flow.Namespace+"-privileged", "app=netobserv-ebpf-agent")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range eBPFpods {
nodeName, err := exutil.GetPodNodeName(oc, flow.Namespace+"-privileged", pod)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeName).To(o.Equal(workerNode))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
6be5c93c-baa4-4fa8-8ea6-9339598e9d32
|
Author:memodi-Medium-63185-Verify NetOberv must-gather plugin [Serial]
|
['"fmt"', '"os"', '"os/exec"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Test 63185: verify the NetObserv must-gather plugin.
// Deploys a FlowCollector, runs `oc adm must-gather` with the NetObserv image,
// then asserts that logs for the operator manager, the flowlogs-pipeline pod
// and the eBPF agent pod were all captured under the destination directory.
g.It("Author:memodi-Medium-63185-Verify NetOberv must-gather plugin [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Run must-gather command")
mustGatherDir := "/tmp/must-gather-63185"
// Best-effort cleanup of the dump directory; errors are intentionally ignored.
defer exec.Command("bash", "-c", "rm -rf "+mustGatherDir).Output()
// The command's exit code is not checked; only assert "error" is absent from its output.
output, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--image", "quay.io/netobserv/must-gather", "--dest-dir="+mustGatherDir).Output()
o.Expect(output).NotTo(o.ContainSubstring("error"))
g.By("Verify operator namespace logs are scraped")
// must-gather nests its results under an image-derived subdirectory, hence the glob.
mustGatherDir = mustGatherDir + "/quay-io-netobserv-must-gather-*"
// NOTE(review): Glob returns nil error on zero matches, so operatorlogs[0] below
// would panic (not fail cleanly) if the path pattern ever changes — TODO confirm acceptable.
operatorlogs, err := filePath.Glob(fmt.Sprintf("%s/namespaces/openshift-netobserv-operator/pods/netobserv-controller-manager-*/manager/manager/logs/current.log", mustGatherDir))
o.Expect(err).NotTo(o.HaveOccurred())
// os.Stat confirms the globbed log file actually exists on disk.
_, err = os.Stat(operatorlogs[0])
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Verify flowlogs-pipeline pod logs are scraped")
pods, err := exutil.GetAllPods(oc, namespace)
o.Expect(err).NotTo(o.HaveOccurred())
podlogs, err := filePath.Glob(fmt.Sprintf("%s/namespaces/%s/pods/%s/flowlogs-pipeline/flowlogs-pipeline/logs/current.log", mustGatherDir, namespace, pods[0]))
o.Expect(err).NotTo(o.HaveOccurred())
_, err = os.Stat(podlogs[0])
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Verify eBPF agent pod logs are scraped")
// eBPF agent pods run in the companion "<namespace>-privileged" namespace.
ebpfPods, err := exutil.GetAllPods(oc, namespace+"-privileged")
o.Expect(err).NotTo(o.HaveOccurred())
ebpflogs, err := filePath.Glob(fmt.Sprintf("%s/namespaces/%s/pods/%s/netobserv-ebpf-agent/netobserv-ebpf-agent/logs/current.log", mustGatherDir, namespace+"-privileged", ebpfPods[0]))
o.Expect(err).NotTo(o.HaveOccurred())
_, err = os.Stat(ebpflogs[0])
o.Expect(err).NotTo(o.HaveOccurred())
// TODO: once supported add a check for flowcollector dumped file.
})
| |||||
test case
|
openshift/openshift-tests-private
|
5314c734-9682-4f42-ba94-51059d03b3ea
|
Author:aramesha-NonPreRelease-High-73175-Verify eBPF agent filtering [Serial]
|
['"encoding/json"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Test 73175: exercise the eBPF agent flowFilter in both modes.
// Scenario 1 (Reject): flows on UDP src port 53 must not reach Loki while TCP flows do.
// Scenario 2 (Accept): only UDP/53 flows reach Loki; TCP flows are absent.
// Finally checks that Prometheus scrapes the eBPF filter metrics.
g.It("Author:aramesha-NonPreRelease-High-73175-Verify eBPF agent filtering [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploy FlowCollector with eBPF agent flowFilter to Reject flows with SrcPort 53 and UDP protocol")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// Scenario1: With REJECT action
g.By("Patch flowcollector with eBPF agent flowFilter to Reject flows with SrcPort 53 and UDP Protocol")
action := "Reject"
patchValue := `{"action": "` + action + `", "cidr": "0.0.0.0/0", "protocol": "UDP", "sourcePorts": "53", "enable": true}`
oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/flowFilter", "value": `+patchValue+`}]`, "--type=json").Output()
g.By("Ensure flowcollector is ready with Reject flowFilter")
flow.WaitForFlowcollectorReady(oc)
// check if patch is successful
flowPatch, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.agent.ebpf.flowFilter.action}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(flowPatch).To(o.Equal(`'Reject'`))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
}
// Proto "17" = UDP, "6" = TCP (IP protocol numbers used as Loki query params).
g.By("Verify number of flows with on UDP Protcol with SrcPort 53 = 0")
parameters := []string{"Proto=\"17\"", "SrcPort=\"53\""}
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically("==", 0), "expected number of flows on UDP with SrcPort 53 = 0")
g.By("Verify number of flows on TCP Protocol > 0")
parameters = []string{"Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows on TCP > 0")
// Scenario2: With ACCEPT action
g.By("Patch flowcollector with eBPF agent flowFilter to Accept flows with SrcPort 53")
action = "Accept"
oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/flowFilter/action", "value": `+action+`}]`, "--type=json").Output()
g.By("Ensure flowcollector is ready with Accept flowFilter")
flow.WaitForFlowcollectorReady(oc)
// check if patch is successful
flowPatch, err = oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.agent.ebpf.flowFilter.action}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(flowPatch).To(o.Equal(`'Accept'`))
g.By("Wait for a min before logs gets collected and written to loki")
// Reset the query window so only post-patch flows are considered.
startTime = time.Now()
time.Sleep(60 * time.Second)
g.By("Verify number of flows on UDP Protocol with SrcPort 53 > 0")
parameters = []string{"Proto=\"17\"", "SrcPort=\"53\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows on UDP with SrcPort 53 > 0")
g.By("Verify number of flows on TCP Protocol = 0")
parameters = []string{"Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically("==", 0), "expected number of flows on TCP = 0")
g.By("Verify prometheus is able to scrape eBPF metrics")
verifyEBPFFilterMetrics(oc)
})
| |||||
test case
|
openshift/openshift-tests-private
|
99d58920-cac6-4534-b668-ad9278b27c62
|
Author:memodi-Medium-53844-Sanity Test NetObserv [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Sanity check: deploy a FlowCollector and confirm that flow records reach Loki.
g.It("Author:memodi-Medium-53844-Sanity Test NetObserv [Serial]", func() {
ns := oc.Namespace()
g.By("Deploy FlowCollector")
fc := Flowcollector{
Namespace: ns,
Template: flowFixturePath,
LokiNamespace: ns,
}
defer fc.DeleteFlowcollector(oc)
fc.CreateFlowcollector(oc)
// Grant the plugin SA cluster-admin so its token can query Loki; revoke on exit.
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
removeErr := removeSAFromAdmin(oc, "netobserv-plugin", ns)
o.Expect(removeErr).NotTo(o.HaveOccurred())
}()
addErr := addSAToAdmin(oc, "netobserv-plugin", ns)
o.Expect(addErr).NotTo(o.HaveOccurred())
saToken := getSAToken(oc, "netobserv-plugin", ns)
g.By("Wait for a min before logs gets collected and written to loki")
queryStart := time.Now()
time.Sleep(60 * time.Second)
labels := Lokilabels{
App: "netobserv-flowcollector",
}
g.By("Verify flows are written to loki")
records, queryErr := labels.getLokiFlowLogs(saToken, ls.Route, queryStart)
o.Expect(queryErr).NotTo(o.HaveOccurred())
o.Expect(len(records)).Should(o.BeNumerically(">", 0), "expected number of flows written to loki > 0")
})
| |||||
test case
|
openshift/openshift-tests-private
|
1aedb5c4-4f1c-420f-a65d-4ae74be1f9af
|
Author:aramesha-High-67782-Verify large volume downloads [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Test 67782: large-volume download correctness.
// A test client downloads a 100M object from an nginx server; flows between the
// two namespaces are then fetched from Loki and their byte accounting is checked
// against the object size via verifyFlowCorrectness.
g.It("Author:aramesha-High-67782-Verify large volume downloads [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
// Shorter eBPF cache timeout so large transfers are flushed into flows sooner.
EBPFCacheActiveTimeout: "30s",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err := removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err := addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Deploy test server and client pods")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-67782",
Template: serverTemplate,
// LargeBlob makes the server expose a large object for the client to fetch.
LargeBlob: "yes",
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err = testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-67782",
ObjectSize: "100M",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Wait for 2 mins before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(120 * time.Second)
// Query server->client flows only (FlowDirection "0" per the label filter below).
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testClientTemplate.ServerNS,
DstK8S_Namespace: testClientTemplate.ClientNS,
SrcK8S_OwnerName: "nginx-service",
FlowDirection: "0",
}
g.By("Verify flows are written to loki")
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows written to loki > 0")
g.By("Verify flow correctness")
verifyFlowCorrectness(testClientTemplate.ObjectSize, flowRecords)
})
| |||||
test case
|
openshift/openshift-tests-private
|
5649a393-cae6-4d95-a6e8-82733549ba13
|
Author:aramesha-High-75656-Verify TCP flags [Disruptive]
|
['"encoding/json"', '"os"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Test 75656: verify TCP flags end to end.
// 1. Reject SYN-ACK flows via the eBPF flowFilter and confirm none reach Loki.
// 2. Deploy custom SYN-flood metrics plus an alerting rule, induce a SYN flood
//    with a test client, then confirm the SYN flows are recorded and the
//    NetObserv-SYNFlood alerts become active.
g.It("Author:aramesha-High-75656-Verify TCP flags [Disruptive]", func() {
namespace := oc.Namespace()
SYNFloodMetricsPath := filePath.Join(baseDir, "SYN_flood_metrics_template.yaml")
SYNFloodAlertsPath := filePath.Join(baseDir, "SYN_flood_alert_template.yaml")
g.By("Get kubeadmin token")
kubeAdminPasswd := os.Getenv("QE_KUBEADMIN_PASSWORD")
if kubeAdminPasswd == "" {
g.Skip("no kubeAdminPasswd is provided in this profile, skip it")
}
serverUrl, serverUrlErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-server").Output()
o.Expect(serverUrlErr).NotTo(o.HaveOccurred())
currentContext, currentContextErr := oc.WithoutNamespace().Run("config").Args("current-context").Output()
o.Expect(currentContextErr).NotTo(o.HaveOccurred())
// Logging in as kubeadmin switches the kubeconfig context; restore it on exit.
defer func() {
rollbackCtxErr := oc.WithoutNamespace().Run("config").Args("set", "current-context", currentContext).Execute()
o.Expect(rollbackCtxErr).NotTo(o.HaveOccurred())
}()
kubeadminToken := getKubeAdminToken(oc, kubeAdminPasswd, serverUrl, currentContext)
o.Expect(kubeadminToken).NotTo(o.BeEmpty())
g.By("Deploy FlowCollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Patch flowcollector with eBPF agent flowFilter to Reject flows with tcpFlags SYN-ACK and TCP Protocol")
patchValue := `{"action": "Reject", "cidr": "0.0.0.0/0", "protocol": "TCP", "tcpFlags": "SYN-ACK", "enable": true}`
oc.AsAdmin().WithoutNamespace().Run("patch").Args("flowcollector", "cluster", "-p", `[{"op": "replace", "path": "/spec/agent/ebpf/flowFilter", "value": `+patchValue+`}]`, "--type=json").Output()
g.By("Ensure flowcollector is ready with Reject flowFilter")
flow.WaitForFlowcollectorReady(oc)
// check if patch is successful
flowPatch, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.agent.ebpf.flowFilter.action}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(flowPatch).To(o.Equal(`'Reject'`))
g.By("Deploy custom metrics to detect SYN flooding")
customMetrics := CustomMetrics{
Namespace: namespace,
Template: SYNFloodMetricsPath,
}
// Record the dynamic FLP config's resourceVersion so we can wait until the
// newly created custom metrics are rolled into it.
curv, err := getResourceVersion(oc, "cm", "flowlogs-pipeline-config-dynamic", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
customMetrics.createCustomMetrics(oc)
waitForResourceGenerationUpdate(oc, "cm", "flowlogs-pipeline-config-dynamic", "resourceVersion", curv, namespace)
g.By("Deploy SYN flooding alert rule")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertingrule.monitoring.openshift.io", "netobserv-syn-alerts", "-n", "openshift-monitoring")
configFile := exutil.ProcessTemplate(oc, "--ignore-unknown-parameters=true", "-f", SYNFloodAlertsPath, "-p", "Namespace=openshift-monitoring")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile).Execute()
o.Expect(err).ToNot(o.HaveOccurred())
g.By("Deploy test client pod to induce SYN flooding")
template := filePath.Join(baseDir, "test-SYN-flood-client_template.yaml")
testTemplate := TestClientTemplate{
ClientNS: "test-client-75656",
Template: template,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testTemplate.ClientNS)
configFile = exutil.ProcessTemplate(oc, "--ignore-unknown-parameters=true", "-f", testTemplate.Template, "-p", "CLIENT_NS="+testTemplate.ClientNS)
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile).Execute()
o.Expect(err).ToNot(o.HaveOccurred())
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
}
g.By("Verify no flows with SYN_ACK TCP flag")
parameters := []string{"Flags=\"SYN_ACK\""}
flowRecords, err := lokilabels.getLokiFlowLogs(kubeadminToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
// The Loki query also matches records whose flag list merely contains SYN or
// ACK, so count exact "SYN_ACK" entries across all records and assert on the
// aggregate. (Previously `count` was declared but never incremented, making
// the final assertion dead code.)
count := 0
for _, r := range flowRecords {
for _, f := range r.Flowlog.Flags {
if f == "SYN_ACK" {
count++
}
}
}
o.Expect(count).Should(o.BeNumerically("==", 0), "expected number of flows with SYN_ACK TCPFlag = 0")
g.By("Verify SYN flooding flows")
parameters = []string{"Flags=\"SYN\"", "DstAddr=\"192.168.1.159\""}
flowRecords, err = lokilabels.getLokiFlowLogs(kubeadminToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of SYN flows > 0")
// Each flood flow must report exactly 54 bytes — presumably header-only SYN
// packets from the flood client; confirm against the client template if changed.
for _, r := range flowRecords {
o.Expect(r.Flowlog.Bytes).Should(o.BeNumerically("==", 54))
}
g.By("Wait for alerts to be active")
waitForAlertToBeActive(oc, "NetObserv-SYNFlood-out")
waitForAlertToBeActive(oc, "NetObserv-SYNFlood-in")
})
| |||||
test case
|
openshift/openshift-tests-private
|
c79d1c58-4967-46db-8e8b-afb76a5c813e
|
Author:aramesha-NonPreRelease-Longduration-High-76537-Verify flow enrichment for VM's secondary interfaces [Disruptive][Slow]
|
['"encoding/json"', '"os"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Test 76537: flow enrichment for VM secondary interfaces.
// Installs OpenShift Virtualization on baremetal workers, creates a layer-2
// NetworkAttachmentDefinition plus two VMs attached to it, then configures the
// FlowCollector with a secondaryNetworks entry (MAC-indexed) and verifies that
// VM-to-VM flows over the secondary network are enriched with VM pod names,
// VirtualMachineInstance owner types and the network name.
g.It("Author:aramesha-NonPreRelease-Longduration-High-76537-Verify flow enrichment for VM's secondary interfaces [Disruptive][Slow]", func() {
namespace := oc.Namespace()
testNS := "test-76537"
virtOperatorNS := "openshift-cnv"
// OpenShift Virtualization requires baremetal workers.
if !hasMetalWorkerNodes(oc) {
g.Skip("Cluster does not have baremetal workers. Skip this test!")
}
g.By("Get kubeadmin token")
kubeAdminPasswd := os.Getenv("QE_KUBEADMIN_PASSWORD")
if kubeAdminPasswd == "" {
g.Skip("no kubeAdminPasswd is provided in this profile, skip it")
}
serverUrl, serverUrlErr := oc.AsAdmin().WithoutNamespace().Run("whoami").Args("--show-server").Output()
o.Expect(serverUrlErr).NotTo(o.HaveOccurred())
currentContext, currentContextErr := oc.WithoutNamespace().Run("config").Args("current-context").Output()
o.Expect(currentContextErr).NotTo(o.HaveOccurred())
// Logging in as kubeadmin switches the kubeconfig context; restore it on exit.
defer func() {
rollbackCtxErr := oc.WithoutNamespace().Run("config").Args("set", "current-context", currentContext).Execute()
o.Expect(rollbackCtxErr).NotTo(o.HaveOccurred())
}()
kubeadminToken := getKubeAdminToken(oc, kubeAdminPasswd, serverUrl, currentContext)
o.Expect(kubeadminToken).NotTo(o.BeEmpty())
virtualizationDir := exutil.FixturePath("testdata", "netobserv", "virtualization")
kubevirtHyperconvergedPath := filePath.Join(virtualizationDir, "kubevirt-hyperconverged.yaml")
layer2NadPath := filePath.Join(virtualizationDir, "layer2-nad.yaml")
testVM1 := filePath.Join(virtualizationDir, "test-vm1.yaml")
testVM2 := filePath.Join(virtualizationDir, "test-vm2.yaml")
g.By("Deploy openshift-cnv namespace")
OperatorNS.Name = virtOperatorNS
OperatorNS.DeployOperatorNamespace(oc)
g.By("Deploy Openshift Virtualization operator")
virtCatsrc := Resource{"catsrc", "redhat-operators", "openshift-marketplace"}
virtPackageName := "kubevirt-hyperconverged"
virtSource := CatalogSourceObjects{"stable", virtCatsrc.Name, virtCatsrc.Namespace}
VO := SubscriptionObjects{
OperatorName: "kubevirt-hyperconverged",
Namespace: virtOperatorNS,
PackageName: virtPackageName,
Subscription: filePath.Join(subscriptionDir, "sub-template.yaml"),
OperatorGroup: filePath.Join(subscriptionDir, "singlenamespace-og.yaml"),
CatalogSource: &virtSource,
}
defer VO.uninstallOperator(oc)
VO.SubscribeOperator(oc)
WaitForPodsReadyWithLabel(oc, VO.Namespace, "name=virt-operator")
g.By("Deploy OpenShift Virtualization Deployment CR")
defer deleteResource(oc, "hyperconverged", "kubevirt-hyperconverged", virtOperatorNS)
_, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", kubevirtHyperconvergedPath).Output()
o.Expect(err).ToNot(o.HaveOccurred())
// Wait a min for hyperconverged pods to come up
time.Sleep(60 * time.Second)
waitUntilHyperConvergedReady(oc, "kubevirt-hyperconverged", virtOperatorNS)
WaitForPodsReadyWithLabel(oc, virtOperatorNS, "app.kubernetes.io/managed-by=virt-operator")
g.By("Deploy Network Attachment Definition in test-76537 namespace")
defer deleteNamespace(oc, testNS)
defer deleteResource(oc, "net-attach-def", "l2-network", testNS)
_, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", layer2NadPath).Output()
o.Expect(err).ToNot(o.HaveOccurred())
// Wait a min for NAD to come up
time.Sleep(60 * time.Second)
checkNAD(oc, "l2-network", testNS)
g.By("Deploy test VM1")
defer deleteResource(oc, "vm", "test-vm1", testNS)
_, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", testVM1, "-n", testNS).Output()
o.Expect(err).ToNot(o.HaveOccurred())
waitUntilVMReady(oc, "test-vm1", testNS)
// Query window starts once VM1 is up, before VM2 generates traffic.
startTime := time.Now()
g.By("Deploy test VM2")
defer deleteResource(oc, "vm", "test-vm2", testNS)
_, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", testVM2, "-n", testNS).Output()
o.Expect(err).ToNot(o.HaveOccurred())
waitUntilVMReady(oc, "test-vm2", testNS)
// Secondary network enrichment config: index flows by MAC on the l2-network NAD.
secondaryNetworkConfig := map[string]interface{}{
"index": []interface{}{"MAC"},
"name": "test-76537/l2-network",
}
config, err := json.Marshal(secondaryNetworkConfig)
o.Expect(err).ToNot(o.HaveOccurred())
secNetConfig := string(config)
g.By("Deploy FlowCollector with secondary Network config")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
EBPFPrivileged: "true",
SecondayNetworks: []string{secNetConfig},
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify flowcollector is deployed with Secondary Network config")
secondaryNetworkName, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.processor.advanced.secondaryNetworks[0].name}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secondaryNetworkName).To(o.Equal(`'test-76537/l2-network'`))
g.By("Wait for a min before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
SrcK8S_Namespace: testNS,
SrcK8S_OwnerName: "test-vm2",
DstK8S_Namespace: testNS,
DstK8S_OwnerName: "test-vm1",
}
// 10.10.10.14/.15 — presumably the VMs' static addresses on the l2-network;
// confirm against test-vm1.yaml / test-vm2.yaml if this ever fails.
parameters := []string{"DstAddr=\"10.10.10.15\"", "SrcAddr=\"10.10.10.14\""}
g.By("Verify flows are written to loki")
flowRecords, err := lokilabels.getLokiFlowLogs(kubeadminToken, ls.Route, startTime, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flows written to loki > 0")
g.By("Verify flow logs are enriched")
// Get VM1 pod name and node
vm1podname, err := exutil.GetAllPodsWithLabel(oc, testNS, "vm.kubevirt.io/name=test-vm1")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm1podname).NotTo(o.BeEmpty())
vm1node, err := exutil.GetPodNodeName(oc, testNS, vm1podname[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm1node).NotTo(o.BeEmpty())
// Get vm2 pod name and node
vm2podname, err := exutil.GetAllPodsWithLabel(oc, testNS, "vm.kubevirt.io/name=test-vm2")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm2podname).NotTo(o.BeEmpty())
vm2node, err := exutil.GetPodNodeName(oc, testNS, vm2podname[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(vm2node).NotTo(o.BeEmpty())
// Every record must be enriched with the launcher pod names, VMI owner type
// and the secondary network name on both source and destination sides.
for _, r := range flowRecords {
o.Expect(r.Flowlog.DstK8S_Name).Should(o.ContainSubstring(vm1podname[0]))
o.Expect(r.Flowlog.SrcK8S_Name).Should(o.ContainSubstring(vm2podname[0]))
o.Expect(r.Flowlog.DstK8S_OwnerType).Should(o.ContainSubstring("VirtualMachineInstance"))
o.Expect(r.Flowlog.SrcK8S_OwnerType).Should(o.ContainSubstring("VirtualMachineInstance"))
o.Expect(r.Flowlog.DstK8S_NetworkName).Should(o.ContainSubstring("test-76537/l2-network"))
o.Expect(r.Flowlog.SrcK8S_NetworkName).Should(o.ContainSubstring("test-76537/l2-network"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
2eaf3bcd-cc8c-4ed7-bdb1-e8c819e023e8
|
Author:aramesha-NonPreRelease-Longduration-Medium-78480-NetObserv with sampling 50 [Serial][Slow]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Test 78480: run NetObserv with sampling set to 50 and all (non-tech-preview)
// eBPF features enabled, then verify each feature still produces flows in Loki:
// PacketDrop, FlowRTT, DNSTracking (TCP and UDP), PacketTranslation — plus the
// corresponding eBPF feature metrics in Prometheus.
g.It("Author:aramesha-NonPreRelease-Longduration-Medium-78480-NetObserv with sampling 50 [Serial][Slow]", func() {
namespace := oc.Namespace()
g.By("Deploy DNS pods")
DNSTemplate := filePath.Join(baseDir, "DNS-pods.yaml")
DNSNamespace := "dns-traffic"
defer oc.DeleteSpecifiedNamespaceAsAdmin(DNSNamespace)
ApplyResourceFromFile(oc, DNSNamespace, DNSTemplate)
exutil.AssertAllPodsToBeReady(oc, DNSNamespace)
g.By("Deploy test server and client pods")
servertemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-78480",
Template: servertemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-78480",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Deploy FlowCollector with all features enabled with sampling 50")
// Bring in the NetworkEvents feature once it is GA; we don't want to skip the
// whole test case when the tech-preview flag is not set.
flow := Flowcollector{
Namespace: namespace,
EBPFPrivileged: "true",
EBPFeatures: []string{"\"DNSTracking\", \"PacketDrop\", \"FlowRTT\", \"PacketTranslation\""},
Sampling: "50",
LokiNamespace: namespace,
Template: flowFixturePath,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err := removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for 2 mins before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(120 * time.Second)
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
}
// Proto "6" = TCP, "17" = UDP (IP protocol numbers used as Loki query params).
g.By("Verify Packet Drop flows")
lokiParams := []string{"PktDropLatestState=\"TCP_INVALID_STATE\"", "Proto=\"6\""}
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of TCP Invalid State flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.PktDropLatestDropCause).NotTo(o.BeEmpty())
o.Expect(r.Flowlog.PktDropBytes).Should(o.BeNumerically(">", 0))
o.Expect(r.Flowlog.PktDropPackets).Should(o.BeNumerically(">", 0))
}
lokiParams = []string{"PktDropLatestDropCause=\"SKB_DROP_REASON_NO_SOCKET\"", "Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of No Socket TCP flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.PktDropLatestState).NotTo(o.BeEmpty())
o.Expect(r.Flowlog.PktDropBytes).Should(o.BeNumerically(">", 0))
o.Expect(r.Flowlog.PktDropPackets).Should(o.BeNumerically(">", 0))
}
g.By("Verify flowRTT flows")
lokiParams = []string{"Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of TCP flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.TimeFlowRttNs).Should(o.BeNumerically(">=", 0))
}
g.By("Verify TCP DNS flows")
lokilabels.DstK8S_Namespace = DNSNamespace
lokiParams = []string{"DnsFlagsResponseCode=\"NoError\"", "SrcPort=\"53\"", "DstK8S_Name=\"dnsutils1\"", "Proto=\"6\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of TCP DNS flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.DnsLatencyMs).Should(o.BeNumerically(">=", 0))
}
g.By("Verify UDP DNS flows")
lokiParams = []string{"DnsFlagsResponseCode=\"NoError\"", "SrcPort=\"53\"", "DstK8S_Name=\"dnsutils2\"", "Proto=\"17\""}
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of UDP DNS flows > 0")
for _, r := range flowRecords {
o.Expect(r.Flowlog.DnsLatencyMs).Should(o.BeNumerically(">=", 0))
}
g.By("Verify Packet Translation flows")
// Switch labels to client->Service flows for the translation checks.
lokilabels = Lokilabels{
App: "netobserv-flowcollector",
DstK8S_Type: "Service",
DstK8S_Namespace: testClientTemplate.ServerNS,
SrcK8S_Namespace: testClientTemplate.ClientNS,
}
lokiParams = []string{"ZoneId>0"}
g.By("Verify PacketTranslation flows")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of PacketTranslation flows > 0")
verifyPacketTranslationFlows(oc, testClientTemplate.ServerNS, testClientTemplate.ClientNS, flowRecords)
g.By("Verify eBPF feature metrics")
verifyEBPFFeatureMetrics(oc, "Drops")
verifyEBPFFeatureMetrics(oc, "RTT")
verifyEBPFFeatureMetrics(oc, "DNS")
verifyEBPFFeatureMetrics(oc, "Xlat")
})
| |||||
test case
|
openshift/openshift-tests-private
|
91edafa3-5920-421f-90b1-3d8a70bf342a
|
Author:aramesha-NonPreRelease-High-79015-Verify PacketTranslation feature [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Test 79015: verify the PacketTranslation eBPF feature on its own.
// A client talks to an nginx server through a ClusterIP Service; flows from the
// client namespace to the Service (ZoneId>0) are fetched from Loki and checked
// with verifyPacketTranslationFlows.
g.It("Author:aramesha-NonPreRelease-High-79015-Verify PacketTranslation feature [Serial]", func() {
namespace := oc.Namespace()
g.By("Deploy test server and client pods")
servertemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-79015",
// ClusterIP Service so client traffic goes through service translation.
ServiceType: "ClusterIP",
Template: servertemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
clientTemplate := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClientTemplate := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client-79015",
Template: clientTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClientTemplate.ClientNS)
err = testClientTemplate.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClientTemplate.ClientNS)
g.By("Deploy FlowCollector with PacketTranslation feature enabled")
flow := Flowcollector{
Namespace: namespace,
EBPFeatures: []string{"\"PacketTranslation\""},
LokiNamespace: namespace,
Template: flowFixturePath,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err := removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for 2 mins before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(120 * time.Second)
// Client -> Service flows only; ZoneId>0 selects translated (conntrack-zoned) records.
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
DstK8S_Type: "Service",
DstK8S_Namespace: testClientTemplate.ServerNS,
SrcK8S_Namespace: testClientTemplate.ClientNS,
}
lokiParams := []string{"ZoneId>0"}
g.By("Verify PacketTranslation flows")
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, startTime, lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of PacketTranslation flows > 0")
verifyPacketTranslationFlows(oc, testClientTemplate.ServerNS, testClientTemplate.ClientNS, flowRecords)
})
| |||||
test case
|
openshift/openshift-tests-private
|
82554a8a-0bcc-405b-b69e-2120216459d0
|
Author:memodi-NonPreRelease-Medium-77894-TechPreview Network Policies Correlation [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies that OVN network-policy correlation events (NetworkEvents eBPF
// feature, TechPreview) appear on flow records for BANP, NetworkPolicy and
// ANP, using one server namespace and two client namespaces.
g.It("Author:memodi-NonPreRelease-Medium-77894-TechPreview Network Policies Correlation [Serial]", func() {
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("Skipping because the TechPreviewNoUpgrade is not enabled on the cluster.")
}
namespace := oc.Namespace()
g.By("Deploy client-server pods in 2 client NS and one Server NS")
serverTemplate := filePath.Join(baseDir, "test-nginx-server_template.yaml")
testServerTemplate := TestServerTemplate{
ServerNS: "test-server-77894",
Template: serverTemplate,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testServerTemplate.ServerNS)
err := testServerTemplate.createServer(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testServerTemplate.ServerNS)
client1Template := filePath.Join(baseDir, "test-nginx-client_template.yaml")
testClient1Template := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client1-77894",
Template: client1Template,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClient1Template.ClientNS)
err = testClient1Template.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClient1Template.ClientNS)
// Second client reuses the same client template in a different namespace.
testClient2Template := TestClientTemplate{
ServerNS: testServerTemplate.ServerNS,
ClientNS: "test-client2-77894",
Template: client1Template,
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testClient2Template.ClientNS)
err = testClient2Template.createClient(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, testClient2Template.ClientNS)
// create flowcollector with NWEvents.
// NetworkEvents requires a privileged eBPF agent.
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
EBPFeatures: []string{"\"NetworkEvents\""},
EBPFPrivileged: "true",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("get flowlogs from loki")
lokilabels := Lokilabels{
App: "netobserv-flowcollector",
DstK8S_Namespace: testClient1Template.ServerNS,
DstK8S_Type: "Pod",
SrcK8S_Type: "Pod",
}
// FlowDirection != 1 filters out egress-only records.
lokiParams := []string{"FlowDirection!=1"}
lokilabels.SrcK8S_Namespace = testClient1Template.ClientNS
flowRecords, err := lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-2*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
g.By("deploy BANP policy")
banpTemplate := filePath.Join(baseDir, "networking", "baselineadminnetworkPolicy.yaml")
banpParameters := []string{"--ignore-unknown-parameters=true", "-p", "SERVER_NS=" + testClient1Template.ServerNS, "CLIENT1_NS=" + testClient1Template.ClientNS, "CLIENT2_NS=" + testClient2Template.ClientNS, "-f", banpTemplate}
// banp is a cluster scoped resource so passing empty string for NS arg.
defer deleteResource(oc, "banp", "default", "")
err = exutil.ApplyClusterResourceFromTemplateWithError(oc, banpParameters...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("check flows have NW Events")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-2*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "drop", "BaselineAdminNetworkPolicy", "Ingress")
g.By("deploy NetworkPolicy")
netpolTemplate := filePath.Join(baseDir, "networking", "networkPolicy.yaml")
netpolName := "allow-ingress"
netPolParameters := []string{"--ignore-unknown-parameters=true", "-p", "NAME=" + netpolName, "SERVER_NS=" + testClient1Template.ServerNS, "ALLOW_NS=" + testClient1Template.ClientNS, "-f", netpolTemplate}
defer deleteResource(oc, "netpol", netpolName, testClient1Template.ServerNS)
err = exutil.ApplyClusterResourceFromTemplateWithError(oc, netPolParameters...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("check flows from server to client1")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "allow-related", "NetworkPolicy", "Ingress")
g.By("check flows from server to client2")
lokilabels.SrcK8S_Namespace = testClient2Template.ClientNS
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "drop", "NetpolNamespace", "Ingress")
g.By("deploy ANP policy")
anpTemplate := filePath.Join(baseDir, "networking", "adminnetworkPolicy.yaml")
anpName := "server-ns"
// NOTE(review): parameter key "NAM=" differs from the "NAME=" used for the
// NetworkPolicy template above — confirm adminnetworkPolicy.yaml really
// declares a NAM parameter; with --ignore-unknown-parameters=true a typo
// here would be silently ignored and the deferred delete by anpName could miss.
anpParameters := []string{"--ignore-unknown-parameters=true", "-p", "NAM=" + anpName, "SERVER_NS=" + testClient1Template.ServerNS, "ALLOW_NS=" + testClient2Template.ClientNS, "DENY_NS=" + testClient1Template.ClientNS, "-f", anpTemplate}
defer deleteResource(oc, "anp", anpName, "")
err = exutil.ApplyClusterResourceFromTemplateWithError(oc, anpParameters...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for 60 secs before logs gets collected and written to loki")
time.Sleep(60 * time.Second)
g.By("check flows from server to client2")
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "allow-related", "AdminNetworkPolicy", "Ingress")
g.By("check flows from server to client1")
lokilabels.SrcK8S_Namespace = testClient1Template.ClientNS
flowRecords, err = lokilabels.getLokiFlowLogs(bearerToken, ls.Route, time.Now().Add(-1*time.Minute), lokiParams...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(flowRecords)).Should(o.BeNumerically(">", 0), "expected number of flowRecords with 'flowDirection != 1' > 0")
verifyNetworkEvents(flowRecords, "drop", "AdminNetworkPolicy", "Ingress")
})
| |||||
test case
|
openshift/openshift-tests-private
|
683b53cb-d869-4047-b7a6-06fe5de41cd1
|
Author:aramesha-NonPreRelease-Longduration-High-56362-High-53597-High-56326-Verify network flows are captured with Kafka with TLS [Serial][Slow]
|
['"fmt"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies flows are captured end-to-end when the FlowCollector uses the
// Kafka deployment model with TLS: secrets synced to the privileged
// namespace, FLP-transformer metrics scraped over HTTPS, and logs in Loki.
g.It("Author:aramesha-NonPreRelease-Longduration-High-56362-High-53597-High-56326-Verify network flows are captured with Kafka with TLS [Serial][Slow]", func() {
namespace := oc.Namespace()
g.By("Deploy FlowCollector with Kafka TLS")
flow := Flowcollector{
Namespace: namespace,
DeploymentModel: "Kafka",
Template: flowFixturePath,
LokiNamespace: namespace,
KafkaAddress: fmt.Sprintf("kafka-cluster-kafka-bootstrap.%s:9093", namespace),
KafkaTLSEnable: "true",
KafkaNamespace: namespace,
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Ensure secrets are synced")
// ensure certs are synced to privileged NS
secrets, err := getSecrets(oc, namespace+"-privileged")
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secrets).To(o.And(o.ContainSubstring(kafkaUser.UserName), o.ContainSubstring(kafka.Name+"-cluster-ca-cert")))
g.By("Verify prometheus is able to scrape metrics for FLP-Kafka")
flpPrpmSM := "flowlogs-pipeline-transformer-monitor"
// getMetricsScheme returns a single-quoted value; trim before comparing.
tlsScheme, err := getMetricsScheme(oc, flpPrpmSM, flow.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
tlsScheme = strings.Trim(tlsScheme, "'")
o.Expect(tlsScheme).To(o.Equal("https"))
serverName, err := getMetricsServerName(oc, flpPrpmSM, flow.Namespace)
serverName = strings.Trim(serverName, "'")
o.Expect(err).NotTo(o.HaveOccurred())
flpPromSA := "flowlogs-pipeline-transformer-prom"
expectedServerName := fmt.Sprintf("%s.%s.svc", flpPromSA, namespace)
o.Expect(serverName).To(o.Equal(expectedServerName))
// verify FLP metrics are being populated with Kafka
// Sleep before making any metrics request
g.By("Verify prometheus is able to scrape FLP metrics")
time.Sleep(30 * time.Second)
verifyFLPMetrics(oc)
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
e56b0f1d-0a22-4fbf-9688-5378c0485a98
|
Author:aramesha-NonPreRelease-Longduration-High-57397-High-65116-High-75340-Verify network-flows export with Kafka and netobserv installation without Loki and networkPolicy enabled[Serial]
|
['"encoding/json"', '"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Covers three scenarios: (1) flows exported to a secondary Kafka topic with
// TLS while Loki ingestion and networkPolicy are enabled, (2) NetObserv
// running without Loki (Direct model), (3) console plugin disabled.
g.It("Author:aramesha-NonPreRelease-Longduration-High-57397-High-65116-High-75340-Verify network-flows export with Kafka and netobserv installation without Loki and networkPolicy enabled[Serial]", func() {
namespace := oc.Namespace()
kafkaAddress := fmt.Sprintf("kafka-cluster-kafka-bootstrap.%s:9093", namespace)
g.By("Deploy kafka Topic for export")
// deploy kafka topic for export
kafkaTopic2 := KafkaTopic{
TopicName: "network-flows-export",
Name: kafka.Name,
Namespace: namespace,
Template: kafkaTopicPath,
}
defer kafkaTopic2.deleteKafkaTopic(oc)
kafkaTopic2.deployKafkaTopic(oc)
waitForKafkaTopicReady(oc, kafkaTopic2.TopicName, kafkaTopic2.Namespace)
// Exporter spec is marshalled to JSON and injected into the flowcollector
// template as a string parameter.
kafkaExporterConfig := map[string]interface{}{
"kafka": map[string]interface{}{
"address": kafkaAddress,
"tls": map[string]interface{}{
"caCert": map[string]interface{}{
"certFile": "ca.crt",
"name": "kafka-cluster-cluster-ca-cert",
"namespace": namespace,
"type": "secret"},
"enable": true,
"insecureSkipVerify": false,
"userCert": map[string]interface{}{
"certFile": "user.crt",
"certKey": "user.key",
"name": kafkaUser.UserName,
"namespace": namespace,
"type": "secret"},
},
"topic": kafkaTopic2.TopicName},
"type": "Kafka",
}
config, err := json.Marshal(kafkaExporterConfig)
o.Expect(err).ToNot(o.HaveOccurred())
kafkaConfig := string(config)
networkPolicyAddNamespaces := "openshift-ingress"
config, err = json.Marshal(networkPolicyAddNamespaces)
o.Expect(err).ToNot(o.HaveOccurred())
AdditionalNamespaces := string(config)
g.By("Deploy FlowCollector with Kafka TLS")
flow := Flowcollector{
Namespace: namespace,
DeploymentModel: "Kafka",
Template: flowFixturePath,
LokiNamespace: namespace,
KafkaAddress: kafkaAddress,
KafkaTLSEnable: "true",
KafkaNamespace: namespace,
Exporters: []string{kafkaConfig},
NetworkPolicyEnable: "true",
NetworkPolicyAdditionalNamespaces: []string{AdditionalNamespaces},
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
// Scenario1: Verify flows are exported with Kafka DeploymentModel and with Loki enabled
g.By("Verify flowcollector is deployed with KAFKA exporter")
exporterType, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.exporters[0].type}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(exporterType).To(o.Equal(`'Kafka'`))
g.By("Verify flowcollector is deployed with openshift-ingress in additionalNamepsaces section")
addNamespaces, err := oc.AsAdmin().Run("get").Args("flowcollector", "cluster", "-n", namespace, "-o", "jsonpath='{.spec.networkPolicy.additionalNamespaces[0]}'").Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(addNamespaces).To(o.Equal(`'openshift-ingress'`))
g.By("Ensure flows are observed, all pods are running and secrets are synced and plugin pod is deployed")
flow.WaitForFlowcollectorReady(oc)
// ensure certs are synced to privileged NS
secrets, err := getSecrets(oc, namespace+"-privileged")
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secrets).To(o.And(o.ContainSubstring(kafkaUser.UserName), o.ContainSubstring(kafka.Name+"-cluster-ca-cert")))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", namespace)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy Kafka consumer pod")
// using amq-streams/kafka-34-rhel8:2.5.2 version. Update if imagePull issues are observed
consumerTemplate := filePath.Join(kafkaDir, "topic-consumer-tls.yaml")
consumer := Resource{"job", kafkaTopic2.TopicName + "-consumer", namespace}
defer consumer.clear(oc)
err = consumer.applyFromTemplate(oc, "-n", consumer.Namespace, "-f", consumerTemplate, "-p", "NAME="+consumer.Name, "NAMESPACE="+consumer.Namespace, "KAFKA_TOPIC="+kafkaTopic2.TopicName, "CLUSTER_NAME="+kafka.Name, "KAFKA_USER="+kafkaUser.UserName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForPodsReadyWithLabel(oc, namespace, "job-name="+consumer.Name)
consumerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", "job-name="+consumer.Name, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Verify Kafka consumer pod logs")
podLogs, err := exutil.WaitAndGetSpecificPodLogs(oc, namespace, "", consumerPodName, `'{"AgentIP":'`)
exutil.AssertWaitPollNoErr(err, "Did not get log for the pod with job-name=network-flows-export-consumer label")
verifyFlowRecordFromLogs(podLogs)
// Scenario2: flows must still reach the export topic with Loki disabled.
g.By("Verify NetObserv can be installed without Loki")
flow.DeleteFlowcollector(oc)
// Ensure FLP and eBPF pods are deleted
checkPodDeleted(oc, namespace, "app=flowlogs-pipeline", "flowlogs-pipeline")
checkPodDeleted(oc, namespace+"-privileged", "app=netobserv-ebpf-agent", "netobserv-ebpf-agent")
// Ensure network-policy is deleted
checkResourceDeleted(oc, "networkPolicy", "netobserv", flow.Namespace)
flow.DeploymentModel = "Direct"
flow.LokiEnable = "false"
flow.NetworkPolicyEnable = "false"
flow.CreateFlowcollector(oc)
g.By("Verify Kafka consumer pod logs")
podLogs, err = exutil.WaitAndGetSpecificPodLogs(oc, namespace, "", consumerPodName, `'{"AgentIP":'`)
exutil.AssertWaitPollNoErr(err, "Did not get log for the pod with job-name=network-flows-export-consumer label")
verifyFlowRecordFromLogs(podLogs)
g.By("Verify console plugin pod is not deployed when its disabled in flowcollector")
flow.DeleteFlowcollector(oc)
// Ensure FLP and eBPF pods are deleted
checkPodDeleted(oc, namespace, "app=flowlogs-pipeline", "flowlogs-pipeline")
checkPodDeleted(oc, namespace+"-privileged", "app=netobserv-ebpf-agent", "netobserv-ebpf-agent")
flow.PluginEnable = "false"
flow.CreateFlowcollector(oc)
// Scenario3: Verify all pods except plugin pod are present with only Plugin disabled in flowcollector
g.By("Ensure all pods except consolePlugin pod are deployed")
flow.WaitForFlowcollectorReady(oc)
consolePod, err := exutil.GetAllPodsWithLabel(oc, namespace, "app=netobserv-plugin")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(consolePod)).To(o.Equal(0))
g.By("Ensure all pods are running")
flow.WaitForFlowcollectorReady(oc)
})
| |||||
test case
|
openshift/openshift-tests-private
|
760e2678-1f86-4066-9968-f172e75f6b13
|
Author:aramesha-NonPreRelease-High-64880-High-75340-Verify secrets copied for Loki and Kafka when deployed in NS other than flowcollector pods [Serial]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies Loki and Kafka secrets are copied into the flowcollector's
// privileged namespace when the FlowCollector lives in a different
// namespace than Loki/Kafka, and that flows still reach Loki.
g.It("Author:aramesha-NonPreRelease-High-64880-High-75340-Verify secrets copied for Loki and Kafka when deployed in NS other than flowcollector pods [Serial]", func() {
namespace := oc.Namespace()
g.By("Create a new namespace for flowcollector")
flowNS := "netobserv-test"
defer oc.DeleteSpecifiedNamespaceAsAdmin(flowNS)
oc.CreateSpecifiedNamespaceAsAdmin(flowNS)
g.By("Deploy FlowCollector with Kafka TLS")
// Manual Loki mode: point directly at the lokistack gateway service.
lokiURL := fmt.Sprintf("https://%s-gateway-http.%s.svc.cluster.local:8080/api/logs/v1/network/", ls.Name, namespace)
flow := Flowcollector{
Namespace: flowNS,
DeploymentModel: "Kafka",
LokiMode: "Manual",
Template: flowFixturePath,
LokiURL: lokiURL,
LokiTLSCertName: fmt.Sprintf("%s-gateway-ca-bundle", ls.Name),
LokiNamespace: namespace,
KafkaAddress: fmt.Sprintf("kafka-cluster-kafka-bootstrap.%s:9093", namespace),
KafkaTLSEnable: "true",
KafkaNamespace: namespace,
NetworkPolicyEnable: "true",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify networkPolicy is deployed")
networkPolicy, err := oc.AsAdmin().Run("get").Args("networkPolicy", "netobserv", "-n", flow.Namespace).Output()
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(networkPolicy).NotTo(o.BeEmpty())
// ensure certs are synced to privileged NS
secrets, err := getSecrets(oc, flowNS+"-privileged")
o.Expect(err).ToNot(o.HaveOccurred())
o.Expect(secrets).To(o.And(o.ContainSubstring(kafkaUser.UserName), o.ContainSubstring(kafka.Name+"-cluster-ca-cert")))
// verify logs
g.By("Escalate SA to cluster admin")
defer func() {
g.By("Remove cluster role")
err = removeSAFromAdmin(oc, "netobserv-plugin", flowNS)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = addSAToAdmin(oc, "netobserv-plugin", flowNS)
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "netobserv-plugin", flowNS)
g.By("Wait for a min before logs gets collected and written to loki")
startTime := time.Now()
time.Sleep(60 * time.Second)
g.By("Get flowlogs from loki")
err = verifyLokilogsTime(bearerToken, ls.Route, startTime)
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
8d40b6d2-fbd9-4f0f-a5d5-245875021aa8
|
Author:aramesha-LEVEL0-Critical-50504-Critical-72959-Verify flowlogs-pipeline and eBPF metrics and health [Serial]
|
['"os/exec"', '"strings"', '"time"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies FLP and eBPF agent liveness endpoints and that their metrics are
// served over plain HTTP when the FLP metrics server TLS type is Disabled,
// and that Prometheus scrapes both.
g.It("Author:aramesha-LEVEL0-Critical-50504-Critical-72959-Verify flowlogs-pipeline and eBPF metrics and health [Serial]", func() {
var (
flpPromSM = "flowlogs-pipeline-monitor"
namespace = oc.Namespace()
eBPFPromSM = "ebpf-agent-svc-monitor"
curlLive = "http://localhost:8080/live"
)
g.By("Deploy flowcollector")
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
FLPMetricServerTLSType: "Disabled",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify flowlogs-pipeline metrics")
FLPpods, err := exutil.GetAllPodsWithLabel(oc, namespace, "app=flowlogs-pipeline")
o.Expect(err).NotTo(o.HaveOccurred())
// The /live endpoint returns an empty JSON object when healthy.
for _, pod := range FLPpods {
command := []string{"exec", "-n", namespace, pod, "--", "curl", "-s", curlLive}
output, err := oc.AsAdmin().WithoutNamespace().Run(command...).Args().Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.Equal("{}"))
}
FLPtlsScheme, err := getMetricsScheme(oc, flpPromSM, flow.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
FLPtlsScheme = strings.Trim(FLPtlsScheme, "'")
o.Expect(FLPtlsScheme).To(o.Equal("http"))
g.By("Wait for a min before scraping metrics")
time.Sleep(60 * time.Second)
g.By("Verify prometheus is able to scrape FLP metrics")
verifyFLPMetrics(oc)
g.By("Verify eBPF agent metrics")
eBPFpods, err := exutil.GetAllPodsWithLabel(oc, namespace, "app=netobserv-ebpf-agent")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range eBPFpods {
command := []string{"exec", "-n", namespace, pod, "--", "curl", "-s", curlLive}
output, err := oc.AsAdmin().WithoutNamespace().Run(command...).Args().Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.Equal("{}"))
}
// eBPF agents run in the companion "-privileged" namespace.
eBPFtlsScheme, err := getMetricsScheme(oc, eBPFPromSM, flow.Namespace+"-privileged")
o.Expect(err).NotTo(o.HaveOccurred())
eBPFtlsScheme = strings.Trim(eBPFtlsScheme, "'")
o.Expect(eBPFtlsScheme).To(o.Equal("http"))
g.By("Wait for a min before scraping metrics")
time.Sleep(60 * time.Second)
g.By("Verify prometheus is able to scrape eBPF metrics")
verifyEBPFMetrics(oc)
})
| |||||
test case
|
openshift/openshift-tests-private
|
92dfb0c0-9290-4d0c-91cf-c9c4c22b6c11
|
Author:aramesha-LEVEL0-Critical-54043-Critical-66031-Critical-72959-Verify flowlogs-pipeline, eBPF and Console metrics [Serial]
|
['"fmt"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowcollector.go
|
// Verifies FLP, eBPF and console-plugin metrics are served over HTTPS with
// Auto TLS, with the expected per-service serverName, and that Prometheus
// scrapes all three.
g.It("Author:aramesha-LEVEL0-Critical-54043-Critical-66031-Critical-72959-Verify flowlogs-pipeline, eBPF and Console metrics [Serial]", func() {
var (
flpPromSM = "flowlogs-pipeline-monitor"
flpPromSA = "flowlogs-pipeline-prom"
eBPFPromSM = "ebpf-agent-svc-monitor"
eBPFPromSA = "ebpf-agent-svc-prom"
namespace = oc.Namespace()
)
flow := Flowcollector{
Namespace: namespace,
Template: flowFixturePath,
LokiNamespace: namespace,
EBPFMetricServerTLSType: "Auto",
}
defer flow.DeleteFlowcollector(oc)
flow.CreateFlowcollector(oc)
g.By("Verify flowlogs-pipeline metrics")
// Values come back single-quoted from jsonpath; trim before comparing.
FLPtlsScheme, err := getMetricsScheme(oc, flpPromSM, flow.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
FLPtlsScheme = strings.Trim(FLPtlsScheme, "'")
o.Expect(FLPtlsScheme).To(o.Equal("https"))
FLPserverName, err := getMetricsServerName(oc, flpPromSM, flow.Namespace)
FLPserverName = strings.Trim(FLPserverName, "'")
o.Expect(err).NotTo(o.HaveOccurred())
FLPexpectedServerName := fmt.Sprintf("%s.%s.svc", flpPromSA, namespace)
o.Expect(FLPserverName).To(o.Equal(FLPexpectedServerName))
g.By("Wait for a min before scraping metrics")
time.Sleep(60 * time.Second)
g.By("Verify prometheus is able to scrape FLP and Console metrics")
verifyFLPMetrics(oc)
query := fmt.Sprintf("process_start_time_seconds{namespace=\"%s\", job=\"netobserv-plugin-metrics\"}", namespace)
metrics, err := getMetric(oc, query)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(popMetricValue(metrics)).Should(o.BeNumerically(">", 0))
g.By("Verify eBPF metrics")
// eBPF agent monitor/SA live in the companion "-privileged" namespace.
eBPFtlsScheme, err := getMetricsScheme(oc, eBPFPromSM, flow.Namespace+"-privileged")
o.Expect(err).NotTo(o.HaveOccurred())
eBPFtlsScheme = strings.Trim(eBPFtlsScheme, "'")
o.Expect(eBPFtlsScheme).To(o.Equal("https"))
eBPFserverName, err := getMetricsServerName(oc, eBPFPromSM, flow.Namespace+"-privileged")
eBPFserverName = strings.Trim(eBPFserverName, "'")
o.Expect(err).NotTo(o.HaveOccurred())
eBPFexpectedServerName := fmt.Sprintf("%s.%s.svc", eBPFPromSA, namespace+"-privileged")
o.Expect(eBPFserverName).To(o.Equal(eBPFexpectedServerName))
g.By("Verify prometheus is able to scrape eBPF agent metrics")
verifyEBPFMetrics(oc)
})
| |||||
test
|
openshift/openshift-tests-private
|
c4238550-5cba-4a70-881a-9e5c8203e363
|
test_flowmetrics
|
import (
"fmt"
"os"
filePath "path/filepath"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowmetrics.go
|
package netobserv
import (
"fmt"
"os"
filePath "path/filepath"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// Flowmetrics test suite: installs the NetObserv operator from the konflux
// FBC catalog (unless already present), creates a FlowCollector per test,
// and verifies FlowMetric custom metrics and dashboards.
var _ = g.Describe("[sig-netobserv] Network_Observability", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("netobserv", exutil.KubeConfigPath())
// NetObserv Operator variables
netobservNS = "openshift-netobserv-operator"
NOPackageName = "netobserv-operator"
NOcatSrc = Resource{"catsrc", "netobserv-konflux-fbc", "openshift-marketplace"}
NOSource = CatalogSourceObjects{"stable", NOcatSrc.Name, NOcatSrc.Namespace}
// Template directories
baseDir = exutil.FixturePath("testdata", "netobserv")
subscriptionDir = exutil.FixturePath("testdata", "netobserv", "subscription")
flowFixturePath = filePath.Join(baseDir, "flowcollector_v1beta2_template.yaml")
flowmetricsPath = filePath.Join(baseDir, "flowmetrics_v1alpha1_template.yaml")
// Operator namespace object
OperatorNS = OperatorNamespace{
Name: netobservNS,
NamespaceTemplate: filePath.Join(subscriptionDir, "namespace.yaml"),
}
NO = SubscriptionObjects{
OperatorName: "netobserv-operator",
Namespace: netobservNS,
PackageName: NOPackageName,
Subscription: filePath.Join(subscriptionDir, "sub-template.yaml"),
OperatorGroup: filePath.Join(subscriptionDir, "allnamespace-og.yaml"),
CatalogSource: &NOSource,
}
// flow is shared between BeforeEach (create) and AfterEach (delete).
flow Flowcollector
)
g.BeforeEach(func() {
if strings.Contains(os.Getenv("E2E_RUN_TAGS"), "disconnected") {
g.Skip("Skipping tests for disconnected profiles")
}
g.By("Deploy konflux FBC and ImageDigestMirrorSet")
imageDigest := filePath.Join(subscriptionDir, "image-digest-mirror-set.yaml")
catSrcTemplate := filePath.Join(subscriptionDir, "catalog-source.yaml")
catsrcErr := NOcatSrc.applyFromTemplate(oc, "-n", NOcatSrc.Namespace, "-f", catSrcTemplate)
o.Expect(catsrcErr).NotTo(o.HaveOccurred())
WaitUntilCatSrcReady(oc, NOcatSrc.Name)
ApplyResourceFromFile(oc, netobservNS, imageDigest)
g.By(fmt.Sprintf("Subscribe operators to %s channel", NOSource.Channel))
// check if Network Observability Operator is already present
NOexisting := CheckOperatorStatus(oc, NO.Namespace, NO.PackageName)
// create operatorNS and deploy operator if not present
if !NOexisting {
OperatorNS.DeployOperatorNamespace(oc)
NO.SubscribeOperator(oc)
// check if NO operator is deployed
WaitForPodsReadyWithLabel(oc, NO.Namespace, "app="+NO.OperatorName)
NOStatus := CheckOperatorStatus(oc, NO.Namespace, NO.PackageName)
o.Expect((NOStatus)).To(o.BeTrue())
// check if flowcollector API exists
flowcollectorAPIExists, err := isFlowCollectorAPIExists(oc)
o.Expect((flowcollectorAPIExists)).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create flowcollector in beforeEach
flow = Flowcollector{
Namespace: oc.Namespace(),
EBPFeatures: []string{"\"FlowRTT\""},
LokiMode: "Monolithic",
LokiEnable: "false",
Template: flowFixturePath,
}
flow.CreateFlowcollector(oc)
})
g.AfterEach(func() {
flow.DeleteFlowcollector(oc)
})
// Creates FlowMetric custom metrics, waits for the dynamic FLP config to
// pick them up, then verifies the Prometheus queries and dashboards.
g.It("Author:memodi-High-73539-Create custom metrics and charts [Serial]", func() {
namespace := oc.Namespace()
customMetrics := CustomMetrics{
Namespace: namespace,
Template: flowmetricsPath,
}
mainDashversion, err := getResourceVersion(oc, "cm", "netobserv-main", "openshift-config-managed")
o.Expect(err).NotTo(o.HaveOccurred())
curv, err := getResourceVersion(oc, "cm", "flowlogs-pipeline-config-dynamic", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
customMetrics.createCustomMetrics(oc)
waitForResourceGenerationUpdate(oc, "cm", "flowlogs-pipeline-config-dynamic", "resourceVersion", curv, namespace)
customMetricsConfig := customMetrics.getCustomMetricConfigs()
var allUniqueDash = make(map[string]bool)
var uniqueDashboards []string
for _, cmc := range customMetricsConfig {
for _, dashboard := range cmc.DashboardNames {
if _, ok := allUniqueDash[dashboard]; !ok {
allUniqueDash[dashboard] = true
uniqueDashboards = append(uniqueDashboards, dashboard)
}
}
// verify custom metrics queries
for _, query := range cmc.Queries {
metricsQuery := strings.Replace(query, "$METRIC", "netobserv_"+cmc.MetricName, 1)
metricVal := pollMetrics(oc, metricsQuery)
e2e.Logf("metricsQuery %f for query %s", metricVal, metricsQuery)
}
}
// verify dashboard exists
for _, uniqDash := range uniqueDashboards {
// Dashboard configmap names are the slugified dashboard names.
dashName := strings.ToLower(regexp.MustCompile(`[^a-zA-Z0-9]+`).ReplaceAllString(uniqDash, "-"))
if dashName == "main" {
waitForResourceGenerationUpdate(oc, "cm", "netobserv-"+dashName, "resourceVersion", mainDashversion, "openshift-config-managed")
}
checkResourceExists(oc, "cm", "netobserv-"+dashName, "openshift-config-managed")
}
})
})
|
package netobserv
| ||||
test case
|
openshift/openshift-tests-private
|
0df411db-1140-4970-a473-0cedf4a4fd15
|
Author:memodi-High-73539-Create custom metrics and charts [Serial]
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/test_flowmetrics.go
|
// Creates FlowMetric custom metrics, waits for the dynamic FLP config to be
// regenerated, then verifies the configured Prometheus queries return values
// and the associated dashboards exist in openshift-config-managed.
g.It("Author:memodi-High-73539-Create custom metrics and charts [Serial]", func() {
namespace := oc.Namespace()
customMetrics := CustomMetrics{
Namespace: namespace,
Template: flowmetricsPath,
}
// Capture resourceVersions up front so the update waits below can detect change.
mainDashversion, err := getResourceVersion(oc, "cm", "netobserv-main", "openshift-config-managed")
o.Expect(err).NotTo(o.HaveOccurred())
curv, err := getResourceVersion(oc, "cm", "flowlogs-pipeline-config-dynamic", namespace)
o.Expect(err).NotTo(o.HaveOccurred())
customMetrics.createCustomMetrics(oc)
waitForResourceGenerationUpdate(oc, "cm", "flowlogs-pipeline-config-dynamic", "resourceVersion", curv, namespace)
customMetricsConfig := customMetrics.getCustomMetricConfigs()
var allUniqueDash = make(map[string]bool)
var uniqueDashboards []string
for _, cmc := range customMetricsConfig {
for _, dashboard := range cmc.DashboardNames {
if _, ok := allUniqueDash[dashboard]; !ok {
allUniqueDash[dashboard] = true
uniqueDashboards = append(uniqueDashboards, dashboard)
}
}
// verify custom metrics queries
for _, query := range cmc.Queries {
metricsQuery := strings.Replace(query, "$METRIC", "netobserv_"+cmc.MetricName, 1)
metricVal := pollMetrics(oc, metricsQuery)
e2e.Logf("metricsQuery %f for query %s", metricVal, metricsQuery)
}
}
// verify dashboard exists
for _, uniqDash := range uniqueDashboards {
// Dashboard configmap names are the slugified dashboard names.
dashName := strings.ToLower(regexp.MustCompile(`[^a-zA-Z0-9]+`).ReplaceAllString(uniqDash, "-"))
if dashName == "main" {
waitForResourceGenerationUpdate(oc, "cm", "netobserv-"+dashName, "resourceVersion", mainDashversion, "openshift-config-managed")
}
checkResourceExists(oc, "cm", "netobserv-"+dashName, "openshift-config-managed")
}
})
| |||||
file
|
openshift/openshift-tests-private
|
8f491527-8008-4665-9e72-97c3fe8379f2
|
util
|
import (
"context"
"crypto/tls"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
package netobserv
import (
"context"
"crypto/tls"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// TestServerTemplate holds the parameters used to render the nginx
// test-server template into a namespace.
type TestServerTemplate struct {
ServerNS string // namespace the server is deployed into
LargeBlob string // optional payload content served by the server
ServiceType string // kubernetes Service type exposing the server
Template string // path to the server template YAML
}
// TestClientTemplate holds the parameters used to render the nginx
// test-client template that generates traffic toward the test server.
type TestClientTemplate struct {
ServerNS string // namespace of the server the client targets
ClientNS string // namespace the client is deployed into
ObjectSize string // optional size of objects the client requests
Template string // path to the client template YAML
}
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
// contain reports whether b is an element of a.
func contain(a []string, b string) bool {
	for i := range a {
		if a[i] == b {
			return true
		}
	}
	return false
}
func getProxyFromEnv() string {
var proxy string
if os.Getenv("http_proxy") != "" {
proxy = os.Getenv("http_proxy")
} else if os.Getenv("http_proxy") != "" {
proxy = os.Getenv("https_proxy")
}
return proxy
}
func getRouteAddress(oc *exutil.CLI, ns, routeName string) string {
route, err := oc.AdminRouteClient().RouteV1().Routes(ns).Get(context.Background(), routeName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
return route.Spec.Host
}
func processTemplate(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + ".json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
return configFile, err
}
// clear deletes the resource from the cluster and waits until it is fully
// gone. A "NotFound" or unknown-resource-type error from the delete is
// treated as success (the object is already absent).
func (r Resource) clear(oc *exutil.CLI) error {
	msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", r.Namespace, r.Kind, r.Name).Output()
	if err != nil {
		// the CLI's error details are in the captured output, not in err
		errstring := fmt.Sprintf("%v", msg)
		if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
			// already gone (or the type doesn't exist): nothing to wait for
			return nil
		}
		return err
	}
	err = r.waitUntilResourceIsGone(oc)
	return err
}
// expect: true means we want the resource contain/compare with the expectedContent, false means the resource is expected not to compare with/contain the expectedContent;
// compare: true means compare the expectedContent with the resource content, false means check if the resource contains the expectedContent;
// args are the arguments used to execute command `oc.AsAdmin.WithoutNamespace().Run("get").Args(args...).Output()`;
func checkResource(oc *exutil.CLI, expect, compare bool, expectedContent string, args []string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
if err != nil {
if strings.Contains(output, "NotFound") {
return false, nil
}
return false, err
}
if compare {
res := strings.Compare(output, expectedContent)
if (res == 0 && expect) || (res != 0 && !expect) {
return true, nil
}
return false, nil
}
res := strings.Contains(output, expectedContent)
if (res && expect) || (!res && !expect) {
return true, nil
}
return false, nil
})
if expect {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The content doesn't match/contain %s", expectedContent))
} else {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s still exists in the resource", expectedContent))
}
}
// getInfrastructureName returns the cluster's infrastructure name,
// e.g. "anli922-jglp4".
func getInfrastructureName(oc *exutil.CLI) string {
	name, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.infrastructureName}").Output()
	o.Expect(getErr).NotTo(o.HaveOccurred())
	return name
}
// patchResourceAsAdmin applies a JSON patch to the named resource in ns
// as the admin user; the test fails immediately if the patch errors.
func patchResourceAsAdmin(oc *exutil.CLI, ns, resource, rsname, patch string) {
	patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(resource, rsname, "--type=json", "-p", patch, "-n", ns).Execute()
	o.Expect(patchErr).NotTo(o.HaveOccurred())
}
func (r Resource) waitForResourceToAppear(oc *exutil.CLI) error {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.Namespace, r.Kind, r.Name).Output()
if err != nil {
msg := fmt.Sprintf("%v", output)
if strings.Contains(msg, "NotFound") {
return false, nil
}
return false, err
}
e2e.Logf("Find %s %s", r.Kind, r.Name)
return true, nil
})
return err
}
// waitUntilResourceIsGone waits for the resource to be removed from the
// cluster, polling `oc get` every 3s for up to 3 minutes. A
// "NotFound"-style error means the resource is gone.
func (r Resource) waitUntilResourceIsGone(oc *exutil.CLI) error {
	return wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.Namespace, r.Kind, r.Name).Output()
		if err != nil {
			// the CLI's error details are in the captured output, not in err
			errstring := fmt.Sprintf("%v", output)
			if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") || strings.Contains(errstring, "not found") {
				return true, nil
			}
			// a non-nil error terminates the poll immediately and is
			// surfaced to the caller (the `true` here is irrelevant)
			return true, err
		}
		// resource still present: keep polling
		return false, nil
	})
}
// applyFromTemplate processes the template described by parameters, applies
// the result in r.Namespace, and waits for the resource to appear.
// Returns the first error encountered; previously the wait result was
// silently discarded, so a resource that never appeared went unnoticed.
func (r Resource) applyFromTemplate(oc *exutil.CLI, parameters ...string) error {
	parameters = append(parameters, "-n", r.Namespace)
	file, err := processTemplate(oc, parameters...)
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
	if err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", r.Namespace).Execute(); err != nil {
		return err
	}
	// surface the wait error to the caller instead of ignoring it
	return r.waitForResourceToAppear(oc)
}
// ApplyResourceFromFile applies the manifest file (not a template) into
// namespace ns as the admin user; the test fails if the apply errors.
func ApplyResourceFromFile(oc *exutil.CLI, ns, file string) {
	applyErr := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", ns).Execute()
	o.Expect(applyErr).NotTo(o.HaveOccurred())
}
// createResourceFromFile creates resources in namespace ns from the
// manifest file (not a template); the test fails if the create errors.
func createResourceFromFile(oc *exutil.CLI, ns, file string) {
	createErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", ns).Execute()
	o.Expect(createErr).NotTo(o.HaveOccurred())
}
// WaitForPodsReadyWithLabel waits up to 3 minutes for at least one pod
// matching label to exist in ns and for every container of every matching
// pod to report Ready.
// NOTE(review): a pod with an empty ContainerStatuses list counts as ready
// here — confirm that is acceptable for the callers.
func WaitForPodsReadyWithLabel(oc *exutil.CLI, ns, label string) {
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
		pods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: label})
		if err != nil {
			return false, err
		}
		if len(pods.Items) == 0 {
			e2e.Logf("Waiting for pod with label %s to appear\n", label)
			return false, nil
		}
		ready := true
		// one not-ready container decides the verdict; the labeled break
		// exits both loops (the old bare break only left the inner loop)
	podScan:
		for _, pod := range pods.Items {
			for _, containerStatus := range pod.Status.ContainerStatuses {
				if !containerStatus.Ready {
					ready = false
					break podScan
				}
			}
		}
		if !ready {
			e2e.Logf("Waiting for pod with label %s to be ready...\n", label)
		}
		return ready, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The pod with label %s is not available", label))
}
// WaitForDeploymentPodsToBeReady waits for the specific deployment to be ready
func waitForDeploymentPodsToBeReady(oc *exutil.CLI, namespace, name string) error {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
deployment, err := oc.AdminKubeClient().AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for availability of deployment/%s\n", name)
return false, nil
}
return false, err
}
if deployment.Status.AvailableReplicas == *deployment.Spec.Replicas && deployment.Status.UpdatedReplicas == *deployment.Spec.Replicas {
e2e.Logf("Deployment %s available (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s deployment (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return false, nil
})
return err
}
func waitForStatefulsetReady(oc *exutil.CLI, namespace, name string) error {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
ss, err := oc.AdminKubeClient().AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for availability of %s statefulset\n", name)
return false, nil
}
return false, err
}
if ss.Status.ReadyReplicas == *ss.Spec.Replicas && ss.Status.UpdatedReplicas == *ss.Spec.Replicas {
e2e.Logf("statefulset %s available (%d/%d)\n", name, ss.Status.ReadyReplicas, *ss.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s statefulset (%d/%d)\n", name, ss.Status.ReadyReplicas, *ss.Spec.Replicas)
return false, nil
})
return err
}
func getSecrets(oc *exutil.CLI, namespace string) (string, error) {
var secrets string
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 360*time.Second, false, func(context.Context) (done bool, err error) {
secrets, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", "-n", namespace, "-o", "jsonpath='{range .items[*]}{.metadata.name}{\" \"}'").Output()
if err != nil {
return false, err
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Secrets not available")
return secrets, err
}
// check if pods with label are fully deleted
func checkPodDeleted(oc *exutil.CLI, ns, label, checkValue string) {
podCheck := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
if err != nil || strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(podCheck, fmt.Sprintf("Pod \"%s\" exists or not fully deleted", checkValue))
}
// getSAToken creates and returns a short-lived bound token for the service
// account name in namespace ns.
func getSAToken(oc *exutil.CLI, name, ns string) string {
	tok, tokErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", name, "-n", ns).Output()
	o.Expect(tokErr).NotTo(o.HaveOccurred())
	return tok
}
// doHTTPRequest sends an HTTP request with the given header and method to
// address+path(+query), retrying up to attempts times until the response
// status equals expectedStatusCode, and returns the response body bytes.
// TLS verification is deliberately disabled (test routes use self-signed
// certs); the proxy, if any, comes from the http(s)_proxy environment.
// NOTE(review): requestBody may be consumed by the first attempt, so
// retries with a non-nil body could send an empty body — confirm callers
// only retry GET-style requests.
func doHTTPRequest(header http.Header, address, path, query, method string, quiet bool, attempts int, requestBody io.Reader, expectedStatusCode int) ([]byte, error) {
	us, err := buildURL(address, path, query)
	if err != nil {
		return nil, err
	}
	// quiet suppresses logging the full request URL (may contain queries)
	if !quiet {
		e2e.Logf(us)
	}
	req, err := http.NewRequest(strings.ToUpper(method), us, requestBody)
	if err != nil {
		return nil, err
	}
	req.Header = header
	var tr *http.Transport
	proxy := getProxyFromEnv()
	if len(proxy) > 0 {
		proxyURL, err := url.Parse(proxy)
		o.Expect(err).NotTo(o.HaveOccurred())
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			Proxy: http.ProxyURL(proxyURL),
		}
	} else {
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	client := &http.Client{Transport: tr}
	var resp *http.Response
	success := false
	for attempts > 0 {
		attempts--
		resp, err = client.Do(req)
		if err != nil {
			e2e.Logf("error sending request %v", err)
			continue
		}
		// an unexpected status counts as a failed attempt; drain and close
		// the body so the transport can reuse the connection
		if resp.StatusCode != expectedStatusCode {
			buf, _ := io.ReadAll(resp.Body) // nolint
			e2e.Logf("Error response from server: %s %s (%v), attempts remaining: %d", resp.Status, string(buf), err, attempts)
			if err := resp.Body.Close(); err != nil {
				e2e.Logf("error closing body %v", err)
			}
			continue
		}
		success = true
		break
	}
	if !success {
		return nil, fmt.Errorf("run out of attempts while querying the server")
	}
	// close the successful response's body after reading it below
	defer func() {
		if err := resp.Body.Close(); err != nil {
			e2e.Logf("error closing body %v", err)
		}
	}()
	return io.ReadAll(resp.Body)
}
func (testTemplate *TestServerTemplate) createServer(oc *exutil.CLI) error {
templateParams := []string{"--ignore-unknown-parameters=true", "-f", testTemplate.Template, "-p", "SERVER_NS=" + testTemplate.ServerNS}
if testTemplate.LargeBlob != "" {
templateParams = append(templateParams, "-p", "LARGE_BLOB="+testTemplate.LargeBlob)
}
if testTemplate.ServiceType != "" {
templateParams = append(templateParams, "-p", "SERVICE_TYPE="+testTemplate.ServiceType)
}
configFile := exutil.ProcessTemplate(oc, templateParams...)
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile).Execute()
if err != nil {
return err
}
return nil
}
func (testTemplate *TestClientTemplate) createClient(oc *exutil.CLI) error {
templateParams := []string{"--ignore-unknown-parameters=true", "-f", testTemplate.Template, "-p", "SERVER_NS=" + testTemplate.ServerNS, "-p", "CLIENT_NS=" + testTemplate.ClientNS}
if testTemplate.ObjectSize != "" {
templateParams = append(templateParams, "-p", "OBJECT_SIZE="+testTemplate.ObjectSize)
}
configFile := exutil.ProcessTemplate(oc, templateParams...)
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile).Execute()
if err != nil {
return err
}
return nil
}
// wait until DaemonSet is Ready
func waitUntilDaemonSetReady(oc *exutil.CLI, daemonset, namespace string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (done bool, err error) {
desiredNumber, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset", daemonset, "-n", namespace, "-o", "jsonpath='{.status.desiredNumberScheduled}'").Output()
if err != nil {
// loop until daemonset is found or until timeout
if strings.Contains(err.Error(), "not found") {
return false, nil
}
return false, err
}
numberReady, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset", daemonset, "-n", namespace, "-o", "jsonpath='{.status.numberReady}'").Output()
if err != nil {
return false, err
}
numberReadyi, err := strconv.Atoi(strings.Trim(numberReady, "'"))
if err != nil {
return false, err
}
desiredNumberi, err := strconv.Atoi(strings.Trim(desiredNumber, "'"))
if err != nil {
return false, err
}
if numberReadyi != desiredNumberi {
return false, nil
}
updatedNumber, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset", daemonset, "-n", namespace, "-o", "jsonpath='{.status.updatedNumberScheduled}'").Output()
if err != nil {
return false, err
}
updatedNumberi, err := strconv.Atoi(strings.Trim(updatedNumber, "'"))
if err != nil {
return false, err
}
if updatedNumberi != desiredNumberi {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Daemonset %s did not become Ready", daemonset))
}
// wait until Deployment is Ready
func waitUntilDeploymentReady(oc *exutil.CLI, deployment, ns string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (done bool, err error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", deployment, "-n", ns, "-o", "jsonpath='{.status.conditions[0].type}'").Output()
if err != nil {
// loop until deployment is found or until timeout
if strings.Contains(err.Error(), "not found") {
return false, nil
}
return false, err
}
if strings.Trim(status, "'") != "Available" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Deployment %s did not become Available", deployment))
}
// getResourceGeneration returns .metadata.generation of the named resource
// in ns, or -1 with a non-nil error on failure.
func getResourceGeneration(oc *exutil.CLI, resource, name, ns string) (int, error) {
	raw, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, name, "-o=jsonpath='{.metadata.generation}'", "-n", ns).Output()
	if err != nil {
		return -1, err
	}
	// the jsonpath output is wrapped in single quotes; strip before parsing
	generation, convErr := strconv.Atoi(strings.Trim(raw, "'"))
	if convErr != nil {
		return -1, convErr
	}
	return generation, nil
}
// getResourceVersion returns .metadata.resourceVersion of the named
// resource in ns as an int, or -1 with a non-nil error on failure.
func getResourceVersion(oc *exutil.CLI, resource, name, ns string) (int, error) {
	raw, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, name, "-o=jsonpath='{.metadata.resourceVersion}'", "-n", ns).Output()
	if err != nil {
		return -1, err
	}
	// the jsonpath output is wrapped in single quotes; strip before parsing
	version, convErr := strconv.Atoi(strings.Trim(raw, "'"))
	if convErr != nil {
		return -1, convErr
	}
	return version, nil
}
// waitForResourceGenerationUpdate polls (every 10s, up to 5 minutes) until
// the resource's metadata field — "generation" or "resourceVersion" —
// differs from the previous value prev.
// Fixes: an unsupported field previously compared an uninitialized 0
// against prev (silently succeeding whenever prev != 0), and the failure
// message always said "generation" even when resourceVersion was polled.
func waitForResourceGenerationUpdate(oc *exutil.CLI, resource, name, field string, prev int, ns string) {
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(context.Context) (done bool, err error) {
		var cur int
		switch field {
		case "generation":
			cur, err = getResourceGeneration(oc, resource, name, ns)
		case "resourceVersion":
			cur, err = getResourceVersion(oc, resource, name, ns)
		default:
			// fail fast on a programmer error instead of polling blindly
			return false, fmt.Errorf("unsupported field %q", field)
		}
		if err != nil {
			return false, err
		}
		return cur != prev, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s/%s %s did not update", resource, name, field))
}
// checkResourceExists reports whether the named resource exists in ns,
// based on a single (non-polling) `oc get`.
// NOTE(review): `oc get` on a missing resource normally exits non-zero, so
// the stderr "NotFound" branch below may be unreachable and callers see an
// error instead of (false, nil) — confirm against the CLI wrapper.
func checkResourceExists(oc *exutil.CLI, resource, name, ns string) (bool, error) {
	stdout, stderr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, name, "-n", ns).Outputs()
	if err != nil {
		return false, err
	}
	if strings.Contains(stderr, "NotFound") {
		return false, nil
	}
	// `oc get <kind> <name>` echoes the name in its table output on success
	if strings.Contains(stdout, name) {
		return true, nil
	}
	return false, nil
}
// getPodLogs collects the logs of pod podname in namespace into a
// temporary file and returns the file's absolute path.
// Fixes: uses PollUntilContextTimeout (consistent with the rest of this
// file; wait.Poll is deprecated) and treats `oc logs`/stat failures as
// transient so the poll actually retries — previously any error aborted
// the poll, defeating the stated purpose of polling for rotated logs.
func getPodLogs(oc *exutil.CLI, namespace, podname string) (string, error) {
	cargs := []string{"-n", namespace, podname}
	var podLogs string
	// poll because logs can be rotated and come back empty or unavailable
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		out, logErr := oc.AsAdmin().WithoutNamespace().Run("logs").Args(cargs...).OutputToFile("podLogs.txt")
		if logErr != nil {
			e2e.Logf("unable to get the pod (%s) logs: %v", podname, logErr)
			return false, nil
		}
		podLogs = out
		info, statErr := os.Stat(podLogs)
		if statErr != nil {
			return false, nil
		}
		// only accept a non-empty log file
		return info.Size() > 0, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s pod logs were not collected", podname))
	e2e.Logf("pod logs file is %s", podLogs)
	return filepath.Abs(podLogs)
}
// wait until NetworkAttachDefinition is Ready
func checkNAD(oc *exutil.CLI, nad, ns string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (done bool, err error) {
nadOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("net-attach-def", nad, "-n", ns).Output()
if err != nil {
// loop until NAD is found or until timeout
if strings.Contains(err.Error(), "not found") {
return false, nil
}
return false, err
}
if !strings.Contains(nadOutput, nad) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Network Attach Definition %s did not become Available", nad))
}
// wait until hyperconverged is ready
func waitUntilHyperConvergedReady(oc *exutil.CLI, hc, ns string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (done bool, err error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hyperconverged", hc, "-n", ns, "-o", "jsonpath='{.status.conditions[0].status}'").Output()
if err != nil {
// loop until hyperconverged is found or until timeout
if strings.Contains(err.Error(), "not found") {
return false, nil
}
return false, err
}
if strings.Trim(status, "'") != "True" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("HyperConverged %s did not become Available", hc))
}
// wait until virtual machine is Ready
func waitUntilVMReady(oc *exutil.CLI, vm, ns string) {
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 1200*time.Second, false, func(context.Context) (done bool, err error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("virtualmachine", vm, "-n", ns, "-o", "jsonpath='{.status.conditions[0].status}'").Output()
if err != nil {
// loop until virtual machine is found or until timeout
if strings.Contains(err.Error(), "not found") {
return false, nil
}
return false, err
}
if strings.Trim(status, "'") != "True" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Virtual machine %s did not become Available", vm))
}
// wait until catalogSource is Ready
func WaitUntilCatSrcReady(oc *exutil.CLI, catSrc string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (done bool, err error) {
state, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("catalogsource", catSrc, "-n", "openshift-marketplace", "-o", "jsonpath='{.status.connectionState.lastObservedState}'").Output()
if err != nil {
// loop until catalogSource is found or until timeout
if strings.Contains(err.Error(), "not found") {
return false, nil
}
return false, err
}
if strings.Trim(state, "'") != "READY" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Catalog Source %s did not become Ready", catSrc))
}
// hasMetalWorkerNodes reports whether every worker node advertises a
// "metal" instance-type label (i.e. the cluster runs baremetal workers).
func hasMetalWorkerNodes(oc *exutil.CLI) bool {
	workers, err := exutil.GetClusterNodesBy(oc, "worker")
	o.Expect(err).NotTo(o.HaveOccurred())
	for _, worker := range workers {
		instanceType, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", worker, "-o", "jsonpath='{.metadata.labels.node\\.kubernetes\\.io/instance-type}'").Output()
		o.Expect(getErr).NotTo(o.HaveOccurred())
		// a single non-metal worker disqualifies the cluster
		if !strings.Contains(instanceType, "metal") {
			e2e.Logf("Cluster does not have metal worker nodes")
			return false
		}
	}
	return true
}
// check resource is fully deleted
func checkResourceDeleted(oc *exutil.CLI, resourceType, resourceName, namespace string) {
resourceCheck := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType, resourceName, "-n", namespace).Output()
if !strings.Contains(output, "NotFound") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(resourceCheck, fmt.Sprintf("found %s \"%s\" exist or not fully deleted", resourceType, resourceName))
}
// deleteResource deletes the named resource (passing any extra CLI flags
// through) and blocks until it is confirmed fully gone.
func deleteResource(oc *exutil.CLI, resourceType, resourceName, namespace string, optionalParameters ...string) {
	args := append([]string{resourceType, resourceName, "-n", namespace}, optionalParameters...)
	delErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
	o.Expect(delErr).NotTo(o.HaveOccurred())
	checkResourceDeleted(oc, resourceType, resourceName, namespace)
}
// get kubeadmin token of the cluster
func getKubeAdminToken(oc *exutil.CLI, kubeAdminPasswd, serverUrl, currentContext string) string {
longinErr := oc.WithoutNamespace().Run("login").Args("-u", "kubeadmin", "-p", kubeAdminPasswd, serverUrl).NotShowInfo().Execute()
o.Expect(longinErr).NotTo(o.HaveOccurred())
kubeadminToken, kubeadminTokenErr := oc.WithoutNamespace().Run("whoami").Args("-t").Output()
o.Expect(kubeadminTokenErr).NotTo(o.HaveOccurred())
rollbackCtxErr := oc.WithoutNamespace().Run("config").Args("set", "current-context", currentContext).Execute()
o.Expect(rollbackCtxErr).NotTo(o.HaveOccurred())
return kubeadminToken
}
|
package netobserv
| ||||
function
|
openshift/openshift-tests-private
|
9a875b8e-12a7-4445-8ce2-efa6516b7a99
|
getRandomString
|
['"math/rand"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
3cbc7ccb-bd5c-49a7-8d72-76b4dd5ddac8
|
contain
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func contain(a []string, b string) bool {
for _, c := range a {
if c == b {
return true
}
}
return false
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
db13098e-f9e7-4cd1-be74-95600c8aae3e
|
getProxyFromEnv
|
['"os"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func getProxyFromEnv() string {
var proxy string
if os.Getenv("http_proxy") != "" {
proxy = os.Getenv("http_proxy")
} else if os.Getenv("http_proxy") != "" {
proxy = os.Getenv("https_proxy")
}
return proxy
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
3d3b6cea-839a-4361-aeaf-3f595bd6fd76
|
getRouteAddress
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func getRouteAddress(oc *exutil.CLI, ns, routeName string) string {
route, err := oc.AdminRouteClient().RouteV1().Routes(ns).Get(context.Background(), routeName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
return route.Spec.Host
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
45222696-3361-4525-9dc1-9553d2f17912
|
processTemplate
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func processTemplate(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + ".json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
return configFile, err
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
a815c63e-0b85-4659-bdb3-9302f35b2005
|
clear
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func (r Resource) clear(oc *exutil.CLI) error {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", r.Namespace, r.Kind, r.Name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", msg)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
return nil
}
return err
}
err = r.waitUntilResourceIsGone(oc)
return err
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
d9e9990b-4e28-4606-81bc-06afd2866191
|
checkResource
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func checkResource(oc *exutil.CLI, expect, compare bool, expectedContent string, args []string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
if err != nil {
if strings.Contains(output, "NotFound") {
return false, nil
}
return false, err
}
if compare {
res := strings.Compare(output, expectedContent)
if (res == 0 && expect) || (res != 0 && !expect) {
return true, nil
}
return false, nil
}
res := strings.Contains(output, expectedContent)
if (res && expect) || (!res && !expect) {
return true, nil
}
return false, nil
})
if expect {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The content doesn't match/contain %s", expectedContent))
} else {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s still exists in the resource", expectedContent))
}
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
001b90b1-a30d-45b0-95da-5057eadd0536
|
getInfrastructureName
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func getInfrastructureName(oc *exutil.CLI) string {
infrastructureName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.infrastructureName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return infrastructureName
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
b666d671-d21d-4e01-8e16-b6fbd31f2fe2
|
patchResourceAsAdmin
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func patchResourceAsAdmin(oc *exutil.CLI, ns, resource, rsname, patch string) {
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(resource, rsname, "--type=json", "-p", patch, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
3c4e6f5f-8709-4f0c-8996-d28d21f171b1
|
waitForResourceToAppear
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func (r Resource) waitForResourceToAppear(oc *exutil.CLI) error {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.Namespace, r.Kind, r.Name).Output()
if err != nil {
msg := fmt.Sprintf("%v", output)
if strings.Contains(msg, "NotFound") {
return false, nil
}
return false, err
}
e2e.Logf("Find %s %s", r.Kind, r.Name)
return true, nil
})
return err
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
4fefe031-a36c-4762-b6c0-9a933d522b2b
|
waitUntilResourceIsGone
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func (r Resource) waitUntilResourceIsGone(oc *exutil.CLI) error {
return wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.Namespace, r.Kind, r.Name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", output)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") || strings.Contains(errstring, "not found") {
return true, nil
}
return true, err
}
return false, nil
})
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
25b448dc-de72-4b28-913e-3b380169de8b
|
applyFromTemplate
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func (r Resource) applyFromTemplate(oc *exutil.CLI, parameters ...string) error {
parameters = append(parameters, "-n", r.Namespace)
file, err := processTemplate(oc, parameters...)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", r.Namespace).Execute()
r.waitForResourceToAppear(oc)
return err
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
05e1183e-e929-4635-9763-0f611f0e65f5
|
ApplyResourceFromFile
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func ApplyResourceFromFile(oc *exutil.CLI, ns, file string) {
err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
d54b1280-20c4-4ada-b514-ac91d57675a0
|
createResourceFromFile
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func createResourceFromFile(oc *exutil.CLI, ns, file string) {
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
42d49afa-e190-47eb-b3ca-9f83ac95fab6
|
WaitForPodsReadyWithLabel
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
func WaitForPodsReadyWithLabel(oc *exutil.CLI, ns, label string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, false, func(context.Context) (done bool, err error) {
pods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: label})
if err != nil {
return false, err
}
if len(pods.Items) == 0 {
e2e.Logf("Waiting for pod with label %s to appear\n", label)
return false, nil
}
ready := true
for _, pod := range pods.Items {
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
ready = false
break
}
}
}
if !ready {
e2e.Logf("Waiting for pod with label %s to be ready...\n", label)
}
return ready, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The pod with label %s is not availabile", label))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
207a686b-f18d-405d-9662-05bbc2f192d8
|
waitForDeploymentPodsToBeReady
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// waitForDeploymentPodsToBeReady waits until the named deployment in namespace
// reports all replicas available and updated, polling every 5s for up to 180s.
// It returns the poll error (nil on success).
func waitForDeploymentPodsToBeReady(oc *exutil.CLI, namespace, name string) error {
	return wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
		dep, getErr := oc.AdminKubeClient().AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if getErr != nil {
			// the deployment may not have been created yet; keep polling
			if apierrors.IsNotFound(getErr) {
				e2e.Logf("Waiting for availability of deployment/%s\n", name)
				return false, nil
			}
			return false, getErr
		}
		wanted := *dep.Spec.Replicas
		if dep.Status.AvailableReplicas == wanted && dep.Status.UpdatedReplicas == wanted {
			e2e.Logf("Deployment %s available (%d/%d)\n", name, dep.Status.AvailableReplicas, wanted)
			return true, nil
		}
		e2e.Logf("Waiting for full availability of %s deployment (%d/%d)\n", name, dep.Status.AvailableReplicas, wanted)
		return false, nil
	})
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
d0984218-86fe-4e50-9a3e-1efc240ce379
|
waitForStatefulsetReady
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// waitForStatefulsetReady waits until the named statefulset in namespace has
// all replicas ready and updated, polling every 5s for up to 180s. It returns
// the poll error (nil on success).
func waitForStatefulsetReady(oc *exutil.CLI, namespace, name string) error {
	return wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
		sts, getErr := oc.AdminKubeClient().AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if getErr != nil {
			// the statefulset may not have been created yet; keep polling
			if apierrors.IsNotFound(getErr) {
				e2e.Logf("Waiting for availability of %s statefulset\n", name)
				return false, nil
			}
			return false, getErr
		}
		wanted := *sts.Spec.Replicas
		if sts.Status.ReadyReplicas == wanted && sts.Status.UpdatedReplicas == wanted {
			e2e.Logf("statefulset %s available (%d/%d)\n", name, sts.Status.ReadyReplicas, wanted)
			return true, nil
		}
		e2e.Logf("Waiting for full availability of %s statefulset (%d/%d)\n", name, sts.Status.ReadyReplicas, wanted)
		return false, nil
	})
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
841cbece-f15e-4677-b59c-2ceb0f94b7f1
|
getSecrets
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// getSecrets returns a space-separated list of secret names in namespace,
// retrying every 10s for up to 6 minutes and failing the test on timeout.
func getSecrets(oc *exutil.CLI, namespace string) (string, error) {
	var names string
	pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 360*time.Second, false, func(context.Context) (bool, error) {
		out, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", "-n", namespace, "-o", "jsonpath='{range .items[*]}{.metadata.name}{\" \"}'").Output()
		names = out
		if getErr != nil {
			return false, getErr
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(pollErr, "Secrets not available")
	return names, pollErr
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
1f8c6fe8-3a14-48d6-ac36-3308a6c04d56
|
checkPodDeleted
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// checkPodDeleted polls every 5s (up to 240s) until "oc get pod" for label in
// namespace ns succeeds and no longer lists checkValue, failing the test if
// the pod is still present after the timeout.
func checkPodDeleted(oc *exutil.CLI, ns, label, checkValue string) {
	pollErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
		out, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
		if getErr != nil {
			return false, nil
		}
		if strings.Contains(out, checkValue) {
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Pod \"%s\" exists or not fully deleted", checkValue))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
5713c951-f1b0-448f-9198-34c304cd0b8f
|
getSAToken
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// getSAToken creates and returns a short-lived API token for service account
// name in namespace ns, failing the test if token creation errors.
func getSAToken(oc *exutil.CLI, name, ns string) string {
	token, tokenErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", name, "-n", ns).Output()
	o.Expect(tokenErr).NotTo(o.HaveOccurred())
	return token
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
0a5fe712-d62a-4cfb-a970-e2c858ccf866
|
doHTTPRequest
|
['"crypto/tls"', '"fmt"', '"io"', '"net/http"', '"net/url"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// doHTTPRequest sends an HTTP request of the given method to address+path with
// the supplied query string and headers, retrying up to `attempts` times until
// the response status equals expectedStatusCode, and returns that response's
// body. TLS certificate verification is skipped, and a proxy taken from the
// environment (via getProxyFromEnv, defined elsewhere in this file) is used
// when set.
//
// NOTE(review): the same *http.Request is reused across retries; if
// requestBody is a one-shot io.Reader it is drained after the first attempt,
// so retried attempts may send an empty body — confirm callers only retry
// body-less requests.
func doHTTPRequest(header http.Header, address, path, query, method string, quiet bool, attempts int, requestBody io.Reader, expectedStatusCode int) ([]byte, error) {
	// buildURL (defined elsewhere in this file) assembles the final URL.
	us, err := buildURL(address, path, query)
	if err != nil {
		return nil, err
	}
	if !quiet {
		e2e.Logf(us)
	}
	req, err := http.NewRequest(strings.ToUpper(method), us, requestBody)
	if err != nil {
		return nil, err
	}
	req.Header = header
	var tr *http.Transport
	proxy := getProxyFromEnv()
	if len(proxy) > 0 {
		proxyURL, err := url.Parse(proxy)
		o.Expect(err).NotTo(o.HaveOccurred())
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			Proxy:           http.ProxyURL(proxyURL),
		}
	} else {
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	client := &http.Client{Transport: tr}
	var resp *http.Response
	success := false
	for attempts > 0 {
		attempts--
		resp, err = client.Do(req)
		if err != nil {
			e2e.Logf("error sending request %v", err)
			continue
		}
		// unexpected status: log it (with body) and retry, closing this
		// attempt's body so the connection can be reused
		if resp.StatusCode != expectedStatusCode {
			buf, _ := io.ReadAll(resp.Body) // nolint
			e2e.Logf("Error response from server: %s %s (%v), attempts remaining: %d", resp.Status, string(buf), err, attempts)
			if err := resp.Body.Close(); err != nil {
				e2e.Logf("error closing body %v", err)
			}
			continue
		}
		success = true
		break
	}
	if !success {
		return nil, fmt.Errorf("run out of attempts while querying the server")
	}
	// close the successful response's body after it is read below
	defer func() {
		if err := resp.Body.Close(); err != nil {
			e2e.Logf("error closing body %v", err)
		}
	}()
	return io.ReadAll(resp.Body)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
ebce49f8-cde5-4d09-9442-64c6bd52a5fc
|
createServer
|
['TestServerTemplate']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// createServer instantiates the test-server template in ServerNS, passing the
// LARGE_BLOB and SERVICE_TYPE template parameters only when set on the struct.
// It returns any error from creating the processed manifest.
func (testTemplate *TestServerTemplate) createServer(oc *exutil.CLI) error {
	params := []string{"--ignore-unknown-parameters=true", "-f", testTemplate.Template, "-p", "SERVER_NS=" + testTemplate.ServerNS}
	if testTemplate.LargeBlob != "" {
		params = append(params, "-p", "LARGE_BLOB="+testTemplate.LargeBlob)
	}
	if testTemplate.ServiceType != "" {
		params = append(params, "-p", "SERVICE_TYPE="+testTemplate.ServiceType)
	}
	manifest := exutil.ProcessTemplate(oc, params...)
	return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", manifest).Execute()
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
572374ff-2988-4442-9f2b-fa709525b23a
|
createClient
|
['TestClientTemplate']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// createClient instantiates the test-client template with the server and
// client namespaces, passing OBJECT_SIZE only when set on the struct. It
// returns any error from creating the processed manifest.
func (testTemplate *TestClientTemplate) createClient(oc *exutil.CLI) error {
	params := []string{"--ignore-unknown-parameters=true", "-f", testTemplate.Template, "-p", "SERVER_NS=" + testTemplate.ServerNS, "-p", "CLIENT_NS=" + testTemplate.ClientNS}
	if testTemplate.ObjectSize != "" {
		params = append(params, "-p", "OBJECT_SIZE="+testTemplate.ObjectSize)
	}
	manifest := exutil.ProcessTemplate(oc, params...)
	return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", manifest).Execute()
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
a52adce1-dd5f-44f7-8336-48b20abc5a5a
|
waitUntilDaemonSetReady
|
['"context"', '"fmt"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// waitUntilDaemonSetReady blocks until the daemonset in namespace has all
// desired pods both ready and updated, polling every 10s for up to 10
// minutes, and fails the test on timeout.
func waitUntilDaemonSetReady(oc *exutil.CLI, daemonset, namespace string) {
	// statusCount fetches one integer field from the daemonset's status.
	statusCount := func(field string) (int, error) {
		out, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset", daemonset, "-n", namespace, "-o", "jsonpath='{.status."+field+"}'").Output()
		if getErr != nil {
			return 0, getErr
		}
		return strconv.Atoi(strings.Trim(out, "'"))
	}
	pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		desired, getErr := statusCount("desiredNumberScheduled")
		if getErr != nil {
			// keep polling while the daemonset has not been created yet
			if strings.Contains(getErr.Error(), "not found") {
				return false, nil
			}
			return false, getErr
		}
		ready, getErr := statusCount("numberReady")
		if getErr != nil {
			return false, getErr
		}
		if ready != desired {
			return false, nil
		}
		updated, getErr := statusCount("updatedNumberScheduled")
		if getErr != nil {
			return false, getErr
		}
		return updated == desired, nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Daemonset %s did not become Ready", daemonset))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
62463c7d-bba6-4f2f-9209-86f6b8b84269
|
waitUntilDeploymentReady
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// waitUntilDeploymentReady blocks until the deployment's first status
// condition type is "Available", polling every 10s for up to 10 minutes, and
// fails the test on timeout.
func waitUntilDeploymentReady(oc *exutil.CLI, deployment, ns string) {
	pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		condType, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", deployment, "-n", ns, "-o", "jsonpath='{.status.conditions[0].type}'").Output()
		if getErr != nil {
			// keep polling while the deployment has not been created yet
			if strings.Contains(getErr.Error(), "not found") {
				return false, nil
			}
			return false, getErr
		}
		return strings.Trim(condType, "'") == "Available", nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Deployment %s did not become Available", deployment))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
c32e1afa-4d84-4a6e-96bf-3dec985814c9
|
getResourceGeneration
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// getResourceGeneration returns .metadata.generation of resource/name in ns,
// or -1 with an error if the lookup or integer conversion fails.
func getResourceGeneration(oc *exutil.CLI, resource, name, ns string) (int, error) {
	raw, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, name, "-o=jsonpath='{.metadata.generation}'", "-n", ns).Output()
	if getErr != nil {
		return -1, getErr
	}
	generation, convErr := strconv.Atoi(strings.Trim(raw, "'"))
	if convErr != nil {
		return -1, convErr
	}
	return generation, nil
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
7a6e9f5f-959b-4768-a8ce-b78a630cdba6
|
getResourceVersion
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// getResourceVersion returns .metadata.resourceVersion of resource/name in ns
// as an int, or -1 with an error if the lookup or conversion fails.
func getResourceVersion(oc *exutil.CLI, resource, name, ns string) (int, error) {
	raw, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, name, "-o=jsonpath='{.metadata.resourceVersion}'", "-n", ns).Output()
	if getErr != nil {
		return -1, getErr
	}
	version, convErr := strconv.Atoi(strings.Trim(raw, "'"))
	if convErr != nil {
		return -1, convErr
	}
	return version, nil
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
cb09bcd1-350b-4c20-8993-29f5ade17dd1
|
waitForResourceGenerationUpdate
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// waitForResourceGenerationUpdate polls every 10s (up to 300s) until the given
// metadata field ("generation" or "resourceVersion") of resource/name in
// namespace ns differs from prev, failing the test on timeout.
// NOTE(review): any other field value leaves cur at 0 and compares 0 != prev —
// confirm callers only pass the two supported field names.
func waitForResourceGenerationUpdate(oc *exutil.CLI, resource, name, field string, prev int, ns string) {
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(context.Context) (done bool, err error) {
		var cur int
		if field == "generation" {
			cur, err = getResourceGeneration(oc, resource, name, ns)
		} else if field == "resourceVersion" {
			cur, err = getResourceVersion(oc, resource, name, ns)
		}
		if err != nil {
			return false, err
		}
		if cur != prev {
			return true, nil
		}
		return false, nil
	})
	// name the field that was actually watched; the original message always
	// said "generation", which was misleading for resourceVersion waits
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s/%s %s did not update", resource, name, field))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
1b40663d-e547-41dd-b3cc-ac09c2d28287
|
checkResourceExists
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// checkResourceExists reports whether resource/name exists in namespace ns.
// It returns (false, nil) when the object is NotFound and an error for any
// other oc failure.
func checkResourceExists(oc *exutil.CLI, resource, name, ns string) (bool, error) {
	stdout, stderr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, name, "-n", ns).Outputs()
	if err != nil {
		// "oc get" exits non-zero for a missing object, so NotFound used to
		// surface as an error here; treat it as "does not exist" instead.
		if strings.Contains(stderr, "NotFound") {
			return false, nil
		}
		return false, err
	}
	if strings.Contains(stderr, "NotFound") {
		return false, nil
	}
	if strings.Contains(stdout, name) {
		return true, nil
	}
	return false, nil
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
8880c857-89dc-4dad-8eb1-4579aec5d67c
|
getPodLogs
|
['"fmt"', '"os"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// getPodLogs collects the logs of podname in namespace into a temp file and
// returns the file's absolute path. It polls because logs could be rotated or
// not yet available, failing the test if no non-empty log file is collected
// within 10 minutes.
func getPodLogs(oc *exutil.CLI, namespace, podname string) (string, error) {
	cargs := []string{"-n", namespace, podname}
	var podLogs string
	var err error
	// wait.Poll is deprecated; use the context-aware poller for consistency
	// with the other wait loops in this file (same 10s/600s timings)
	err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		podLogs, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args(cargs...).OutputToFile("podLogs.txt")
		if err != nil {
			e2e.Logf("unable to get the pod (%s) logs", podname)
			return false, err
		}
		podLogsf, err := os.Stat(podLogs)
		if err != nil {
			return false, err
		}
		// succeed only once the collected log file is non-empty
		return podLogsf.Size() > 0, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s pod logs were not collected", podname))
	e2e.Logf("pod logs file is %s", podLogs)
	return filepath.Abs(podLogs)
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
fb9670d5-a6e4-4d4f-a05f-3c809cea5351
|
checkNAD
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// checkNAD waits until the network attachment definition nad is listed in
// namespace ns, polling every 10s for up to 10 minutes, and fails the test on
// timeout.
func checkNAD(oc *exutil.CLI, nad, ns string) {
	pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		out, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("net-attach-def", nad, "-n", ns).Output()
		if getErr != nil {
			// keep polling while the NAD has not been created yet
			if strings.Contains(getErr.Error(), "not found") {
				return false, nil
			}
			return false, getErr
		}
		return strings.Contains(out, nad), nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Network Attach Definition %s did not become Available", nad))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
c3587e89-797c-4310-a9f7-71f9bd6f3079
|
waitUntilHyperConvergedReady
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// waitUntilHyperConvergedReady blocks until the first status condition of the
// hyperconverged resource hc in namespace ns reports "True", polling every
// 10s for up to 10 minutes, and fails the test on timeout.
func waitUntilHyperConvergedReady(oc *exutil.CLI, hc, ns string) {
	pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		condStatus, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("hyperconverged", hc, "-n", ns, "-o", "jsonpath='{.status.conditions[0].status}'").Output()
		if getErr != nil {
			// keep polling while the hyperconverged resource does not exist yet
			if strings.Contains(getErr.Error(), "not found") {
				return false, nil
			}
			return false, getErr
		}
		return strings.Trim(condStatus, "'") == "True", nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("HyperConverged %s did not become Available", hc))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
45a9cacd-a92b-4a76-b114-b02a32a0087a
|
waitUntilVMReady
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// waitUntilVMReady blocks until the first status condition of virtual machine
// vm in namespace ns reports "True", polling every 30s for up to 20 minutes,
// and fails the test on timeout.
func waitUntilVMReady(oc *exutil.CLI, vm, ns string) {
	pollErr := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 1200*time.Second, false, func(context.Context) (bool, error) {
		condStatus, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("virtualmachine", vm, "-n", ns, "-o", "jsonpath='{.status.conditions[0].status}'").Output()
		if getErr != nil {
			// keep polling while the virtual machine has not been created yet
			if strings.Contains(getErr.Error(), "not found") {
				return false, nil
			}
			return false, getErr
		}
		return strings.Trim(condStatus, "'") == "True", nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Virtual machine %s did not become Available", vm))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
b95b3318-34e7-4977-91ab-7dde5b4c24ac
|
WaitUntilCatSrcReady
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// WaitUntilCatSrcReady blocks until catalog source catSrc in
// openshift-marketplace reports a last observed connection state of READY,
// polling every 10s for up to 10 minutes, and fails the test on timeout.
func WaitUntilCatSrcReady(oc *exutil.CLI, catSrc string) {
	pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		connState, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("catalogsource", catSrc, "-n", "openshift-marketplace", "-o", "jsonpath='{.status.connectionState.lastObservedState}'").Output()
		if getErr != nil {
			// keep polling while the catalog source has not been created yet
			if strings.Contains(getErr.Error(), "not found") {
				return false, nil
			}
			return false, getErr
		}
		return strings.Trim(connState, "'") == "READY", nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Catalog Source %s did not become Ready", catSrc))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
cbc74530-3cf9-4256-bc1c-2d716c2ce680
|
hasMetalWorkerNodes
|
['"io"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// hasMetalWorkerNodes returns true only when every worker node's
// instance-type label contains "metal"; it logs and returns false at the
// first worker that does not.
func hasMetalWorkerNodes(oc *exutil.CLI) bool {
	workers, err := exutil.GetClusterNodesBy(oc, "worker")
	o.Expect(err).NotTo(o.HaveOccurred())
	for _, worker := range workers {
		instanceType, labelErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", worker, "-o", "jsonpath='{.metadata.labels.node\\.kubernetes\\.io/instance-type}'").Output()
		o.Expect(labelErr).NotTo(o.HaveOccurred())
		if !strings.Contains(instanceType, "metal") {
			e2e.Logf("Cluster does not have metal worker nodes")
			return false
		}
	}
	return true
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
be7f8e4f-f5b3-4466-a6dd-735bc8f2019d
|
checkResourceDeleted
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// checkResourceDeleted polls every 30s (up to 10 minutes) until "oc get"
// reports NotFound for resourceType/resourceName in namespace, failing the
// test if the object is still present after the timeout.
func checkResourceDeleted(oc *exutil.CLI, resourceType, resourceName, namespace string) {
	pollErr := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 600*time.Second, false, func(context.Context) (bool, error) {
		out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType, resourceName, "-n", namespace).Output()
		return strings.Contains(out, "NotFound"), nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("found %s \"%s\" exist or not fully deleted", resourceType, resourceName))
}
|
netobserv
| ||||
function
|
openshift/openshift-tests-private
|
d5325bc6-3cc9-4c40-a8e6-ccc5d7dee1c3
|
deleteResource
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// deleteResource deletes resourceType/resourceName in namespace (forwarding
// any extra oc arguments) and blocks until the object is fully gone.
func deleteResource(oc *exutil.CLI, resourceType, resourceName, namespace string, optionalParameters ...string) {
	args := append([]string{resourceType, resourceName, "-n", namespace}, optionalParameters...)
	delErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
	o.Expect(delErr).NotTo(o.HaveOccurred())
	checkResourceDeleted(oc, resourceType, resourceName, namespace)
}
|
netobserv
| |||||
function
|
openshift/openshift-tests-private
|
dc399266-4420-4bc7-a950-dfbc835d1c37
|
getKubeAdminToken
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/netobserv/util.go
|
// getKubeAdminToken logs in as kubeadmin against serverUrl, captures an API
// token via "oc whoami -t", then restores the previous kubeconfig context
// before returning the token. Each step failure fails the test.
func getKubeAdminToken(oc *exutil.CLI, kubeAdminPasswd, serverUrl, currentContext string) string {
	loginErr := oc.WithoutNamespace().Run("login").Args("-u", "kubeadmin", "-p", kubeAdminPasswd, serverUrl).NotShowInfo().Execute()
	o.Expect(loginErr).NotTo(o.HaveOccurred())
	token, tokenErr := oc.WithoutNamespace().Run("whoami").Args("-t").Output()
	o.Expect(tokenErr).NotTo(o.HaveOccurred())
	restoreErr := oc.WithoutNamespace().Run("config").Args("set", "current-context", currentContext).Execute()
	o.Expect(restoreErr).NotTo(o.HaveOccurred())
	return token
}
|
netobserv
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.