element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
036e8a94-3844-47e5-b6e0-1ce48f7f3be2
|
createVFPolicy
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['VFPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (vfpolicy *VFPolicyResource) createVFPolicy(oc *exutil.CLI) error {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vfpolicy.template, "-p", "NAME="+vfpolicy.name, "INTFNAME="+vfpolicy.intfname, "NODENAME="+vfpolicy.nodename, "TOTALVFS="+strconv.Itoa(int(vfpolicy.totalvfs)))
if err1 != nil {
e2e.Logf("Creating VF on sriov node failed :%v, and try next round", err1)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("fail to VF on sriov node %v", vfpolicy.name)
}
return nil
}
|
networking
| |||
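The create* helpers above all share the same retry shape: apply a template, swallow transient errors, and let `wait.Poll` retry until the timeout. Below is a minimal, self-contained sketch of that pattern outside the test framework; `applyTemplate` is a hypothetical stand-in for `applyResourceFromTemplateByAdmin`, and the intervals mirror the ones used in `createVFPolicy`.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// applyTemplate is a hypothetical placeholder for applyResourceFromTemplateByAdmin.
func applyTemplate() error { return nil }

// applyWithRetry keeps re-applying until it succeeds or the 20s poll times out,
// mirroring the wait.Poll loop in createVFPolicy above.
func applyWithRetry() error {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		if applyErr := applyTemplate(); applyErr != nil {
			fmt.Printf("apply failed: %v, will try next round\n", applyErr)
			return false, nil // swallow the error so the poll retries
		}
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("failed to apply template within timeout: %w", err)
	}
	return nil
}

func main() {
	if err := applyWithRetry(); err != nil {
		fmt.Println(err)
	}
}
```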
function
|
openshift/openshift-tests-private
|
d0adc05d-3c1f-4421-b49f-80718e4324a9
|
createPolicySpecificNode
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['sriovNetworkNodePolicySpecificNode']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (sriovNNPolicy *sriovNetworkNodePolicySpecificNode) createPolicySpecificNode(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovNNPolicy.template, "-p", "NAMESPACE="+sriovNNPolicy.namespace, "SRIOVNETPOLICY="+sriovNNPolicy.policyName, "DEVICETYPE="+sriovNNPolicy.deviceType, "PFNAME="+sriovNNPolicy.pfName, "NUMVFS="+strconv.Itoa(sriovNNPolicy.numVfs), "RESOURCENAME="+sriovNNPolicy.resourceName, "NODENAME="+sriovNNPolicy.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create sriovnetworknodePolicy %v", sriovNNPolicy.policyName))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
fe9a6cf1-9a08-44f9-a7bd-2b4858294748
|
createSriovTestPodMAC
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['sriovTestPod', 'sriovTestPodMAC']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (sriovTestPod *sriovTestPodMAC) createSriovTestPodMAC(oc *exutil.CLI) {
err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovTestPod.tempfile, "-p", "PODNAME="+sriovTestPod.name, "SRIOVNETNAME="+sriovTestPod.sriovnetname, "TARGETNS="+sriovTestPod.namespace, "IP_ADDR="+sriovTestPod.ipaddr, "MAC_ADDR="+sriovTestPod.macaddr)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create test pod %v", sriovTestPod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
57f9bd7a-80af-451e-acca-6b38334016a3
|
uninstallSriovOperator
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func uninstallSriovOperator(oc *exutil.CLI, namespace string) {
// Delete SRIOV network related config (except SriovNetworkPoolConfig), subscription, CSV, and DS under the openshift-sriov-network-operator namespace
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("sriovnetwork", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting sirovnetwork")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("sriovnetworknodepolicy", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting sirovnetworknodepolicy")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("subscription", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting subscription under openshift-sriov-network-operator namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("csv", "--all", "-n", namespace).Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting CSV under openshift-sriov-network-operator namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ds", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting DS under openshift-sriov-network-operator namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment", "sriov-network-operator", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting deployment/sriov-network-operator under openshift-sriov-network-operator namespace")
// Verify SRIOV network related config, subscription, CSV, and DS under openshift-sriov-network-operator namespace are removed
sriovconfigs := [6]string{"sriovnetwork", "sriovnetworknodepolicy", "subscription", "csv", "ds", "deployment"}
for _, config := range sriovconfigs {
sriovChkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false, func(cxt context.Context) (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(config, "-n", namespace).Output()
e2e.Logf("\n output after deleting %s: %s\n", config, output)
if strings.Contains(output, "not found") || strings.Contains(output, "No resources found") {
e2e.Logf("mutatingwebhookconfigs %s is delete successfully", config)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(sriovChkErr, fmt.Sprintf("Failed to delete resource %s", config))
}
// Delete SRIOV related CRD under openshift-sriov-network-operator namespace
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("crd", "sriovibnetworks.sriovnetwork.openshift.io "+
"sriovnetworknodepolicies.sriovnetwork.openshift.io "+
"sriovnetworknodestates.sriovnetwork.openshift.io "+
"sriovnetworkpoolconfigs.sriovnetwork.openshift.io "+
"sriovnetworks.sriovnetwork.openshift.io "+
"sriovoperatorconfigs.sriovnetwork.openshift.io", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting SRIOV related CRD")
// Verify SRIOV related CRD under openshift-sriov-network-operator namespace are removed
chkOutput, _ := exec.Command("bash", "-c", "oc get crd | grep sriov").Output()
o.Expect(string(chkOutput)).Should(o.BeEmpty(), "Not all SRIOV CRD were removed")
// Delete webhook related configurations
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("mutatingwebhookconfigurations", "network-resources-injector-config", "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting network-resources-injector-config")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("MutatingWebhookConfiguration", "sriov-operator-webhook-config", "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting MutatingWebhookConfiguration sriov-operator-webhook-config")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ValidatingWebhookConfiguration", "sriov-operator-webhook-config", "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting ValidatingWebhookConfiguration sriov-operator-webhook-config")
// Verify webhook related configurations are removed
mutatingwebhookconfigs := [2]string{"network-resources-injector-config", "sriov-operator-webhook-config"}
for _, config := range mutatingwebhookconfigs {
sriovChkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false, func(cxt context.Context) (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mutatingwebhookconfigurations", config).Output()
e2e.Logf("\n output after deleting %s: %s\n", config, output)
if strings.Contains(output, "not found") || strings.Contains(output, "No resources found") {
e2e.Logf("mutatingwebhookconfigs %s is delete successfully", config)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(sriovChkErr, fmt.Sprintf("Failed to delete resource %s", config))
}
sriovChkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false, func(cxt context.Context) (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ValidatingWebhookConfiguration", "sriov-operator-webhook-config").Output()
e2e.Logf("\n\n\n output after deleting ValidatingWebhookConfiguration sriov-operator-webhook-config: %s\n\n\n", output)
if strings.Contains(output, "not found") || strings.Contains(output, "No resources found") {
e2e.Logf("ValidatingWebhookConfiguration sriov-operator-webhook-config is delete successfully")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(sriovChkErr, "Failed to delete ValidatingWebhookConfiguration sriov-operator-webhook-config")
}
|
networking
| ||||
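uninstallSriovOperator verifies each deletion with the context-aware `wait.PollUntilContextTimeout`, treating either "not found" or "No resources found" in the `oc get` output as success. A hedged, standalone sketch of that verification loop follows; `getOutput` is a hypothetical placeholder for the `oc get <resource> -n <namespace>` call.

```go
package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// getOutput is a hypothetical placeholder for "oc get <resource> -n <namespace>".
func getOutput(resource string) string { return "No resources found" }

// waitForDeletion polls until the resource type reports no remaining instances,
// mirroring the verification loops in uninstallSriovOperator above.
func waitForDeletion(resource string) error {
	return wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false,
		func(ctx context.Context) (bool, error) {
			out := getOutput(resource)
			if strings.Contains(out, "not found") || strings.Contains(out, "No resources found") {
				return true, nil // nothing left of this resource type
			}
			return false, nil
		})
}

func main() {
	if err := waitForDeletion("sriovnetwork"); err != nil {
		fmt.Printf("resource was not removed in time: %v\n", err)
	}
}
```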
function
|
openshift/openshift-tests-private
|
df84d317-9716-45fc-bbbb-726ed78a059d
|
installSriovOperator
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func installSriovOperator(oc *exutil.CLI, opNamespace string) {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
namespaceTemplate = filepath.Join(buildPruningBaseDir, "namespace-template.yaml")
operatorGroupTemplate = filepath.Join(buildPruningBaseDir, "operatorgroup-template.yaml")
subscriptionTemplate = filepath.Join(buildPruningBaseDir, "subscription-template.yaml")
sriovOperatorconfig = filepath.Join(buildPruningBaseDir, "sriovoperatorconfig.yaml")
opName = "sriov-network-operators"
)
sub := subscriptionResource{
name: "sriov-network-operator-subsription",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
e2e.Logf("Operator install check successfull as part of setup !!!!!")
exutil.By("SUCCESS - sriov operator installed")
exutil.By("check sriov version if match the ocp version")
operatorVersion := getOperatorVersion(oc, sub.name, sub.namespace)
ocpversion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(operatorVersion).Should(o.MatchRegexp(ocpversion))
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", sriovOperatorconfig, "-n", opNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check all pods in sriov namespace are running")
chkSriovOperatorStatus(oc, sub.namespace)
}
|
networking
| ||||
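installSriovOperator checks that the operator version matches the cluster version with a Gomega `MatchRegexp` assertion. The small standalone illustration below shows that matcher used outside Ginkgo via `gomega.NewWithT`; the version strings are invented for the example.

```go
package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestOperatorVersionMatchesOCP mirrors the MatchRegexp check in installSriovOperator:
// the operator version is expected to contain the OCP x.y version string.
func TestOperatorVersionMatchesOCP(t *testing.T) {
	g := gomega.NewWithT(t)
	operatorVersion := "4.15.0-202403010000" // illustrative values, not real data
	ocpVersion := "4.15"
	g.Expect(operatorVersion).To(gomega.MatchRegexp(ocpVersion))
}
```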
test
|
openshift/openshift-tests-private
|
8ddf2ffc-7fce-44e0-9e63-7dcca89ec12f
|
winc
|
import (
"net"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/winc.go
|
package networking
import (
"net"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-networking] SDN winc", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-winc", exutil.KubeConfigPath())
// author: [email protected]
g.It("Author:anusaxen-High-51798-Check nodeport ETP Cluster and Local functionality wrt window node", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodWinNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-window-template.yaml")
windowGenericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-window-template.yaml")
)
linuxNodeList, err := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(err).NotTo(o.HaveOccurred())
windowNodeList, err := exutil.GetAllNodesbyOSType(oc, "windows")
o.Expect(err).NotTo(o.HaveOccurred())
if len(linuxNodeList) < 2 || len(windowNodeList) < 1 {
g.Skip("This case requires at least 1 window node, and 2 linux nodes")
}
g.By("Create a namespace")
oc.SetupProject()
ns := oc.Namespace()
g.By("create a window pod in ns")
pod := pingPodResourceWinNode{
name: "win-webserver",
namespace: ns,
image: "mcr.microsoft.com/powershell:lts-nanoserver-ltsc2022",
nodename: windowNodeList[0],
template: pingPodWinNodeTemplate,
}
pod.createPingPodWinNode(oc)
testPodName := getPodName(oc, ns, "app=win-webserver")
waitPodReady(oc, ns, testPodName[0])
g.By("Create a cluster type nodeport test service for above window pod")
svc := windowGenericServiceResource{
servicename: "win-nodeport-service",
namespace: ns,
protocol: "TCP",
selector: "win-webserver",
serviceType: "NodePort",
ipFamilyPolicy: "SingleStack",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "Cluster",
template: windowGenericServiceTemplate,
}
svc.createWinServiceFromParams(oc)
_, nodePort := getSvcIP(oc, ns, "win-nodeport-service")
_, winNodeIP := getNodeIP(oc, windowNodeList[0])
winNodeURL := net.JoinHostPort(winNodeIP, nodePort)
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", winNodeURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
_, linuxNodeIP := getNodeIP(oc, linuxNodeList[0])
linuxNodeURL := net.JoinHostPort(linuxNodeIP, nodePort)
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", linuxNodeURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete nodeport svc from ns and recreate it with ETP Local")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "win-nodeport-service", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svc.externalTrafficPolicy = "Local"
svc.createWinServiceFromParams(oc)
_, nodePort = getSvcIP(oc, ns, "win-nodeport-service")
//nodePort value might have changed so fetching new URLs for JoinHostPort
winNodeURLnew := net.JoinHostPort(winNodeIP, nodePort)
linuxNodeURLnew := net.JoinHostPort(linuxNodeIP, nodePort)
g.By("linux worker 0 to window node should work because its external traffic from another node and destination window node has a backend pod on it, ETP=Local respected")
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", winNodeURLnew, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("linux Worker 0 to linux worker 0 should work like ETP=cluster because its not external traffic, its within the node. ETP=local shouldn't be respected and its like ETP=cluster behaviour")
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", linuxNodeURLnew, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
})
})
|
package networking
| ||||
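The winc test builds node URLs with `net.JoinHostPort` rather than plain string concatenation because it adds the brackets that IPv6 literals require. A tiny standard-library-only illustration (the addresses and port are made up):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv4: plain host:port
	fmt.Println(net.JoinHostPort("10.0.128.15", "30712")) // 10.0.128.15:30712
	// IPv6: the literal gets bracketed automatically
	fmt.Println(net.JoinHostPort("fd00:10:0::15", "30712")) // [fd00:10:0::15]:30712
}
```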
test case
|
openshift/openshift-tests-private
|
37042579-76af-434e-bbea-7c8780ebad5e
|
Author:anusaxen-High-51798-Check nodeport ETP Cluster and Local functionality wrt window node
|
['"net"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/winc.go
|
g.It("Author:anusaxen-High-51798-Check nodeport ETP Cluster and Local functionality wrt window node", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodWinNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-window-template.yaml")
windowGenericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-window-template.yaml")
)
linuxNodeList, err := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(err).NotTo(o.HaveOccurred())
windowNodeList, err := exutil.GetAllNodesbyOSType(oc, "windows")
o.Expect(err).NotTo(o.HaveOccurred())
if len(linuxNodeList) < 2 || len(windowNodeList) < 1 {
g.Skip("This case requires at least 1 window node, and 2 linux nodes")
}
g.By("Create a namespace")
oc.SetupProject()
ns := oc.Namespace()
g.By("create a window pod in ns")
pod := pingPodResourceWinNode{
name: "win-webserver",
namespace: ns,
image: "mcr.microsoft.com/powershell:lts-nanoserver-ltsc2022",
nodename: windowNodeList[0],
template: pingPodWinNodeTemplate,
}
pod.createPingPodWinNode(oc)
testPodName := getPodName(oc, ns, "app=win-webserver")
waitPodReady(oc, ns, testPodName[0])
g.By("Create a cluster type nodeport test service for above window pod")
svc := windowGenericServiceResource{
servicename: "win-nodeport-service",
namespace: ns,
protocol: "TCP",
selector: "win-webserver",
serviceType: "NodePort",
ipFamilyPolicy: "SingleStack",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "Cluster",
template: windowGenericServiceTemplate,
}
svc.createWinServiceFromParams(oc)
_, nodePort := getSvcIP(oc, ns, "win-nodeport-service")
_, winNodeIP := getNodeIP(oc, windowNodeList[0])
winNodeURL := net.JoinHostPort(winNodeIP, nodePort)
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", winNodeURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
_, linuxNodeIP := getNodeIP(oc, linuxNodeList[0])
linuxNodeURL := net.JoinHostPort(linuxNodeIP, nodePort)
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", linuxNodeURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete nodeport svc from ns and recreate it with ETP Local")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "win-nodeport-service", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svc.externalTrafficPolicy = "Local"
svc.createWinServiceFromParams(oc)
_, nodePort = getSvcIP(oc, ns, "win-nodeport-service")
//nodePort value might have changed so fetching new URLs for JoinHostPort
winNodeURLnew := net.JoinHostPort(winNodeIP, nodePort)
linuxNodeURLnew := net.JoinHostPort(linuxNodeIP, nodePort)
g.By("linux worker 0 to window node should work because its external traffic from another node and destination window node has a backend pod on it, ETP=Local respected")
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", winNodeURLnew, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("linux Worker 0 to linux worker 0 should work like ETP=cluster because its not external traffic, its within the node. ETP=local shouldn't be respected and its like ETP=cluster behaviour")
_, err = exutil.DebugNode(oc, linuxNodeList[0], "curl", linuxNodeURLnew, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test
|
openshift/openshift-tests-private
|
941a9ccd-1c4e-408e-b77a-9bd09913011f
|
egressrouter
|
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressrouter.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN ovn-kubernetes egressrouter", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-egressrouter", exutil.KubeConfigPath())
g.BeforeEach(func() {
platform := exutil.CheckPlatform(oc)
e2e.Logf("\n\nThe platform is %v\n", platform)
networkType := checkNetworkType(oc)
acceptedPlatform := strings.Contains(platform, "baremetal")
if !acceptedPlatform || !strings.Contains(networkType, "ovn") {
g.Skip("Test cases should be run on BareMetal cluster, skip for other platforms or other non-OVN network plugin!!")
}
if checkProxy(oc) {
g.Skip("This is proxy cluster, skip the test.")
}
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-High-42340-Egress router redirect mode with multiple destinations.", func() {
ipStackType := checkIPStackType(oc)
exutil.By("Skip testing on ipv6 single stack cluster")
if ipStackType == "ipv6single" {
g.Skip("Skip for single stack cluster!!!")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(buildPruningBaseDir, "egressrouter")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressRouterTemplate = filepath.Join(egressBaseDir, "egressrouter-multiple-destination-template.yaml")
egressRouterService = filepath.Join(egressBaseDir, "serive-egressrouter.yaml")
egressRouterServiceDualStack = filepath.Join(egressBaseDir, "serive-egressrouter-dualstack.yaml")
url = "www.google.com"
)
exutil.By("1. nslookup obtain dns server ip for url \n")
destinationIP := nslookDomainName(url)
e2e.Logf("ip address from nslookup for %v: %v", url, destinationIP)
exutil.By("2. Get gateway for one worker node \n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
gateway := getIPv4Gateway(oc, nodeList.Items[0].Name)
o.Expect(gateway).ShouldNot(o.BeEmpty())
freeIP := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIP)).Should(o.Equal(1))
prefixIP := getInterfacePrefix(oc, nodeList.Items[0].Name)
o.Expect(prefixIP).ShouldNot(o.BeEmpty())
reservedIP := fmt.Sprintf("%s/%s", freeIP[0], prefixIP)
exutil.By("3. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("4. Create egressrouter \n")
egressrouter := egressrouterMultipleDst{
name: "egressrouter-42430",
namespace: ns1,
reservedip: reservedIP,
gateway: gateway,
destinationip1: destinationIP,
destinationip2: destinationIP,
destinationip3: destinationIP,
template: egressRouterTemplate,
}
egressrouter.createEgressRouterMultipeDst(oc)
err = waitForPodWithLabelReady(oc, ns1, "app=egress-router-cni")
exutil.AssertWaitPollNoErr(err, "EgressRouter pod is not ready!")
exutil.By("5. Schedule the worker \n")
// In rdu1 and rdu2 clusters, there are two sriov nodes with mlx nic, by default, egressrouter case cannot run on it
// So here exclude sriov nodes in rdu1 and rdu2 clusters, just use the other common worker nodes
workers := excludeSriovNodes(oc)
o.Expect(len(workers) > 0).Should(o.BeTrue(), fmt.Sprintf("The number of common worker nodes in the cluster is %v ", len(workers)))
if len(workers) < nodeList.Size() {
e2e.Logf("There are sriov workers in the cluster, will schedule the egress router pod to a common node.")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "deployment/egress-router-cni-deployment", "-p", "{\"spec\":{\"template\":{\"spec\":{\"nodeName\":\""+workers[0]+"\"}}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("-n", ns1, "status", "deployment/egress-router-cni-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("successfully rolled out"))
}
exutil.By("6. Create service for egress router pod! \n")
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns1, egressRouterServiceDualStack)
} else {
createResourceFromFile(oc, ns1, egressRouterService)
}
exutil.By("7. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
template: pingPodTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("8. Get service IP \n")
var svcIPv4 string
if ipStackType == "dualstack" {
_, svcIPv4 = getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
} else {
svcIPv4, _ = getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
}
exutil.By("9. Check result,the svc for egessrouter can be accessed \n")
_, err = e2eoutput.RunHostCmdWithRetries(pod1.namespace, pod1.name, "curl -s "+svcIPv4+":5000 --connect-timeout 10", 5*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:5000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmdWithRetries(pod1.namespace, pod1.name, "curl -s "+svcIPv4+":6000 --connect-timeout 10", 5*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:6000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmdWithRetries(pod1.namespace, pod1.name, "curl -s "+svcIPv4+":80 --connect-timeout 10", 5*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:80 with error:%v", svcIPv4, err))
})
// author: [email protected]
g.It("ConnectedOnly-NonPreRelease-PreChkUpgrade-Author:jechen-High-63155-Pre Egress router redirect mode with multiple destinations should still be functional after upgrade.", func() {
ipStackType := checkIPStackType(oc)
exutil.By("Skip testing on ipv6 single stack cluster")
if ipStackType == "ipv6single" {
g.Skip("Skip for single stack cluster!!!")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(buildPruningBaseDir, "egressrouter")
statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
egressRouterTemplate = filepath.Join(egressBaseDir, "egressrouter-multiple-destination-template.yaml")
egressRouterService = filepath.Join(egressBaseDir, "serive-egressrouter.yaml")
ns1 = "63155-upgrade-ns"
)
exutil.By("1.Get gateway for one worker node \n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
gateway := getIPv4Gateway(oc, nodeList.Items[0].Name)
o.Expect(gateway).ShouldNot(o.BeEmpty())
freeIP := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIP)).Should(o.Equal(1))
prefixIP := getInterfacePrefix(oc, nodeList.Items[0].Name)
o.Expect(prefixIP).ShouldNot(o.BeEmpty())
reservedIP := fmt.Sprintf("%s/%s", freeIP[0], prefixIP)
exutil.By("2. Obtain the namespace \n")
oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns1).Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("3 Create egressrouter \n")
egressrouter := egressrouterMultipleDst{
name: "egressrouter-63155",
namespace: ns1,
reservedip: reservedIP,
gateway: gateway,
destinationip1: "142.250.188.206",
destinationip2: "142.250.188.206",
destinationip3: "142.250.188.206",
template: egressRouterTemplate,
}
egressrouter.createEgressRouterMultipeDst(oc)
err = waitForPodWithLabelReady(oc, ns1, "app=egress-router-cni")
exutil.AssertWaitPollNoErr(err, "EgressRouter pod is not ready!")
exutil.By("4. Schedule the worker \n")
// In rdu1 and rdu2 clusters, there are two sriov nodes with mlx nic, by default, egressrouter case cannot run on it
// So here exclude sriov nodes in rdu1 and rdu2 clusters, just use the other common worker nodes
workers := excludeSriovNodes(oc)
o.Expect(len(workers) > 0).Should(o.BeTrue(), fmt.Sprintf("The number of common worker nodes in the cluster is %v ", len(workers)))
if len(workers) < nodeList.Size() {
e2e.Logf("There are sriov workers in the cluster, will schedule the egress router pod to a common node.")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "deployment/egress-router-cni-deployment", "-p", "{\"spec\":{\"template\":{\"spec\":{\"nodeName\":\""+workers[0]+"\"}}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("-n", ns1, "status", "deployment/egress-router-cni-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("successfully rolled out"))
}
exutil.By("5. Create serive for egress router pod! \n")
createResourceFromFile(oc, ns1, egressRouterService)
exutil.By("6. create hello pod in ns1 \n")
createResourceFromFile(oc, ns1, statefulSetHelloPod)
podErr := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodname := getPodName(oc, ns1, "app=hello")
exutil.By("7. Get service IP \n")
svcIPv4, _ := getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
exutil.By("8. Check result,the svc for egessrouter can be accessed \n")
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":5000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:5000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":6000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:6000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":80 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:80 with error:%v", svcIPv4, err))
})
g.It("ConnectedOnly-NonPreRelease-PstChkUpgrade-Author:jechen-High-63155-Pst Egress router redirect mode with multiple destinations should still be funcitonal after upgrade.", func() {
ipStackType := checkIPStackType(oc)
exutil.By("Skip testing on ipv6 single stack cluster")
if ipStackType == "ipv6single" {
g.Skip("Skip for single stack cluster!!!")
}
ns1 := "63155-upgrade-ns"
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", ns1).Execute()
if nsErr != nil {
g.Skip("Skip the PstChkUpgrade test as 63155-upgrade-ns namespace does not exist, PreChkUpgrade test did not run")
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns1, "--ignore-not-found=true").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "hello-pod1", "-n", ns1, "--ignore-not-found=true").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("egressrouters", "egressrouter-63155", "-n", ns1, "--ignore-not-found=true").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("Service", "ovn-egressrouter-multidst-svc", "-n", ns1, "--ignore-not-found=true").Execute()
exutil.By("1. check egressrouter pod \n")
err := waitForPodWithLabelReady(oc, ns1, "app=egress-router-cni")
exutil.AssertWaitPollNoErr(err, "EgressRouter pod is not ready!")
exutil.By("2. check egressrouter deployment \n")
output, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("-n", ns1, "status", "deployment/egress-router-cni-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("successfully rolled out"))
exutil.By("3. Get the hello pod in ns1 \n")
helloPodname := getPodName(oc, ns1, "app=hello")
o.Expect(len(helloPodname)).Should(o.Equal(1))
exutil.By("4. Get egressrouter service IP \n")
svcIPv4, _ := getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
exutil.By("5. Check svc for egessrouter can be accessed \n")
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":5000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:5000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":6000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:6000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":80 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:80 with error:%v", svcIPv4, err))
})
})
|
package networking
| ||||
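Step 1 of the 42340 case resolves the destination URL to an IP before configuring the egress router. `nslookDomainName` is a repo helper; a rough standard-library equivalent of that lookup, returning the first IPv4 address, would look like the sketch below (illustrative only).

```go
package main

import (
	"fmt"
	"net"
)

// firstIPv4 resolves a hostname and returns its first IPv4 address,
// roughly what the nslookDomainName helper is used for in step 1.
func firstIPv4(host string) (string, error) {
	ips, err := net.LookupIP(host)
	if err != nil {
		return "", err
	}
	for _, ip := range ips {
		if v4 := ip.To4(); v4 != nil {
			return v4.String(), nil
		}
	}
	return "", fmt.Errorf("no IPv4 address found for %s", host)
}

func main() {
	ip, err := firstIPv4("www.google.com")
	fmt.Println(ip, err)
}
```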
test case
|
openshift/openshift-tests-private
|
5c7a1628-7090-49d5-a67c-6b22e29a9b65
|
ConnectedOnly-Author:huirwang-High-42340-Egress router redirect mode with multiple destinations.
|
['"context"', '"fmt"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressrouter.go
|
g.It("ConnectedOnly-Author:huirwang-High-42340-Egress router redirect mode with multiple destinations.", func() {
ipStackType := checkIPStackType(oc)
exutil.By("Skip testing on ipv6 single stack cluster")
if ipStackType == "ipv6single" {
g.Skip("Skip for single stack cluster!!!")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(buildPruningBaseDir, "egressrouter")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressRouterTemplate = filepath.Join(egressBaseDir, "egressrouter-multiple-destination-template.yaml")
egressRouterService = filepath.Join(egressBaseDir, "serive-egressrouter.yaml")
egressRouterServiceDualStack = filepath.Join(egressBaseDir, "serive-egressrouter-dualstack.yaml")
url = "www.google.com"
)
exutil.By("1. nslookup obtain dns server ip for url \n")
destinationIP := nslookDomainName(url)
e2e.Logf("ip address from nslookup for %v: %v", url, destinationIP)
exutil.By("2. Get gateway for one worker node \n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
gateway := getIPv4Gateway(oc, nodeList.Items[0].Name)
o.Expect(gateway).ShouldNot(o.BeEmpty())
freeIP := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIP)).Should(o.Equal(1))
prefixIP := getInterfacePrefix(oc, nodeList.Items[0].Name)
o.Expect(prefixIP).ShouldNot(o.BeEmpty())
reservedIP := fmt.Sprintf("%s/%s", freeIP[0], prefixIP)
exutil.By("3. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("4. Create egressrouter \n")
egressrouter := egressrouterMultipleDst{
name: "egressrouter-42430",
namespace: ns1,
reservedip: reservedIP,
gateway: gateway,
destinationip1: destinationIP,
destinationip2: destinationIP,
destinationip3: destinationIP,
template: egressRouterTemplate,
}
egressrouter.createEgressRouterMultipeDst(oc)
err = waitForPodWithLabelReady(oc, ns1, "app=egress-router-cni")
exutil.AssertWaitPollNoErr(err, "EgressRouter pod is not ready!")
exutil.By("5. Schedule the worker \n")
// In rdu1 and rdu2 clusters, there are two sriov nodes with mlx nic, by default, egressrouter case cannot run on it
// So here exclude sriov nodes in rdu1 and rdu2 clusters, just use the other common worker nodes
workers := excludeSriovNodes(oc)
o.Expect(len(workers) > 0).Should(o.BeTrue(), fmt.Sprintf("The number of common worker nodes in the cluster is %v ", len(workers)))
if len(workers) < nodeList.Size() {
e2e.Logf("There are sriov workers in the cluster, will schedule the egress router pod to a common node.")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "deployment/egress-router-cni-deployment", "-p", "{\"spec\":{\"template\":{\"spec\":{\"nodeName\":\""+workers[0]+"\"}}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("-n", ns1, "status", "deployment/egress-router-cni-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("successfully rolled out"))
}
exutil.By("6. Create service for egress router pod! \n")
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns1, egressRouterServiceDualStack)
} else {
createResourceFromFile(oc, ns1, egressRouterService)
}
exutil.By("7. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
template: pingPodTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("8. Get service IP \n")
var svcIPv4 string
if ipStackType == "dualstack" {
_, svcIPv4 = getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
} else {
svcIPv4, _ = getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
}
exutil.By("9. Check result,the svc for egessrouter can be accessed \n")
_, err = e2eoutput.RunHostCmdWithRetries(pod1.namespace, pod1.name, "curl -s "+svcIPv4+":5000 --connect-timeout 10", 5*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:5000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmdWithRetries(pod1.namespace, pod1.name, "curl -s "+svcIPv4+":6000 --connect-timeout 10", 5*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:6000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmdWithRetries(pod1.namespace, pod1.name, "curl -s "+svcIPv4+":80 --connect-timeout 10", 5*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:80 with error:%v", svcIPv4, err))
})
| |||||
test case
|
openshift/openshift-tests-private
|
ec867ec0-1f71-47fa-9b8b-f4c8e8556a65
|
ConnectedOnly-NonPreRelease-PreChkUpgrade-Author:jechen-High-63155-Pre Egress router redirect mode with multiple destinations should still be functional after upgrade.
|
['"context"', '"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressrouter.go
|
g.It("ConnectedOnly-NonPreRelease-PreChkUpgrade-Author:jechen-High-63155-Pre Egress router redirect mode with multiple destinations should still be functional after upgrade.", func() {
ipStackType := checkIPStackType(oc)
exutil.By("Skip testing on ipv6 single stack cluster")
if ipStackType == "ipv6single" {
g.Skip("Skip for single stack cluster!!!")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(buildPruningBaseDir, "egressrouter")
statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
egressRouterTemplate = filepath.Join(egressBaseDir, "egressrouter-multiple-destination-template.yaml")
egressRouterService = filepath.Join(egressBaseDir, "serive-egressrouter.yaml")
ns1 = "63155-upgrade-ns"
)
exutil.By("1.Get gateway for one worker node \n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
gateway := getIPv4Gateway(oc, nodeList.Items[0].Name)
o.Expect(gateway).ShouldNot(o.BeEmpty())
freeIP := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIP)).Should(o.Equal(1))
prefixIP := getInterfacePrefix(oc, nodeList.Items[0].Name)
o.Expect(prefixIP).ShouldNot(o.BeEmpty())
reservedIP := fmt.Sprintf("%s/%s", freeIP[0], prefixIP)
exutil.By("2. Obtain the namespace \n")
oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns1).Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("3 Create egressrouter \n")
egressrouter := egressrouterMultipleDst{
name: "egressrouter-63155",
namespace: ns1,
reservedip: reservedIP,
gateway: gateway,
destinationip1: "142.250.188.206",
destinationip2: "142.250.188.206",
destinationip3: "142.250.188.206",
template: egressRouterTemplate,
}
egressrouter.createEgressRouterMultipeDst(oc)
err = waitForPodWithLabelReady(oc, ns1, "app=egress-router-cni")
exutil.AssertWaitPollNoErr(err, "EgressRouter pod is not ready!")
exutil.By("4. Schedule the worker \n")
// In rdu1 and rdu2 clusters, there are two sriov nodes with mlx nic, by default, egressrouter case cannot run on it
// So here exclude sriov nodes in rdu1 and rdu2 clusters, just use the other common worker nodes
workers := excludeSriovNodes(oc)
o.Expect(len(workers) > 0).Should(o.BeTrue(), fmt.Sprintf("The number of common worker nodes in the cluster is %v ", len(workers)))
if len(workers) < nodeList.Size() {
e2e.Logf("There are sriov workers in the cluster, will schedule the egress router pod to a common node.")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "deployment/egress-router-cni-deployment", "-p", "{\"spec\":{\"template\":{\"spec\":{\"nodeName\":\""+workers[0]+"\"}}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("-n", ns1, "status", "deployment/egress-router-cni-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("successfully rolled out"))
}
exutil.By("5. Create serive for egress router pod! \n")
createResourceFromFile(oc, ns1, egressRouterService)
exutil.By("6. create hello pod in ns1 \n")
createResourceFromFile(oc, ns1, statefulSetHelloPod)
podErr := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodname := getPodName(oc, ns1, "app=hello")
exutil.By("7. Get service IP \n")
svcIPv4, _ := getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
exutil.By("8. Check result,the svc for egessrouter can be accessed \n")
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":5000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:5000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":6000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:6000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":80 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:80 with error:%v", svcIPv4, err))
})
| |||||
test case
|
openshift/openshift-tests-private
|
c1e256c8-68cb-482d-b789-e9d55f6910ca
|
ConnectedOnly-NonPreRelease-PstChkUpgrade-Author:jechen-High-63155-Pst Egress router redirect mode with multiple destinations should still be funcitonal after upgrade.
|
['"fmt"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressrouter.go
|
g.It("ConnectedOnly-NonPreRelease-PstChkUpgrade-Author:jechen-High-63155-Pst Egress router redirect mode with multiple destinations should still be funcitonal after upgrade.", func() {
ipStackType := checkIPStackType(oc)
exutil.By("Skip testing on ipv6 single stack cluster")
if ipStackType == "ipv6single" {
g.Skip("Skip for single stack cluster!!!")
}
ns1 := "63155-upgrade-ns"
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", ns1).Execute()
if nsErr != nil {
g.Skip("Skip the PstChkUpgrade test as 63155-upgrade-ns namespace does not exist, PreChkUpgrade test did not run")
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns1, "--ignore-not-found=true").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "hello-pod1", "-n", ns1, "--ignore-not-found=true").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("egressrouters", "egressrouter-63155", "-n", ns1, "--ignore-not-found=true").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("Service", "ovn-egressrouter-multidst-svc", "-n", ns1, "--ignore-not-found=true").Execute()
exutil.By("1. check egressrouter pod \n")
err := waitForPodWithLabelReady(oc, ns1, "app=egress-router-cni")
exutil.AssertWaitPollNoErr(err, "EgressRouter pod is not ready!")
exutil.By("2. check egressrouter deployment \n")
output, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("-n", ns1, "status", "deployment/egress-router-cni-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("successfully rolled out"))
exutil.By("3. Get the hello pod in ns1 \n")
helloPodname := getPodName(oc, ns1, "app=hello")
o.Expect(len(helloPodname)).Should(o.Equal(1))
exutil.By("4. Get egressrouter service IP \n")
svcIPv4, _ := getSvcIP(oc, ns1, "ovn-egressrouter-multidst-svc")
exutil.By("5. Check svc for egessrouter can be accessed \n")
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":5000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:5000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":6000 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:6000 with error:%v", svcIPv4, err))
_, err = e2eoutput.RunHostCmd(ns1, helloPodname[0], "curl -s "+svcIPv4+":80 --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to access %s:80 with error:%v", svcIPv4, err))
})
| |||||
test
|
openshift/openshift-tests-private
|
10af2a23-6c8e-451b-94ad-b1c5c05486d9
|
hypershift_hosted
|
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/hypershift_hosted.go
|
package networking
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN OVN hypershift", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIForKubeOpenShift("networking-ovnkubernetes-" + getRandomString())
hostedClusterName, hostedClusterKubeconfig, hostedclusterNS string
)
g.BeforeEach(func() {
// Check the network plugin type
networkType := exutil.CheckNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip case on cluster that has non-OVN network plugin!!")
}
hostedClusterName, hostedClusterKubeconfig, hostedclusterNS = exutil.ValidHypershiftAndGetGuestKubeConf(oc)
oc.SetGuestKubeconf(hostedClusterKubeconfig)
})
g.It("HyperShiftMGMT-NonPreRelease-Longduration-ConnectedOnly-Author:jechen-High-67347-VMI on BM Kubevirt hypershift cluster can be lively migrated from one host to another host. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
migrationTemplate := filepath.Join(buildPruningBaseDir, "kubevirt-live-migration-job-template.yaml")
hyperShiftMgmtNS := hostedclusterNS + "-" + hostedClusterName
e2e.Logf("hyperShiftMgmtNS: %v\n", hyperShiftMgmtNS)
mgmtClusterPlatform := exutil.CheckPlatform(oc)
e2e.Logf("mgmt cluster platform: %v\n", mgmtClusterPlatform)
nestedClusterPlatform := exutil.CheckPlatform(oc.AsAdmin().AsGuestKubeconf())
e2e.Logf("hosted cluster platform: %v\n", nestedClusterPlatform)
if !strings.Contains(mgmtClusterPlatform, "baremetal") || !strings.Contains(nestedClusterPlatform, "kubevirt") {
g.Skip("Live migration can only be performed on Baremetal Kubevirt Hypershift, skip all other platforms")
}
exutil.By("1. Get the first VMI on mgmt cluster to perform live migration \n")
vmi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", "-n", hyperShiftMgmtNS, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeList, err := exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
o.Expect(err).NotTo(o.HaveOccurred())
origScheduleableWorkerNodeCount := len(nodeList)
exutil.By("2. Get IP address, hosted nodename, status of the VMI before live migration \n")
originalIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.interfaces[0].ipAddress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("originalIP: %v\n", originalIP)
OriginalNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.metadata.labels.kubevirt\\.io\\/nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("OriginalNodeName: %v\n", OriginalNodeName)
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.conditions[*].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("status: %v\n", status)
o.Expect(strings.Contains(status, "Ready")).To(o.BeTrue())
o.Expect(strings.Contains(status, "LiveMigratable")).To(o.BeTrue())
exutil.By("3. Perform live migration on the VMI \n")
migrationjob := migrationDetails{
name: "migration-job-67347",
template: migrationTemplate,
namespace: hyperShiftMgmtNS,
virtualmachinesintance: vmi,
}
defer migrationjob.deleteMigrationJob(oc)
migrationjob.createMigrationJob(oc)
exutil.By("4. Check live migration status \n")
o.Eventually(func() bool {
migrationStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmim", migrationjob.name, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.phase}").Output()
return err == nil && migrationStatus == "Succeeded"
}, "300s", "10s").Should(o.BeTrue(), "Live migration did not succeed!!")
exutil.By("5. Get IP address, hosted nodename, status of the VMI again after live migration, IP address should remind same while VM is migrated onto a new nodename, and in Ready state \n")
currentIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.interfaces[0].ipAddress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentIP: %v\n", currentIP)
o.Expect(currentIP).To(o.Equal(originalIP))
currentNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.metadata.labels.kubevirt\\.io\\/nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentNodeName: %v\n", currentNodeName)
o.Expect(strings.Contains(currentNodeName, OriginalNodeName)).To(o.BeFalse())
newStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.conditions[*].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("newStatus: %v\n", newStatus)
o.Expect(strings.Contains(newStatus, "Ready")).To(o.BeTrue())
exutil.By("6. All hosted cluster nodes should remain in Ready state 2 minutes after migration, same number of hosted cluster nodes remain in Ready state \n")
o.Consistently(func() int {
nodeList, err = exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
return (len(nodeList))
}, 120*time.Second, 10*time.Second).Should(o.Equal(origScheduleableWorkerNodeCount))
exutil.By("7. Check operators state on management cluster and hosted cluster, they should all be in healthy state \n")
checkAllClusterOperatorsState(oc, 10, 1)
checkAllClusterOperatorsState(oc.AsGuestKubeconf(), 10, 1)
exutil.By("8. Check health of OVNK on management cluster \n")
checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
exutil.By("9. Delete the migration job \n")
migrationjob.deleteMigrationJob(oc)
})
g.It("HyperShiftMGMT-NonPreRelease-ConnectedOnly-Author:jechen-High-68417-On hosted cluster with Proxy and readinessEndpoint configured, traffic to readinessEndpoint should be sent out through hosted cluster node not mgmt cluster node, and CA bundles can be created on hosted cluster. [Disruptive]", func() {
// This is for bug https://issues.redhat.com/browse/OCPBUGS-14819
var (
dirname = "/tmp/OCP-68417"
name = dirname + "/OCP-68417-custom"
validity = 3650
caSubj = dirname + "/OU=openshift/CN=admin-kubeconfig-signer-custom"
)
if !checkProxy(oc.AsGuestKubeconf()) {
g.Skip("There is no proxy on hosted cluster, skip the test.")
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
url := "www.google.com"
ns := "68417-test-ns"
exutil.By("1. Patch hosted cluster to add readiness endpoints to its proxy\n")
origReadinessEndPoints, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.readinessEndpoints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("origReadinessEndPoints: %v\n", origReadinessEndPoints)
patchResource := "hostedcluster/" + hostedClusterName
patchAdd := "{\"spec\":{\"configuration\":{\"proxy\":{\"readinessEndpoints\":[\"http://" + url + "\", \"https://" + url + "\"]}}}}"
var patchRemove string
if origReadinessEndPoints == "" {
origReadinessEndPoints = "[]" // when original readinessEndpoints is empty string, [] needs to be added around the empty string
}
patchRemove = "{\"spec\":{\"configuration\":{\"proxy\":{\"readinessEndpoints\":" + origReadinessEndPoints + "}}}}"
defer patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchRemove)
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
readinessEndPoints, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.readinessEndpoints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("readinessEndPoints: %v\n", readinessEndPoints)
o.Expect(readinessEndPoints).Should(o.And(
o.ContainSubstring("http://"+url),
o.ContainSubstring("https://"+url)))
proxyIP, proxyPort := getProxyIPandPortOnHostedCluster(oc, hostedClusterName, hostedclusterNS)
o.Expect(proxyIP).ShouldNot(o.Equal(""))
o.Expect(proxyPort).ShouldNot(o.Equal(""))
scheduleableNodes, err := getReadySchedulableNodesOnHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2. Start tcpdump on on hosted cluster host, verify proxyIP.port string can be captured in tcpdump of all hosted cluster nodes")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nneep -i any dst %s or src %s and port %s", proxyIP, proxyIP, proxyPort)
for _, hostedClusterNode := range scheduleableNodes {
tcpdumpOutput, err := oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("debug").Args("node/"+hostedClusterNode, "--", "bash", "-c", tcpdumpCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutput).Should(o.ContainSubstring(proxyIP + "." + proxyPort))
}
exutil.By("3. Start tcpdump on CNO's host, verify proxyIP.port string should not be captured in tcpdump on CNO node")
// get CNO pod on management cluster
CNOPod := getPodName(oc, "openshift-network-operator", "name=network-operator")
o.Expect(len(CNOPod)).ShouldNot(o.Equal(0))
o.Expect(CNOPod[0]).ShouldNot(o.Equal(""))
// get the node that hosts the CNO pod on mgmt cluster
CNOHost, err := exutil.GetPodNodeName(oc, "openshift-network-operator", CNOPod[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(CNOHost).ShouldNot(o.Equal(""))
tcpdumpOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+CNOHost, "--", "bash", "-c", tcpdumpCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutput).ShouldNot(o.ContainSubstring(proxyIP + "." + proxyPort))
exutil.By("4. Create test project and test pod on hosted cluster\n")
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--ignore-not-found=true").Execute()
oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns).Execute()
exutil.SetNamespacePrivileged(oc.AsGuestKubeconf(), ns)
testPod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("pod", testPod.name, "-n", testPod.namespace, "--ignore-not-found=true").Execute()
testPod.createPingPod(oc.AsGuestKubeconf())
waitPodReady(oc.AsGuestKubeconf(), testPod.namespace, testPod.name)
// find the node that hosts the test pod on hosted cluster
testPodNode, err := exutil.GetPodNodeName(oc.AsGuestKubeconf(), ns, testPod.name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(testPodNode).ShouldNot(o.Equal(""))
exutil.By("5. Enable tcpdump on hosted cluster node where test pod resides and CNO host on management cluster\n")
tcpdumpCmd = fmt.Sprintf("timeout 180s tcpdump -c 4 -nneep -i any host %s and port 443", url)
// enable tcpdump on hosted cluster node
tcpdumpOnHosted, tcpdumpOutputOnHosted, _, err := oc.AsGuestKubeconf().AsAdmin().Run("debug").Args("node/"+testPodNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer tcpdumpOnHosted.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// enable tcpdump on CNO host on management cluster
tcpdumpOnMgmt, tcpdumpOutputOnMgmt, _, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+CNOHost, "--", "bash", "-c", tcpdumpCmd).Background()
defer tcpdumpOnMgmt.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6. curl https://www.google.com from test pod on hosted cluster node")
pingCurlCmds := fmt.Sprintf("ping -c 1 %s ; curl -I -k https://%s --connect-timeout 5", url, url)
output, err := oc.AsGuestKubeconf().AsAdmin().Run("exec").Args("-n", testPod.namespace, testPod.name, "--", "/bin/sh", "-c", pingCurlCmds).Output()
o.Expect(err).To(o.HaveOccurred()) // error is expected when trying to ping or curl the url due to proxy
// extract the IP address of the readinessEndpoint from the ping command output
re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)
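// the octet pattern (25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?) limits each dotted-quad component to 0-255, so only well-formed IPv4 addresses are extracted from the ping output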
urlIPv4 := re.FindAllString(output, -1)[0]
e2e.Logf("urlIPv4: %v\n", urlIPv4)
exutil.By("7. Verify traffic to readinessEndpoint goes through node on hosted cluster not through node on management cluster")
cmdErr1 := tcpdumpOnHosted.Wait()
o.Expect(cmdErr1).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutputOnHosted.String()).To(o.ContainSubstring(urlIPv4))
cmdErr2 := tcpdumpOnMgmt.Wait()
o.Expect(cmdErr2).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutputOnMgmt.String()).NotTo(o.ContainSubstring(urlIPv4))
// Generation of a new self-signed CA
exutil.By("8. Generation of a new self-signed CA")
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Generate the CA private key")
opensslCmd := fmt.Sprintf(`openssl genrsa -out %s-ca.key 4096`, name)
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("9. Create the CA certificate")
opensslCmd = fmt.Sprintf(`openssl req -x509 -new -nodes -key %s-ca.key -sha256 -days %d -out %s-ca.crt -subj %s`, name, validity, name, caSubj)
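// openssl req -x509 produces a self-signed CA certificate from the 4096-bit key generated above, valid for `validity` (3650) days with the subject taken from caSubj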
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("10. Create a configmap from the CA onto hosted cluster")
configmapName := "custom-ca"
customCA := "--from-file=ca-bundle.crt=" + name + "-ca.crt"
e2e.Logf("\n customCA is %v", customCA)
defer func() {
_, delErr := oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("configmap", configmapName, "-n", "openshift-config", "--ignore-not-found=true").Output()
o.Expect(delErr).NotTo(o.HaveOccurred())
}()
_, createErr := oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("configmap", configmapName, customCA, "-n", "openshift-config").Output()
o.Expect(createErr).NotTo(o.HaveOccurred())
g.By("11. Check if configmap is successfully configured in openshift-config namesapce on hosted cluster")
err = checkConfigMap(oc.AsGuestKubeconf(), "openshift-config", configmapName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("cm %v not found on hosted cluster", configmapName))
g.By("12. Patch the configmap created above to hosted cluster, verify trustedCA can be created")
defer func() {
innerPollingInterval := 10 * time.Second
innerPollingIterationCount := 3
outerPollingInterval := 15 * time.Second
outerPollingTimeout := 5 * time.Minute
// Returns true only if all Nodes stay ready for a while
nodesStayHealthyForAWhile := func() bool {
for count := 0; count < innerPollingIterationCount; count++ {
// Wait a little before checking all nodes on hosted cluster all together
time.Sleep(innerPollingInterval)
for _, hostedClusterNode := range scheduleableNodes {
statusOutput, err := oc.AsGuestKubeconf().Run("get").Args("nodes", hostedClusterNode, "-ojsonpath={.status.conditions[-1].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n status for node %v is: %v", hostedClusterNode, statusOutput)
if statusOutput != "True" { // when node is in Ready state, status output returned from line 295 is "True"
return false
}
}
}
return true
}
o.Eventually(nodesStayHealthyForAWhile).WithTimeout(outerPollingTimeout).WithPolling(outerPollingInterval).Should(o.BeTrue())
}()
origTrustedCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.trustedCA.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("origTrustedCA: %v\n", origTrustedCA)
patchRemove = "{\"spec\":{\"configuration\":{\"proxy\":{\"trustedCA\":{\"name\":\"" + origTrustedCA + "\"}}}}}"
patchAdd = "{\"spec\":{\"configuration\":{\"proxy\":{\"trustedCA\":{\"name\":\"custom-ca\"}}}}}"
defer patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchRemove)
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
trustedCAName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.trustedCA.name}").Output()
e2e.Logf("trustedCAName: %v\n", trustedCAName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(trustedCAName).Should(o.Equal(configmapName))
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchRemove)
})
g.It("HyperShiftMGMT-NonPreRelease-Longduration-ConnectedOnly-Author:jechen-High-70261-Network Connectivity is not broken even if BM Kubevirt VM migration fails. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
migrationTemplate := filepath.Join(buildPruningBaseDir, "kubevirt-live-migration-job-template.yaml")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
ns1 := "70261-test-ns1-on-hostedcluster" //namespace for hosted cluster has to be all lowercased, that is why hostedcluster is used here, instead of hostedCluster
ns2 := "70261-test-ns2-on-hostedcluster"
hyperShiftMgmtNS := hostedclusterNS + "-" + hostedClusterName
e2e.Logf("hyperShiftMgmtNS: %v\n", hyperShiftMgmtNS)
mgmtClusterPlatform := exutil.CheckPlatform(oc)
e2e.Logf("mgmt cluster platform: %v\n", mgmtClusterPlatform)
nestedClusterPlatform := exutil.CheckPlatform(oc.AsAdmin().AsGuestKubeconf())
e2e.Logf("hosted cluster platform: %v\n", nestedClusterPlatform)
if !strings.Contains(mgmtClusterPlatform, "baremetal") || !strings.Contains(nestedClusterPlatform, "kubevirt") {
g.Skip("Live migration can only be performed on Baremetal Kubevirt Hypershift, skip all other platforms")
}
exutil.By("1. Get node list on hosted cluster\n")
allNodeListOnHostedCluster, err := exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
o.Expect(err).NotTo(o.HaveOccurred())
origScheduleableWorkerNodeCount := len(allNodeListOnHostedCluster)
nodePoolName := exutil.GetNodePoolNamesbyHostedClusterName(oc, hostedClusterName, hostedclusterNS)
o.Expect(len(nodePoolName)).ShouldNot(o.Equal(0))
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeNames)).ShouldNot(o.Equal(0))
e2e.Logf("The nodes in nodepool %v is:\n%v", nodePoolName[0], nodeNames)
exutil.By("2. Get the first VMI on mgmt cluster for live migration, check it is live migratable \n")
vmi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", "-n", hyperShiftMgmtNS, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.conditions[*].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("status: %v\n", status)
o.Expect(strings.Contains(status, "Ready")).To(o.BeTrue())
o.Expect(strings.Contains(status, "LiveMigratable")).To(o.BeTrue())
exutil.By("3. Before perform live migration, create test project and test pod on the node that will involve live migration\n")
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("project", ns1, "--ignore-not-found=true").Execute()
oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns1).Execute()
exutil.SetNamespacePrivileged(oc.AsGuestKubeconf(), ns1)
testPod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: vmi,
template: pingPodNodeTemplate,
}
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("pod", testPod1.name, "-n", testPod1.namespace, "--ignore-not-found=true").Execute()
testPod1.createPingPodNode(oc.AsGuestKubeconf())
waitPodReady(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name)
exutil.By("4. Delibrately set kubevirt.io/func-test-virt-launcher-fail-fast=true on the VMI that will be performed live migration so its migration will fail\n")
defer oc.AsAdmin().WithoutNamespace().Run("annotate").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "kubevirt.io/func-test-virt-launcher-fail-fast=false", "--overwrite").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "kubevirt.io/func-test-virt-launcher-fail-fast=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Perform live migration on the VMI \n")
migrationjob := migrationDetails{
name: "migration-job-70261",
template: migrationTemplate,
namespace: hyperShiftMgmtNS,
virtualmachinesintance: vmi,
}
defer migrationjob.deleteMigrationJob(oc)
migrationjob.createMigrationJob(oc)
exutil.By("6. Check live migration status, live migration is expected to fail due to annoation from step 4 \n")
o.Eventually(func() bool {
migrationStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmim", migrationjob.name, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.phase}").Output()
return err == nil && migrationStatus == "Failed"
}, "300s", "10s").Should(o.BeTrue(), "Live migration did not fail as expected!!")
exutil.By("7. All hosted cluster nodes should remain in Ready state 2 minutes after attempted migration, same number of hosted cluster nodes remain in Ready state \n")
o.Consistently(func() int {
nodeList, err := exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
o.Expect(err).NotTo(o.HaveOccurred())
return (len(nodeList))
}, 120*time.Second, 10*time.Second).Should(o.Equal(origScheduleableWorkerNodeCount))
exutil.By("8. Check operators state on management cluster and hosted cluster, they should all be in healthy state \n")
checkAllClusterOperatorsState(oc, 10, 1)
checkAllClusterOperatorsState(oc.AsGuestKubeconf(), 10, 1)
exutil.By("9. Check health of OVNK on management cluster \n")
checkOVNKState(oc)
exutil.By("10. Create a second test project and test pod on a different node of the hosted cluster after attempted live migration\n")
// remove the node involved in the attempted live migration from the node list, keeping the other nodes of the hosted cluster
var nodeLeft []string
for i, v := range nodeNames {
if v == vmi {
nodeLeft = append(nodeNames[:i], nodeNames[i+1:]...)
break
}
}
e2e.Logf("\n Get other nodes from node list of the hosted cluster: %v\n", nodeLeft)
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("project", ns2, "--ignore-not-found=true").Execute()
oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns2).Execute()
exutil.SetNamespacePrivileged(oc.AsGuestKubeconf(), ns2)
var testPod2Node string
if len(nodeLeft) < 1 {
e2e.Logf("There is no other node on the hosted cluster, create testPod2 on same VMI node")
testPod2Node = vmi
} else {
e2e.Logf("There is some other node on the hosted cluster, create testPod2 on some other node")
testPod2Node = nodeLeft[0]
}
testPod2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: testPod2Node,
template: pingPodNodeTemplate,
}
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("pod", testPod2.name, "-n", testPod2.namespace, "--ignore-not-found=true").Execute()
testPod2.createPingPodNode(oc.AsGuestKubeconf())
waitPodReady(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name)
exutil.By("11. Pod created before attempted live migration should be able to communicate with pod created after attempted live migration\n")
testPod1IP1, testPod1IP2 := getPodIP(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name)
e2e.Logf("\n Got ip address for testPod1 is: %v, %v\n", testPod1IP1, testPod1IP2)
testPod2IP1, testPod2IP2 := getPodIP(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name)
e2e.Logf("\n Got ip address for testPod2 is: %v, %v\n", testPod2IP1, testPod2IP2)
// Curl testPod 1 from testPod2
cmd1 := "curl --connect-timeout 5 -s " + testPod1IP1 + ":8080"
cmd2 := "curl --connect-timeout 5 -s " + testPod1IP2 + ":8080"
if testPod1IP2 != "" {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name, cmd2)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Curl testPod2 from testPod1
cmd1 = "curl --connect-timeout 5 -s " + testPod2IP1 + ":8080"
cmd2 = "curl --connect-timeout 5 -s " + testPod2IP2 + ":8080"
if testPod2IP2 != "" {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name, cmd2)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
}
})
g.It("Author:jechen-HyperShiftMGMT-ConnectedOnly-High-74596-Even with a FQDN proxy configured on hostedCluster, connection can be made to the readinessEndpoint under noProxy that bypass the proxy [Disruptive]", func() {
// This is for bug https://issues.redhat.com/browse/OCPBUGS-33526
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
squidProxyDeploymentFile := filepath.Join(buildPruningBaseDir, "proxy_deployment.yaml")
url := "www.google.com"
exutil.By("1. create new namespace\n")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "anyuid", "-z", "default", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2. Deploy a squid deployment in the namespace then expose its service\n")
defer removeResource(oc, true, true, "deployment", "squid-deployment", ns)
defer removeResource(oc, true, true, "service", "squid-deployment", ns)
createResourceFromFile(oc, ns, squidProxyDeploymentFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", ns, "squid-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "squid-deployment")).To(o.BeTrue())
err = waitForPodWithLabelReady(oc, ns, "app=squid")
exutil.AssertWaitPollNoErr(err, "Not all squid pods with label app=squid are ready")
squidPods := getPodName(oc, ns, "app=squid")
o.Expect(len(squidPods)).Should(o.Equal(1))
defer removeResource(oc, true, true, "pod", squidPods[0], ns)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("deployment/squid-deployment", "--type=LoadBalancer", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
LBSVCHostname := getLBSVCHostname(oc, ns, "squid-deployment")
e2e.Logf("\n\n\n Got hostname for the squid service: %v\n", LBSVCHostname)
exutil.By("3. Patch hosted cluster to add squid proxy as its proxy\n")
origProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchResource := "hostedcluster/" + hostedClusterName
patchRestore := fmt.Sprintf(`[{"op": "replace", "path": "/spec/configuration/proxy", "value":%s}]`, origProxy)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", hostedclusterNS, patchResource, "--type=json", "-p", patchRestore).Execute()
proxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("proxy is restored to: %s\n", proxy)
o.Expect(proxy).Should(o.ContainSubstring(origProxy))
}()
proxyValue := "http://" + LBSVCHostname + ":3128"
patchAdd := "{\"spec\":{\"configuration\":{\"proxy\":{\"httpProxy\":\"" + proxyValue + "\", \"httpsProxy\":\"" + proxyValue + "\"}}}}"
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
proxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("proxy: %s\n", proxy)
expectedProxy1 := fmt.Sprintf(`"httpProxy":"http://%s:3128"`, LBSVCHostname)
expectedProxy2 := fmt.Sprintf(`"httpsProxy":"http://%s:3128"`, LBSVCHostname)
o.Expect(proxy).Should(o.And(o.ContainSubstring(expectedProxy1), o.ContainSubstring(expectedProxy2)))
exutil.By("4. Patch hosted cluster to add squid proxy to noProxy, then set its readinessEndpoint to www.google.com\n")
patchAdd = "{\"spec\":{\"configuration\":{\"proxy\":{\"noProxy\":\"" + LBSVCHostname + "\"}}}}"
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
readinessEP := "https://" + url
patchAdd = "{\"spec\":{\"configuration\":{\"proxy\":{\"readinessEndpoints\":[\"" + readinessEP + "\"]}}}}"
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
noProxyOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.noProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(noProxyOutput, LBSVCHostname)).To(o.BeTrue())
readinessEPOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.readinessEndpoints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(readinessEPOutput, url)).To(o.BeTrue())
// give some time for readinessEndpoints under noProxy to take effect
time.Sleep(30 * time.Second)
exutil.By("5. Check squid pod to confirm connectivity to www.google.com succeed\n")
expectedString := fmt.Sprintf(`CONNECT %s:443`, url)
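// squid logs proxied HTTPS requests as "CONNECT <host>:443" entries, which is the string checkLogMessageInPod is expected to find in the pod logs below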
o.Eventually(func() bool {
podLogs, LogErr := checkLogMessageInPod(oc, ns, "tailer", squidPods[0], "google.com")
o.Expect(LogErr).NotTo(o.HaveOccurred())
return strings.Contains(podLogs, expectedString)
}, "5m", "10s").Should(o.BeTrue(), "Connection to the readinessEndpoint under noProxy did not succeed!!")
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
1b78bca7-ea84-4e46-b89d-08b88fd743a7
|
HyperShiftMGMT-NonPreRelease-Longduration-ConnectedOnly-Author:jechen-High-67347-VMI on BM Kubevirt hypershift cluster can be lively migrated from one host to another host. [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/hypershift_hosted.go
|
g.It("HyperShiftMGMT-NonPreRelease-Longduration-ConnectedOnly-Author:jechen-High-67347-VMI on BM Kubevirt hypershift cluster can be lively migrated from one host to another host. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
migrationTemplate := filepath.Join(buildPruningBaseDir, "kubevirt-live-migration-job-template.yaml")
hyperShiftMgmtNS := hostedclusterNS + "-" + hostedClusterName
e2e.Logf("hyperShiftMgmtNS: %v\n", hyperShiftMgmtNS)
mgmtClusterPlatform := exutil.CheckPlatform(oc)
e2e.Logf("mgmt cluster platform: %v\n", mgmtClusterPlatform)
nestedClusterPlatform := exutil.CheckPlatform(oc.AsAdmin().AsGuestKubeconf())
e2e.Logf("hosted cluster platform: %v\n", nestedClusterPlatform)
if !strings.Contains(mgmtClusterPlatform, "baremetal") || !strings.Contains(nestedClusterPlatform, "kubevirt") {
g.Skip("Live migration can only be performed on Baremetal Kubevirt Hypershift, skip all other platforms")
}
exutil.By("1. Get the first VMI on mgmt cluster to perform live migration \n")
vmi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", "-n", hyperShiftMgmtNS, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeList, err := exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
o.Expect(err).NotTo(o.HaveOccurred())
origScheduleableWorkerNodeCount := len(nodeList)
exutil.By("2. Get IP address, hosted nodename, status of the VMI before live migration \n")
originalIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.interfaces[0].ipAddress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("originalIP: %v\n", originalIP)
OriginalNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.metadata.labels.kubevirt\\.io\\/nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("OriginalNodeName: %v\n", OriginalNodeName)
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.conditions[*].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("status: %v\n", status)
o.Expect(strings.Contains(status, "Ready")).To(o.BeTrue())
o.Expect(strings.Contains(status, "LiveMigratable")).To(o.BeTrue())
exutil.By("3. Perform live migration on the VMI \n")
migrationjob := migrationDetails{
name: "migration-job-67347",
template: migrationTemplate,
namespace: hyperShiftMgmtNS,
virtualmachinesintance: vmi,
}
defer migrationjob.deleteMigrationJob(oc)
migrationjob.createMigrationJob(oc)
exutil.By("4. Check live migration status \n")
o.Eventually(func() bool {
migrationStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmim", migrationjob.name, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.phase}").Output()
return err == nil && migrationStatus == "Succeeded"
}, "300s", "10s").Should(o.BeTrue(), "Live migration did not succeed!!")
exutil.By("5. Get IP address, hosted nodename, status of the VMI again after live migration, IP address should remind same while VM is migrated onto a new nodename, and in Ready state \n")
currentIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.interfaces[0].ipAddress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentIP: %v\n", currentIP)
o.Expect(currentIP).To(o.Equal(originalIP))
currentNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.metadata.labels.kubevirt\\.io\\/nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentNodeName: %v\n", currentNodeName)
o.Expect(strings.Contains(currentNodeName, OriginalNodeName)).To(o.BeFalse())
newStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.conditions[*].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("newStatus: %v\n", newStatus)
o.Expect(strings.Contains(newStatus, "Ready")).To(o.BeTrue())
exutil.By("6. All hosted cluster nodes should remain in Ready state 2 minutes after migration, same number of hosted cluster nodes remain in Ready state \n")
o.Consistently(func() int {
nodeList, err = exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
return (len(nodeList))
}, 120*time.Second, 10*time.Second).Should(o.Equal(origScheduleableWorkerNodeCount))
exutil.By("7. Check operators state on management cluster and hosted cluster, they should all be in healthy state \n")
checkAllClusterOperatorsState(oc, 10, 1)
checkAllClusterOperatorsState(oc.AsGuestKubeconf(), 10, 1)
exutil.By("8. Check health of OVNK on management cluster \n")
checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
exutil.By("9. Delete the migration job \n")
migrationjob.deleteMigrationJob(oc)
})
| |||||
test case
|
openshift/openshift-tests-private
|
6e0eec7e-07c8-4e12-bc8c-b1b178fd7d78
|
HyperShiftMGMT-NonPreRelease-ConnectedOnly-Author:jechen-High-68417-On hosted cluster with Proxy and readinessEndpoint configured, traffic to readinessEndpoint should be sent out through hosted cluster node not mgmt cluster node, and CA bundles can be created on hosted cluster. [Disruptive]
|
['"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/hypershift_hosted.go
|
g.It("HyperShiftMGMT-NonPreRelease-ConnectedOnly-Author:jechen-High-68417-On hosted cluster with Proxy and readinessEndpoint configured, traffic to readinessEndpoint should be sent out through hosted cluster node not mgmt cluster node, and CA bundles can be created on hosted cluster. [Disruptive]", func() {
// This is for bug https://issues.redhat.com/browse/OCPBUGS-14819
var (
dirname = "/tmp/OCP-68417"
name = dirname + "/OCP-68417-custom"
validity = 3650
caSubj = dirname + "/OU=openshift/CN=admin-kubeconfig-signer-custom"
)
if !checkProxy(oc.AsGuestKubeconf()) {
g.Skip("There is no proxy on hosted cluster, skip the test.")
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
url := "www.google.com"
ns := "68417-test-ns"
exutil.By("1. Patch hosted cluster to add readiness endpoints to its proxy\n")
origReadinessEndPoints, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.readinessEndpoints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("origReadinessEndPoints: %v\n", origReadinessEndPoints)
patchResource := "hostedcluster/" + hostedClusterName
patchAdd := "{\"spec\":{\"configuration\":{\"proxy\":{\"readinessEndpoints\":[\"http://" + url + "\", \"https://" + url + "\"]}}}}"
var patchRemove string
if origReadinessEndPoints == "" {
origReadinessEndPoints = "[]" // when original readinessEndpoints is empty string, [] needs to be added around the empty string
}
patchRemove = "{\"spec\":{\"configuration\":{\"proxy\":{\"readinessEndpoints\":" + origReadinessEndPoints + "}}}}"
defer patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchRemove)
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
readinessEndPoints, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.readinessEndpoints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("readinessEndPoints: %v\n", readinessEndPoints)
o.Expect(readinessEndPoints).Should(o.And(
o.ContainSubstring("http://"+url),
o.ContainSubstring("https://"+url)))
proxyIP, proxyPort := getProxyIPandPortOnHostedCluster(oc, hostedClusterName, hostedclusterNS)
o.Expect(proxyIP).ShouldNot(o.Equal(""))
o.Expect(proxyPort).ShouldNot(o.Equal(""))
scheduleableNodes, err := getReadySchedulableNodesOnHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2. Start tcpdump on on hosted cluster host, verify proxyIP.port string can be captured in tcpdump of all hosted cluster nodes")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nneep -i any dst %s or src %s and port %s", proxyIP, proxyIP, proxyPort)
for _, hostedClusterNode := range scheduleableNodes {
tcpdumpOutput, err := oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("debug").Args("node/"+hostedClusterNode, "--", "bash", "-c", tcpdumpCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutput).Should(o.ContainSubstring(proxyIP + "." + proxyPort))
}
exutil.By("3. Start tcpdump on CNO's host, verify proxyIP.port string should not be captured in tcpdump on CNO node")
// get CNO pod on management cluster
CNOPod := getPodName(oc, "openshift-network-operator", "name=network-operator")
o.Expect(len(CNOPod)).ShouldNot(o.Equal(0))
o.Expect(CNOPod[0]).ShouldNot(o.Equal(""))
// get the node that hosts the CNO pod on mgmt cluster
CNOHost, err := exutil.GetPodNodeName(oc, "openshift-network-operator", CNOPod[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(CNOHost).ShouldNot(o.Equal(""))
tcpdumpOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+CNOHost, "--", "bash", "-c", tcpdumpCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutput).ShouldNot(o.ContainSubstring(proxyIP + "." + proxyPort))
exutil.By("4. Create test project and test pod on hosted cluster\n")
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--ignore-not-found=true").Execute()
oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns).Execute()
exutil.SetNamespacePrivileged(oc.AsGuestKubeconf(), ns)
testPod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("pod", testPod.name, "-n", testPod.namespace, "--ignore-not-found=true").Execute()
testPod.createPingPod(oc.AsGuestKubeconf())
waitPodReady(oc.AsGuestKubeconf(), testPod.namespace, testPod.name)
// find the node that hosts the test pod on hosted cluster
testPodNode, err := exutil.GetPodNodeName(oc.AsGuestKubeconf(), ns, testPod.name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(testPodNode).ShouldNot(o.Equal(""))
exutil.By("5. Enable tcpdump on hosted cluster node where test pod resides and CNO host on management cluster\n")
tcpdumpCmd = fmt.Sprintf("timeout 180s tcpdump -c 4 -nneep -i any host %s and port 443", url)
// enable tcpdump on hosted cluster node
tcpdumpOnHosted, tcpdumpOutputOnHosted, _, err := oc.AsGuestKubeconf().AsAdmin().Run("debug").Args("node/"+testPodNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer tcpdumpOnHosted.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// enable tcpdump on CNO host on management cluster
tcpdumpOnMgmt, tcpdumpOutputOnMgmt, _, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+CNOHost, "--", "bash", "-c", tcpdumpCmd).Background()
defer tcpdumpOnMgmt.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6. curl https://www.google.com from test pod on hosted cluster node")
pingCurlCmds := fmt.Sprintf("ping -c 1 %s ; curl -I -k https://%s --connect-timeout 5", url, url)
output, err := oc.AsGuestKubeconf().AsAdmin().Run("exec").Args("-n", testPod.namespace, testPod.name, "--", "/bin/sh", "-c", pingCurlCmds).Output()
o.Expect(err).To(o.HaveOccurred()) // error is expected when trying to ping or curl the url due to proxy
// extract the IP address of the readinessEndpoint from the ping command output
re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)
urlIPv4 := re.FindAllString(output, -1)[0]
e2e.Logf("urlIPv4: %v\n", urlIPv4)
exutil.By("7. Verify traffic to readinessEndpoint goes through node on hosted cluster not through node on management cluster")
cmdErr1 := tcpdumpOnHosted.Wait()
o.Expect(cmdErr1).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutputOnHosted.String()).To(o.ContainSubstring(urlIPv4))
cmdErr2 := tcpdumpOnMgmt.Wait()
o.Expect(cmdErr2).NotTo(o.HaveOccurred())
o.Expect(tcpdumpOutputOnMgmt.String()).NotTo(o.ContainSubstring(urlIPv4))
// Generation of a new self-signed CA
exutil.By("8. Generation of a new self-signed CA")
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Generate the CA private key")
opensslCmd := fmt.Sprintf(`openssl genrsa -out %s-ca.key 4096`, name)
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("9. Create the CA certificate")
opensslCmd = fmt.Sprintf(`openssl req -x509 -new -nodes -key %s-ca.key -sha256 -days %d -out %s-ca.crt -subj %s`, name, validity, name, caSubj)
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("10. Create a configmap from the CA onto hosted cluster")
configmapName := "custom-ca"
customCA := "--from-file=ca-bundle.crt=" + name + "-ca.crt"
e2e.Logf("\n customCA is %v", customCA)
defer func() {
_, delErr := oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("configmap", configmapName, "-n", "openshift-config", "--ignore-not-found=true").Output()
o.Expect(delErr).NotTo(o.HaveOccurred())
}()
_, createErr := oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("configmap", configmapName, customCA, "-n", "openshift-config").Output()
o.Expect(createErr).NotTo(o.HaveOccurred())
g.By("11. Check if configmap is successfully configured in openshift-config namesapce on hosted cluster")
err = checkConfigMap(oc.AsGuestKubeconf(), "openshift-config", configmapName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("cm %v not found on hosted cluster", configmapName))
g.By("12. Patch the configmap created above to hosted cluster, verify trustedCA can be created")
defer func() {
innerPollingInterval := 10 * time.Second
innerPollingIterationCount := 3
outerPollingInterval := 15 * time.Second
outerPollingTimeout := 5 * time.Minute
// Returns true only if all Nodes stay ready for a while
nodesStayHealthyForAWhile := func() bool {
for count := 0; count < innerPollingIterationCount; count++ {
// Wait a little before checking all nodes on hosted cluster all together
time.Sleep(innerPollingInterval)
for _, hostedClusterNode := range scheduleableNodes {
statusOutput, err := oc.AsGuestKubeconf().Run("get").Args("nodes", hostedClusterNode, "-ojsonpath={.status.conditions[-1].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n status for node %v is: %v", hostedClusterNode, statusOutput)
if statusOutput != "True" { // when node is in Ready state, status output returned from line 295 is "True"
return false
}
}
}
return true
}
o.Eventually(nodesStayHealthyForAWhile).WithTimeout(outerPollingTimeout).WithPolling(outerPollingInterval).Should(o.BeTrue())
}()
origTrustedCA, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.trustedCA.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("origTrustedCA: %v\n", origTrustedCA)
patchRemove = "{\"spec\":{\"configuration\":{\"proxy\":{\"trustedCA\":{\"name\":\"" + origTrustedCA + "\"}}}}}"
patchAdd = "{\"spec\":{\"configuration\":{\"proxy\":{\"trustedCA\":{\"name\":\"custom-ca\"}}}}}"
defer patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchRemove)
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
trustedCAName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.trustedCA.name}").Output()
e2e.Logf("trustedCAName: %v\n", trustedCAName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(trustedCAName).Should(o.Equal(configmapName))
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchRemove)
})
| |||||
test case
|
openshift/openshift-tests-private
|
d26a370a-0b4b-4dc4-8668-5750c1118836
|
HyperShiftMGMT-NonPreRelease-Longduration-ConnectedOnly-Author:jechen-High-70261-Network Connectivity is not broken even if BM Kubevirt VM migration fails. [Disruptive]
|
['"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/hypershift_hosted.go
|
g.It("HyperShiftMGMT-NonPreRelease-Longduration-ConnectedOnly-Author:jechen-High-70261-Network Connectivity is not broken even if BM Kubevirt VM migration fails. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
migrationTemplate := filepath.Join(buildPruningBaseDir, "kubevirt-live-migration-job-template.yaml")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
ns1 := "70261-test-ns1-on-hostedcluster" //namespace for hosted cluster has to be all lowercased, that is why hostedcluster is used here, instead of hostedCluster
ns2 := "70261-test-ns2-on-hostedcluster"
hyperShiftMgmtNS := hostedclusterNS + "-" + hostedClusterName
e2e.Logf("hyperShiftMgmtNS: %v\n", hyperShiftMgmtNS)
mgmtClusterPlatform := exutil.CheckPlatform(oc)
e2e.Logf("mgmt cluster platform: %v\n", mgmtClusterPlatform)
nestedClusterPlatform := exutil.CheckPlatform(oc.AsAdmin().AsGuestKubeconf())
e2e.Logf("hosted cluster platform: %v\n", nestedClusterPlatform)
if !strings.Contains(mgmtClusterPlatform, "baremetal") || !strings.Contains(nestedClusterPlatform, "kubevirt") {
g.Skip("Live migration can only be performed on Baremetal Kubevirt Hypershift, skip all other platforms")
}
exutil.By("1. Get node list on hosted cluster\n")
allNodeListOnHostedCluster, err := exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
o.Expect(err).NotTo(o.HaveOccurred())
origScheduleableWorkerNodeCount := len(allNodeListOnHostedCluster)
nodePoolName := exutil.GetNodePoolNamesbyHostedClusterName(oc, hostedClusterName, hostedclusterNS)
o.Expect(len(nodePoolName)).ShouldNot(o.Equal(0))
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeNames)).ShouldNot(o.Equal(0))
e2e.Logf("The nodes in nodepool %v is:\n%v", nodePoolName[0], nodeNames)
exutil.By("2. Get the first VMI on mgmt cluster for live migration, check it is live migratable \n")
vmi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", "-n", hyperShiftMgmtNS, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.conditions[*].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("status: %v\n", status)
o.Expect(strings.Contains(status, "Ready")).To(o.BeTrue())
o.Expect(strings.Contains(status, "LiveMigratable")).To(o.BeTrue())
exutil.By("3. Before perform live migration, create test project and test pod on the node that will involve live migration\n")
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("project", ns1, "--ignore-not-found=true").Execute()
oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns1).Execute()
exutil.SetNamespacePrivileged(oc.AsGuestKubeconf(), ns1)
testPod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: vmi,
template: pingPodNodeTemplate,
}
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("pod", testPod1.name, "-n", testPod1.namespace, "--ignore-not-found=true").Execute()
testPod1.createPingPodNode(oc.AsGuestKubeconf())
waitPodReady(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name)
exutil.By("4. Delibrately set kubevirt.io/func-test-virt-launcher-fail-fast=true on the VMI that will be performed live migration so its migration will fail\n")
defer oc.AsAdmin().WithoutNamespace().Run("annotate").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "kubevirt.io/func-test-virt-launcher-fail-fast=false", "--overwrite").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("vmi", vmi, "-n", hyperShiftMgmtNS, "kubevirt.io/func-test-virt-launcher-fail-fast=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Perform live migration on the VMI \n")
migrationjob := migrationDetails{
name: "migration-job-70261",
template: migrationTemplate,
namespace: hyperShiftMgmtNS,
virtualmachinesintance: vmi,
}
defer migrationjob.deleteMigrationJob(oc)
migrationjob.createMigrationJob(oc)
exutil.By("6. Check live migration status, live migration is expected to fail due to annoation from step 4 \n")
o.Eventually(func() bool {
migrationStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("vmim", migrationjob.name, "-n", hyperShiftMgmtNS, "-o=jsonpath={.status.phase}").Output()
return err == nil && migrationStatus == "Failed"
}, "300s", "10s").Should(o.BeTrue(), "Live migration did not fail as expected!!")
exutil.By("7. All hosted cluster nodes should remain in Ready state 2 minutes after attempted migration, same number of hosted cluster nodes remain in Ready state \n")
o.Consistently(func() int {
nodeList, err := exutil.GetSchedulableLinuxWorkerNodes(oc.AsAdmin().AsGuestKubeconf())
o.Expect(err).NotTo(o.HaveOccurred())
return (len(nodeList))
}, 120*time.Second, 10*time.Second).Should(o.Equal(origScheduleableWorkerNodeCount))
exutil.By("8. Check operators state on management cluster and hosted cluster, they should all be in healthy state \n")
checkAllClusterOperatorsState(oc, 10, 1)
checkAllClusterOperatorsState(oc.AsGuestKubeconf(), 10, 1)
exutil.By("9. Check health of OVNK on management cluster \n")
checkOVNKState(oc)
exutil.By("10. Create a second test project and test pod on a different node of the hosted cluster after attempted live migration\n")
// remove the node involved in the attempted live migration from the node list, keeping the other nodes of the hosted cluster
var nodeLeft []string
for i, v := range nodeNames {
if v == vmi {
nodeLeft = append(nodeNames[:i], nodeNames[i+1:]...)
break
}
}
e2e.Logf("\n Get other nodes from node list of the hosted cluster: %v\n", nodeLeft)
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("project", ns2, "--ignore-not-found=true").Execute()
oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns2).Execute()
exutil.SetNamespacePrivileged(oc.AsGuestKubeconf(), ns2)
var testPod2Node string
if len(nodeLeft) < 1 {
e2e.Logf("There is no other node on the hosted cluster, create testPod2 on same VMI node")
testPod2Node = vmi
} else {
e2e.Logf("There is some other node on the hosted cluster, create testPod2 on some other node")
testPod2Node = nodeLeft[0]
}
testPod2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: testPod2Node,
template: pingPodNodeTemplate,
}
defer oc.AsGuestKubeconf().AsAdmin().WithoutNamespace().Run("delete").Args("pod", testPod2.name, "-n", testPod2.namespace, "--ignore-not-found=true").Execute()
testPod2.createPingPodNode(oc.AsGuestKubeconf())
waitPodReady(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name)
exutil.By("11. Pod created before attempted live migration should be able to communicate with pod created after attempted live migration\n")
testPod1IP1, testPod1IP2 := getPodIP(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name)
e2e.Logf("\n Got ip address for testPod1 is: %v, %v\n", testPod1IP1, testPod1IP2)
testPod2IP1, testPod2IP2 := getPodIP(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name)
e2e.Logf("\n Got ip address for testPod2 is: %v, %v\n", testPod2IP1, testPod2IP2)
// Curl testPod 1 from testPod2
cmd1 := "curl --connect-timeout 5 -s " + testPod1IP1 + ":8080"
cmd2 := "curl --connect-timeout 5 -s " + testPod1IP2 + ":8080"
if testPod1IP2 != "" {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name, cmd2)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod2.namespace, testPod2.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Curl testPod2 from testPod1
cmd1 = "curl --connect-timeout 5 -s " + testPod2IP1 + ":8080"
cmd2 = "curl --connect-timeout 5 -s " + testPod2IP2 + ":8080"
if testPod2IP2 != "" {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name, cmd2)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := execCommandInSpecificPod(oc.AsGuestKubeconf(), testPod1.namespace, testPod1.name, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
3b9ba5a3-e9b6-4a38-b6ba-86a7d6bbc8b6
|
Author:jechen-HyperShiftMGMT-ConnectedOnly-High-74596-Even with a FQDN proxy configured on hostedCluster, connection can be made to the readinessEndpoint under noProxy that bypass the proxy [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/hypershift_hosted.go
|
g.It("Author:jechen-HyperShiftMGMT-ConnectedOnly-High-74596-Even with a FQDN proxy configured on hostedCluster, connection can be made to the readinessEndpoint under noProxy that bypass the proxy [Disruptive]", func() {
// This is for bug https://issues.redhat.com/browse/OCPBUGS-33526
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
squidProxyDeploymentFile := filepath.Join(buildPruningBaseDir, "proxy_deployment.yaml")
url := "www.google.com"
exutil.By("1. create new namespace\n")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "anyuid", "-z", "default", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2. Deploy a squid deployment in the namespace then expose its service\n")
defer removeResource(oc, true, true, "deployment", "squid-deployment", ns)
defer removeResource(oc, true, true, "service", "squid-deployment", ns)
createResourceFromFile(oc, ns, squidProxyDeploymentFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", ns, "squid-deployment").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "squid-deployment")).To(o.BeTrue())
err = waitForPodWithLabelReady(oc, ns, "app=squid")
exutil.AssertWaitPollNoErr(err, "Not all squid pods with label app=squid are ready")
squidPods := getPodName(oc, ns, "app=squid")
o.Expect(len(squidPods)).Should(o.Equal(1))
defer removeResource(oc, true, true, "pod", squidPods[0], ns)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("deployment/squid-deployment", "--type=LoadBalancer", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
LBSVCHostname := getLBSVCHostname(oc, ns, "squid-deployment")
e2e.Logf("\n\n\n Got hostname for the squid service: %v\n", LBSVCHostname)
exutil.By("3. Patch hosted cluster to add squid proxy as its proxy\n")
origProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchResource := "hostedcluster/" + hostedClusterName
patchRestore := fmt.Sprintf(`[{"op": "replace", "path": "/spec/configuration/proxy", "value":%s}]`, origProxy)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", hostedclusterNS, patchResource, "--type=json", "-p", patchRestore).Execute()
proxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("proxy is restored to: %s\n", proxy)
o.Expect(proxy).Should(o.ContainSubstring(origProxy))
}()
proxyValue := "http://" + LBSVCHostname + ":3128"
patchAdd := "{\"spec\":{\"configuration\":{\"proxy\":{\"httpProxy\":\"" + proxyValue + "\", \"httpsProxy\":\"" + proxyValue + "\"}}}}"
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
proxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("proxy: %s\n", proxy)
expectedProxy1 := fmt.Sprintf(`"httpProxy":"http://%s:3128"`, LBSVCHostname)
expectedProxy2 := fmt.Sprintf(`"httpsProxy":"http://%s:3128"`, LBSVCHostname)
o.Expect(proxy).Should(o.And(o.ContainSubstring(expectedProxy1), o.ContainSubstring(expectedProxy2)))
exutil.By("4. Patch hosted cluster to add squid proxy to noProxy, then set its readinessEndpoint to www.google.com\n")
patchAdd = "{\"spec\":{\"configuration\":{\"proxy\":{\"noProxy\":\"" + LBSVCHostname + "\"}}}}"
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
readinessEP := "https://" + url
patchAdd = "{\"spec\":{\"configuration\":{\"proxy\":{\"readinessEndpoints\":[\"" + readinessEP + "\"]}}}}"
patchResourceAsAdminNS(oc, hostedclusterNS, patchResource, patchAdd)
noProxyOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.noProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(noProxyOutput, LBSVCHostname)).To(o.BeTrue())
readinessEPOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", hostedClusterName, "-n", hostedclusterNS, "-o=jsonpath={.spec.configuration.proxy.readinessEndpoints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(readinessEPOutput, url)).To(o.BeTrue())
// give some time for readinessEndpoints under noProxy to take effect
time.Sleep(30 * time.Second)
exutil.By("5. Check squid pod to confirm connectivity to www.google.com succeed\n")
expectedString := fmt.Sprintf(`CONNECT %s:443`, url)
o.Eventually(func() bool {
podLogs, LogErr := checkLogMessageInPod(oc, ns, "tailer", squidPods[0], "google.com")
o.Expect(LogErr).NotTo(o.HaveOccurred())
return strings.Contains(podLogs, expectedString)
}, "5m", "10s").Should(o.BeTrue(), "Connection to the readinessEndpoint under noProxy did not succeed!!")
})
| |||||
test
|
openshift/openshift-tests-private
|
e9e24bb0-b2c5-4c57-ab45-e338e66b930d
|
infw
|
import (
"context"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
package networking
import (
"context"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN infw", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-infw", exutil.KubeConfigPath())
opNamespace = "openshift-ingress-node-firewall"
opName = "ingress-node-firewall"
testDataDirMetallb = exutil.FixturePath("testdata", "networking/metallb")
)
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("This is required to run on OVNKubernetes Network Backened")
}
windowNodeList, err := exutil.GetAllNodesbyOSType(oc, "windows")
o.Expect(err).NotTo(o.HaveOccurred())
if len(windowNodeList) > 0 {
g.Skip("INFW usecases are not compatible to run on Cluster with window nodes")
}
// leveraging a few templates and utils from the metallb code
namespaceTemplate := filepath.Join(testDataDirMetallb, "namespace-template.yaml")
operatorGroupTemplate := filepath.Join(testDataDirMetallb, "operatorgroup-template.yaml")
subscriptionTemplate := filepath.Join(testDataDirMetallb, "subscription-template.yaml")
sub := subscriptionResource{
name: "ingress-node-firewall-sub",
namespace: opNamespace,
operatorName: opName,
catalog: "qe-app-registry",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
g.By("Making sure CRDs are also installed")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ingressnodefirewallconfigs.ingressnodefirewall.openshift.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "ingressnodefirewallnodestates.ingressnodefirewall.openshift.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "ingressnodefirewalls.ingressnodefirewall.openshift.io")).To(o.BeTrue())
})
g.It("Author:anusaxen-High-61481-LEVEL0-StagerunBoth-Ingress Node Firewall Operator Installation ", func() {
g.By("Checking Ingress Node Firewall operator and CRDs installation")
e2e.Logf("Operator install and CRDs check successfull!")
g.By("SUCCESS - Ingress Node Firewall operator and CRDs installed")
})
g.It("Author:anusaxen-WRS-High-54714-V-BR.53-Check Ingress Firewall Allow/Deny functionality for TCP via Nodeport svc [Serial][Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
platform := checkPlatform(oc)
if strings.Contains(platform, "vsphere") || ipStackType == "dualstack" || ipStackType == "ipv6single" {
g.By("Proceeding test on supported platform..")
} else {
g.Skip("Skip for un-expected platform, not vsphere or dualstack or ipv6single!")
}
g.By("Create a namespace for the scenario")
g.By("Obtain the namespace")
ns := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create a hello pod in ns")
pod := pingPodResourceNode{
name: "hello-pod1",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
g.By("Create a test service backing up the above pod")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "NodePort",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
//familyPolicy doesn't matter in this case
if ipStackType == "dualstack" {
svc.ipFamilyPolicy = "RequireDualStack"
} else {
svc.ipFamilyPolicy = "SingleStack"
}
svc.createServiceFromParams(oc)
g.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, "test-service", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// we need a port range to evaluate the rule properly; e.g. if the nodePort is 33000, we build the range 33000-33005 and store it in port_range
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range := nodePort + "-" + end_range
// before creating an Ingress Node Firewall rule that blocks the TCP NodePort svc, make sure the NodePort svc is accessible from another node (a non-pod node)
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//get cluster default mgmt interface
primaryInf := getPrimaryNICname(oc)
// nodeIP1 and nodeIP2 are the IPv6 and IPv4 addresses respectively on dual stack; on single stack only the second variable carries the IPv4/IPv6 address
nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[1].Name)
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-block-nport-tcp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "tcp",
protocol_1: "TCP",
range_1: port_range,
action_1: "Deny",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: port_range,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
infwCR_single := infwCResource{
name: "infw-block-nport-tcp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocol_1: "TCP",
protocoltype1: "tcp",
range_1: port_range,
action_1: "Deny",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: port_range,
action_2: "Allow",
template: infwCRtemplate,
}
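// both CRs define two rules over the same NodePort range, the first with action Deny and the second with action Allow; rules are matched in order, so rule 1 takes effect first (verified below)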
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = nodeIP1 + "/128"
infwCR_multiple.src_cidr2 = nodeIP2 + "/32"
defer deleteinfwCR(oc, infwCR_multiple.name)
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = nodeIP2 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = nodeIP2 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
}
//based on the rule order above, rule 1 executes first and its Deny action triggers, so we expect CurlNodePortFail to succeed
CurlNodePortFail(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
//make sure events were logged for Deny events
infwDaemon := getinfwDaemonForNode(oc, nodeList.Items[0].Name)
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
//Now set action 1 to Allow and make sure the traffic passes
infwCR_single.action_1 = "Allow"
infwCR_multiple.action_1 = "Allow"
if ipStackType == "dualstack" {
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
infwCR_single.createinfwCR(oc)
}
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
//Delete INFW components and wait for them to re-spawn and make sure CurlNodePortPass works again
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment", "ingress-node-firewall-controller-manager", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ds", "ingress-node-firewall-daemon", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
})
g.It("Author:anusaxen-WRS-High-54992-V-BR.53-Check Ingress Firewall Allow/Deny functionality for UDP via Nodeport svc [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
udpListenerPod = filepath.Join(buildPruningBaseDir, "udp-listener.yaml")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
platform := checkPlatform(oc)
if strings.Contains(platform, "vsphere") || ipStackType == "dualstack" || ipStackType == "ipv6single" {
g.By("Proceeding test on supported platform..")
} else {
g.Skip("Skip for un-expected platform, not vsphere or dualstack or ipv6single!")
}
g.By("Create a namespace for the scenario")
g.By("Obtain the namespace")
ns := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create UDP Listener Pod")
createResourceFromFile(oc, oc.Namespace(), udpListenerPod)
err = waitForPodWithLabelReady(oc, oc.Namespace(), "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "this pod with label name=udp-pod not ready")
var udpPodName []string
udpPodName = getPodName(oc, oc.Namespace(), "name=udp-pod")
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("pod", udpPodName[0], "-n", ns, "--type=NodePort", "--port=8080", "--protocol=UDP").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
podNodeName, getNodeNameErr := exutil.GetPodNodeName(oc, ns, udpPodName[0])
o.Expect(getNodeNameErr).NotTo(o.HaveOccurred())
masterNode, getMasterNodeErr := exutil.GetFirstMasterNode(oc) //this will act as the source node for reaching the exposed UDP service
o.Expect(getMasterNodeErr).NotTo(o.HaveOccurred())
g.By("Get service NodePort and NodeIP value")
//expose command will use same service name as pod name
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, udpPodName[0], "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//we need a port range to evaluate the rule properly; e.g. if the nodePort is 33000, port_range will hold 33000-33005
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range := nodePort + "-" + end_range
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//get cluster default mgmt interface
primaryInf := getPrimaryNICname(oc)
//on dual stack, nodeIP1 and nodeIP2 hold the IPv6 and IPv4 address respectively; on single stack, the address is in the 2nd variable. These belong to the src master node
nodeIP1, nodeIP2 := getNodeIP(oc, masterNode)
//nodeIP for podNode
_, podNodeIP := getNodeIP(oc, podNodeName)
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-block-nport-udp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "udp",
protocol_1: "UDP",
range_1: port_range,
action_1: "Deny",
protocoltype2: "udp",
protocol_2: "UDP",
range_2: port_range,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
infwCR_single := infwCResource{
name: "infw-block-nport-udp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocoltype1: "udp",
protocol_1: "UDP",
range_1: port_range,
action_1: "Deny",
protocoltype2: "udp",
protocol_2: "UDP",
range_2: port_range,
action_2: "Allow",
template: infwCRtemplate,
}
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = nodeIP1 + "/128"
infwCR_multiple.src_cidr2 = nodeIP2 + "/32"
defer deleteinfwCR(oc, infwCR_multiple.name)
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = nodeIP2 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = nodeIP2 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
}
g.By("send a hello message to udp listener from a master node to pod node")
cmd := "echo -n hello >/dev/udp/" + podNodeIP + "/" + nodePort
_, err = exutil.DebugNode(oc, masterNode, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
infwDaemon := getinfwDaemonForNode(oc, podNodeName)
//Now check the infw daemon events for the pod's node; the presence of drop stats confirms packets from the master node were denied
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
//Now set action 1 to Allow and make sure the traffic passes
infwCR_single.action_1 = "Allow"
infwCR_multiple.action_1 = "Allow"
//restart infw daemons to clear earlier stats and redeploy infwConfig()
restartInfwDaemons(oc)
if ipStackType == "dualstack" {
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
infwCR_single.createinfwCR(oc)
}
g.By("send a hello message to udp listener from a master node to pod node again")
cmd = "echo -n hello >/dev/udp/" + podNodeIP + "/" + nodePort
_, err = exutil.DebugNode(oc, masterNode, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
infwDaemon = getinfwDaemonForNode(oc, podNodeName)
_, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("Author:anusaxen-ROSA-WRS-High-55411-V-BR.53-Check Ingress Firewall Allow/Deny functionality for ICMP [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCR_ICMP_template = filepath.Join(testDataDirInfw, "infw-icmp.yaml")
infwCR_ICMPv6_template = filepath.Join(testDataDirInfw, "infw-icmpv6.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" {
g.Skip("This case requires single stack cluster")
}
g.By("Create first namespace")
ns1 := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create a hello pod in first namespace")
podns1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns1.createPingPodNode(oc)
waitPodReady(oc, podns1.namespace, podns1.name)
g.By("Create Second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create a hello-pod on 2nd namesapce on different node")
podns2 := pingPodResourceNode{
name: "hello-pod",
namespace: ns2,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
podns2.createPingPodNode(oc)
waitPodReady(oc, podns2.namespace, podns2.name)
g.By("Get IP of the hello-pods")
hellopodIPns1, _ := getPodIP(oc, ns1, podns1.name)
hellopodIPns2, _ := getPodIP(oc, ns2, podns2.name)
//OVN geneve interface name
primaryInf := "genev_sys_6081"
infwCR_icmp := infwCResource_icmp{
name: "infw-block-icmp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr: "",
action_1: "Deny",
action_2: "Allow",
template: infwCR_ICMP_template,
}
infwCR_icmpv6 := infwCResource_icmp{
name: "infw-block-icmpv6",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr: "",
action_1: "Deny",
action_2: "Allow",
template: infwCR_ICMPv6_template,
}
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
var cmd []string
pingCmdv4 := "ping -c4 " + hellopodIPns2
pingCmdv6 := "ping6 -c4 " + hellopodIPns2
if ipStackType == "ipv6single" {
infwCR_icmpv6.src_cidr = hellopodIPns1 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
defer deleteinfwCR(oc, infwCR_icmpv6.name)
infwCR_icmpv6.createinfwICMP(oc)
cmd = []string{"-n", ns1, podns1.name, "--", "/bin/sh", "-c", pingCmdv6}
} else {
infwCR_icmp.src_cidr = hellopodIPns1 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
defer deleteinfwCR(oc, infwCR_icmp.name)
infwCR_icmp.createinfwICMP(oc)
cmd = []string{"-n", ns1, podns1.name, "--", "/bin/sh", "-c", pingCmdv4}
}
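//ping's exit status is intentionally ignored below; the Deny verdict is asserted by checking for 100% packet loss in the command output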
msg, _ := oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
o.Expect(msg).To(o.ContainSubstring("100% packet loss"))
//make sure events were logged for Deny events
infwDaemon := getinfwDaemonForNode(oc, nodeList.Items[1].Name)
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
g.By("create infw CR for ICMP again with both Allow actions")
if ipStackType == "ipv6single" {
infwCR_icmpv6.action_1 = "Allow"
infwCR_icmpv6.createinfwICMP(oc)
} else {
infwCR_icmp.action_1 = "Allow"
infwCR_icmp.createinfwICMP(oc)
}
msg, _ = oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
o.Expect(msg).NotTo(o.ContainSubstring("100% packet loss"))
})
g.It("Author:anusaxen-Longduration-NonPreRelease-WRS-High-55410-V-BR.53-Check Ingress Firewall Allow/Deny functionality for SCTP [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
client_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[0].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp client pod in namespace")
createSCTPclientOnNode(oc, client_pod_pmtrs)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
server_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[1].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp server pod in namespace")
createSCTPserverOnNode(oc, server_pod_pmtrs)
err1 = waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err1, "sctpServerPod is not running")
//re-using SCTP testdata where nodePort value is hardcoded
nodePort := "30102"
//we need a port range to evaluate the rule properly; e.g. if the nodePort is 33000, port_range will hold 33000-33005
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range := nodePort + "-" + end_range
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//OVN geneve interface name
primaryInf := "genev_sys_6081"
//get Pod IPs depending on clustertype
sctpClientPodIP1, sctpClientPodIP2 := getPodIP(oc, oc.Namespace(), sctpClientPodname)
//we only need sctpServerPodIP1; on dual stack getPodIP returns the IPv6 address in the 1st variable, and on single stack the sole (IPv6 or IPv4) address
sctpServerPodIP1, _ := getPodIP(oc, oc.Namespace(), sctpServerPodName)
infwCR_single := infwCResource{
name: "infw-block-sctp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocol_1: "SCTP",
protocoltype1: "sctp",
range_1: port_range,
action_1: "Allow",
protocoltype2: "sctp",
protocol_2: "SCTP",
range_2: port_range,
action_2: "Allow",
template: infwCRtemplate,
}
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-block-sctp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "sctp",
protocol_1: "SCTP",
range_1: port_range,
action_1: "Allow",
protocoltype2: "sctp",
protocol_2: "SCTP",
range_2: port_range,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 sctpClient addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP2 + "/32"
infwCR_multiple.src_cidr2 = sctpClientPodIP1 + "/128"
defer deleteinfwCR(oc, infwCR_multiple.name)
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
}
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//normally the process should start immediately but we have seen 1-2 seconds delay using ncat-sctp under such circumstances so keeping 5 sec to make sure
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP1 will be either IPv6 or IPv4 according to cluster type (for dual stack it would be IPv6)
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP1+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
//normally the process should end immediately but we have seen 1-2 seconds delay using ncat-sctp under such circumstances so keeping 5 sec to make sure
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
//now set action 1 to Deny and expect an error when test traffic is sent
infwCR_single.action_1 = "Deny"
infwCR_multiple.action_1 = "Deny"
restartInfwDaemons(oc)
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 sctpClient addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP2 + "/32"
infwCR_multiple.src_cidr2 = sctpClientPodIP1 + "/128"
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
infwCR_single.createinfwCR(oc)
}
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//normally the process should start immediately but we have seen 1-2 seconds delay using ncat-sctp under such circumstances so keeping 5 sec to make sure
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err = e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP1 will be either IPv6 or IPv4 according to cluster type (for dual stack it would be IPv6)
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP1+" 30102 --sctp; }")
o.Expect(err1).To(o.HaveOccurred()) //this traffic should be denied based on later created infw policy
//make sure events were logged for Deny events post daemons restart at line 664, Ref.OCPBUGS-11888
podNodeName, getNodeNameErr := exutil.GetPodNodeName(oc, oc.Namespace(), "sctpserver")
o.Expect(getNodeNameErr).NotTo(o.HaveOccurred())
infwDaemon := getinfwDaemonForNode(oc, podNodeName)
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
})
g.It("Longduration-NonPreRelease-Author:anusaxen-Medium-54973-Make sure events and metrics are logged for ingress-node-firewall-daemon [Serial]", func() {
var (
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
g.By("Events are being monitored in testcases wherever applicable so we will make sure metrics are being relayed to concerned port")
worker_node, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
infwDaemon := getinfwDaemonForNode(oc, worker_node)
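//query the metrics endpoint from inside the daemon pod; 39301 is the port this test expects the ingress-node-firewall daemon to serve /metrics on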
cmd := "curl 127.0.0.1:39301/metrics"
output, err := execCommandInSpecificPod(oc, "openshift-ingress-node-firewall", infwDaemon, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingressnodefirewall"))
})
g.It("Author:anusaxen-High-55414-Check multiple CIDRS with multiple rules functionality with Ingress Firewall Node Operator [Serial]", func() {
var (
buildPruningBaseDirSCTP = exutil.FixturePath("testdata", "networking/sctp")
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpModule = filepath.Join(buildPruningBaseDirSCTP, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" {
g.Skip("This case requires single stack cluster")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
client_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[0].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp client pod in namespace")
createSCTPclientOnNode(oc, client_pod_pmtrs)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
server_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[1].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp server pod in namespace")
createSCTPserverOnNode(oc, server_pod_pmtrs)
err1 = waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err1, "sctpServerPod is not running")
g.By("create a hello pod client in same namespace as of SCTP for TCP traffic check on same node as sctp client")
pod := pingPodResourceNode{
name: "hello-pod-client",
namespace: oc.Namespace(),
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
g.By("create a hello pod server in same namespace as of SCTP for TCP traffic check on same node as sctp server")
pod = pingPodResourceNode{
name: "hello-pod-server",
namespace: oc.Namespace(),
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
//re-using SCTP testdata where nodePort value is hardcoded
nodePort := "30102"
//we need a port range to evaluate the rule properly; e.g. if the nodePort is 33000, port_range will hold 33000-33005
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range_sctp := nodePort + "-" + end_range
port_range_tcp := "8080-8081"
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//OVN geneve interface name
primaryInf := "genev_sys_6081"
//get Pod IPs depending on clustertype
sctpClientPodIP, _ := getPodIP(oc, oc.Namespace(), sctpClientPodname)
//we only need sctpServerPodIP; on dual stack getPodIP returns the IPv6 address in the 1st variable, and on single stack the sole (IPv6 or IPv4) address
sctpServerPodIP, _ := getPodIP(oc, oc.Namespace(), sctpServerPodName)
helloPodClientIP, _ := getPodIP(oc, oc.Namespace(), "hello-pod-client")
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-allow-sctp-tcp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "sctp",
protocol_1: "SCTP",
range_1: port_range_sctp,
action_1: "Allow",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: port_range_tcp,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
if ipStackType == "ipv6single" {
g.By("Create Custom Resource for IPv6 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/128"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/128"
} else {
g.By("Create Custom Resource for IPv4 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/32"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/32"
}
defer deleteinfwCR(oc, "--all")
infwCR_multiple.createinfwCR_multiple_cidr(oc)
//check sctp traffic as per allow rule
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//normally the process should start immediately but we have seen 1-2 seconds delay using ncat-sctp under such circumstances so keeping 5 sec to make sure
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP will be either IPv6 or IPv4 according to cluster type
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
//check tcp traffic as per allow rule
CurlPod2PodPass(oc, oc.Namespace(), "hello-pod-client", oc.Namespace(), "hello-pod-server")
//delete infw-allow-sctp-tcp CR created above
//use the --all arg to delete every CR; this test uses different CR names, so a defer tied to a specific CR name is not reliable
deleteinfwCR(oc, "--all")
//Re-create CR with Deny rules now
infwCR_multiple.action_1 = "Deny"
infwCR_multiple.action_2 = "Deny"
infwCR_multiple.name = "infw-block-sctp-tcp"
if ipStackType == "ipv6single" {
g.By("Create Custom Resource for IPv6 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/128"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/128"
} else {
g.By("Create Custom Resource for IPv4 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/32"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/32"
}
infwCR_multiple.createinfwCR_multiple_cidr(oc)
//check sctp traffic as per Deny rule
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//normally the process should start immediately but we have seen 1-2 seconds delay using ncat-sctp under such circumstances so keeping 5 sec to make sure
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err = e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP will be either IPv6 or IPv4 according to cluster type
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).To(o.HaveOccurred())
//check tcp traffic as per Deny rule
CurlPod2PodFail(oc, oc.Namespace(), "hello-pod-client", oc.Namespace(), "hello-pod-server")
})
g.It("Author:anusaxen-ROSA-High-73844-Check Ingress Node Firewall functionality for blocking SSH traffic [Serial]", func() {
var (
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" {
g.Skip("This case requires single stack cluster IPv4/IPv6")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//get cluster default mgmt interface
primaryInf := getPrimaryNICname(oc)
infwCR_single := infwCResource{
name: "infw-block-ssh",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocol_1: "TCP",
protocoltype1: "tcp",
range_1: "22", //ssh port
action_1: "Deny",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: "22",
action_2: "Allow",
template: infwCRtemplate,
}
if ipStackType == "ipv6single" {
//ssh traffic coming towards any worker node should be blocked
infwCR_single.src_cidr1 = "::/0"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
//ssh traffic coming towards any worker node should be blocked
infwCR_single.src_cidr1 = "0.0.0.0/0"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
//Identify the first master node to act as the ssh source for the worker node
firstMasterNode, err := exutil.GetFirstMasterNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
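//ConnectTimeout=1 keeps the check quick; with the Deny rule on port 22 in place the attempt is expected to time out rather than reach sshd on the worker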
sshcmd := "ssh -o ConnectTimeout=1 core@" + nodeList.Items[0].Name
sshOutput, _ := exutil.DebugNodeWithChroot(oc, firstMasterNode, "/bin/bash", "-c", sshcmd)
o.Expect(strings.Contains(sshOutput, "Connection timed out")).Should(o.BeTrue())
//get corresponding infw daemon pod for targeted worker
infwDaemon := getinfwDaemonForNode(oc, nodeList.Items[0].Name)
//make sure events were logged for ssh Deny
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
o.Expect(strings.Contains(output, "dstPort 22")).Should(o.BeTrue())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
16bd92b7-5872-403d-b5b3-dac15631b466
|
Author:anusaxen-High-61481-LEVEL0-StagerunBoth-Ingress Node Firewall Operator Installation
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Author:anusaxen-High-61481-LEVEL0-StagerunBoth-Ingress Node Firewall Operator Installation ", func() {
g.By("Checking Ingress Node Firewall operator and CRDs installation")
e2e.Logf("Operator install and CRDs check successfull!")
g.By("SUCCESS - Ingress Node Firewall operator and CRDs installed")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
c785b6a2-c4dd-4052-b240-0cc558a2731f
|
Author:anusaxen-WRS-High-54714-V-BR.53-Check Ingress Firewall Allow/Deny functionality for TCP via Nodeport svc [Serial][Disruptive]
|
['"context"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Author:anusaxen-WRS-High-54714-V-BR.53-Check Ingress Firewall Allow/Deny functionality for TCP via Nodeport svc [Serial][Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
platform := checkPlatform(oc)
if strings.Contains(platform, "vsphere") || ipStackType == "dualstack" || ipStackType == "ipv6single" {
g.By("Proceeding test on supported platform..")
} else {
g.Skip("Skip for un-expected platform, not vsphere or dualstack or ipv6single!")
}
g.By("Create a namespace for the scenario")
g.By("Obtain the namespace")
ns := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create a hello pod in ns")
pod := pingPodResourceNode{
name: "hello-pod1",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
g.By("Create a test service backing up the above pod")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "NodePort",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
//familyPolicy doesn't matter in this case
if ipStackType == "dualstack" {
svc.ipFamilyPolicy = "RequireDualStack"
} else {
svc.ipFamilyPolicy = "SingleStack"
}
svc.createServiceFromParams(oc)
g.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, "test-service", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//we need a port range to evaluate the rule properly; e.g. if the nodePort is 33000, port_range will hold 33000-33005
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range := nodePort + "-" + end_range
//Before creating the blocking Ingress Node Firewall rule for the TCP NodePort svc, make sure the NodePort svc is accessible from another (non-pod) node
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//get cluster default mgmt interface
primaryInf := getPrimaryNICname(oc)
//on dual stack, nodeIP1 and nodeIP2 hold the IPv6 and IPv4 address respectively; on single stack, the node address (IPv4 or IPv6) is in the 2nd variable
nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[1].Name)
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-block-nport-tcp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "tcp",
protocol_1: "TCP",
range_1: port_range,
action_1: "Deny",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: port_range,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
infwCR_single := infwCResource{
name: "infw-block-nport-tcp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocol_1: "TCP",
protocoltype1: "tcp",
range_1: port_range,
action_1: "Deny",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: port_range,
action_2: "Allow",
template: infwCRtemplate,
}
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = nodeIP1 + "/128"
infwCR_multiple.src_cidr2 = nodeIP2 + "/32"
defer deleteinfwCR(oc, infwCR_multiple.name)
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = nodeIP2 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = nodeIP2 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
}
//based on the rule order above, rule 1 executes first and its Deny action triggers, so we expect CurlNodePortFail to succeed
CurlNodePortFail(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
//make sure events were logged for Deny events
infwDaemon := getinfwDaemonForNode(oc, nodeList.Items[0].Name)
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
//Now set action 1 to Allow and make sure the traffic passes
infwCR_single.action_1 = "Allow"
infwCR_multiple.action_1 = "Allow"
if ipStackType == "dualstack" {
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
infwCR_single.createinfwCR(oc)
}
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
//Delete INFW components and wait for them to re-spawn and make sure CurlNodePortPass works again
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment", "ingress-node-firewall-controller-manager", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ds", "ingress-node-firewall-daemon", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
})
| |||||
test case
|
openshift/openshift-tests-private
|
798c113b-3522-46cf-a133-08e182cd0ab6
|
Author:anusaxen-WRS-High-54992-V-BR.53-Check Ingress Firewall Allow/Deny functionality for UDP via Nodeport svc [Serial]
|
['"context"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Author:anusaxen-WRS-High-54992-V-BR.53-Check Ingress Firewall Allow/Deny functionality for UDP via Nodeport svc [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
udpListenerPod = filepath.Join(buildPruningBaseDir, "udp-listener.yaml")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
platform := checkPlatform(oc)
if strings.Contains(platform, "vsphere") || ipStackType == "dualstack" || ipStackType == "ipv6single" {
g.By("Proceeding test on supported platform..")
} else {
g.Skip("Skip for un-expected platform, not vsphere or dualstack or ipv6single!")
}
g.By("Create a namespace for the scenario")
g.By("Obtain the namespace")
ns := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create UDP Listener Pod")
createResourceFromFile(oc, oc.Namespace(), udpListenerPod)
err = waitForPodWithLabelReady(oc, oc.Namespace(), "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "this pod with label name=udp-pod not ready")
var udpPodName []string
udpPodName = getPodName(oc, oc.Namespace(), "name=udp-pod")
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("pod", udpPodName[0], "-n", ns, "--type=NodePort", "--port=8080", "--protocol=UDP").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
podNodeName, getNodeNameErr := exutil.GetPodNodeName(oc, ns, udpPodName[0])
o.Expect(getNodeNameErr).NotTo(o.HaveOccurred())
masterNode, getMasterNodeErr := exutil.GetFirstMasterNode(oc) //this will act as the source node for reaching the exposed UDP service
o.Expect(getMasterNodeErr).NotTo(o.HaveOccurred())
g.By("Get service NodePort and NodeIP value")
//expose command will use same service name as pod name
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, udpPodName[0], "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//we need a port range to evaluate the rule properly; e.g. if the nodePort is 33000, port_range will hold 33000-33005
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range := nodePort + "-" + end_range
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//get cluster default mgmt interface
primaryInf := getPrimaryNICname(oc)
//on dual stack, nodeIP1 and nodeIP2 hold the IPv6 and IPv4 address respectively; on single stack, the address is in the 2nd variable. These belong to the src master node
nodeIP1, nodeIP2 := getNodeIP(oc, masterNode)
//nodeIP for podNode
_, podNodeIP := getNodeIP(oc, podNodeName)
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-block-nport-udp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "udp",
protocol_1: "UDP",
range_1: port_range,
action_1: "Deny",
protocoltype2: "udp",
protocol_2: "UDP",
range_2: port_range,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
infwCR_single := infwCResource{
name: "infw-block-nport-udp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocoltype1: "udp",
protocol_1: "UDP",
range_1: port_range,
action_1: "Deny",
protocoltype2: "udp",
protocol_2: "UDP",
range_2: port_range,
action_2: "Allow",
template: infwCRtemplate,
}
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = nodeIP1 + "/128"
infwCR_multiple.src_cidr2 = nodeIP2 + "/32"
defer deleteinfwCR(oc, infwCR_multiple.name)
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = nodeIP2 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = nodeIP2 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
}
g.By("send a hello message to udp listener from a master node to pod node")
cmd := "echo -n hello >/dev/udp/" + podNodeIP + "/" + nodePort
_, err = exutil.DebugNode(oc, masterNode, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
infwDaemon := getinfwDaemonForNode(oc, podNodeName)
//Now check the infw daemon events for the pod's node; the presence of drop stats confirms packets from the master node were denied
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
//Now set action 1 to Allow and make sure the traffic passes
infwCR_single.action_1 = "Allow"
infwCR_multiple.action_1 = "Allow"
//restart infw daemons to clear earlier stats and redeploy infwConfig()
restartInfwDaemons(oc)
if ipStackType == "dualstack" {
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
infwCR_single.createinfwCR(oc)
}
g.By("send a hello message to udp listener from a master node to pod node again")
cmd = "echo -n hello >/dev/udp/" + podNodeIP + "/" + nodePort
_, err = exutil.DebugNode(oc, masterNode, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
infwDaemon = getinfwDaemonForNode(oc, podNodeName)
_, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
dac75368-70da-4b93-8583-39e3ff7b47ad
|
Author:anusaxen-ROSA-WRS-High-55411-V-BR.53-Check Ingress Firewall Allow/Deny functionality for ICMP [Serial]
|
['"context"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Author:anusaxen-ROSA-WRS-High-55411-V-BR.53-Check Ingress Firewall Allow/Deny functionality for ICMP [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCR_ICMP_template = filepath.Join(testDataDirInfw, "infw-icmp.yaml")
infwCR_ICMPv6_template = filepath.Join(testDataDirInfw, "infw-icmpv6.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" {
g.Skip("This case requires single stack cluster")
}
g.By("Create first namespace")
ns1 := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create a hello pod in first namespace")
podns1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns1.createPingPodNode(oc)
waitPodReady(oc, podns1.namespace, podns1.name)
g.By("Create Second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create a hello-pod on 2nd namesapce on different node")
podns2 := pingPodResourceNode{
name: "hello-pod",
namespace: ns2,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
podns2.createPingPodNode(oc)
waitPodReady(oc, podns2.namespace, podns2.name)
g.By("Get IP of the hello-pods")
hellopodIPns1, _ := getPodIP(oc, ns1, podns1.name)
hellopodIPns2, _ := getPodIP(oc, ns2, podns2.name)
//OVN geneve interface name
primaryInf := "genev_sys_6081"
infwCR_icmp := infwCResource_icmp{
name: "infw-block-icmp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr: "",
action_1: "Deny",
action_2: "Allow",
template: infwCR_ICMP_template,
}
infwCR_icmpv6 := infwCResource_icmp{
name: "infw-block-icmpv6",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr: "",
action_1: "Deny",
action_2: "Allow",
template: infwCR_ICMPv6_template,
}
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
var cmd []string
pingCmdv4 := "ping -c4 " + hellopodIPns2
pingCmdv6 := "ping6 -c4 " + hellopodIPns2
if ipStackType == "ipv6single" {
infwCR_icmpv6.src_cidr = hellopodIPns1 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
defer deleteinfwCR(oc, infwCR_icmpv6.name)
infwCR_icmpv6.createinfwICMP(oc)
cmd = []string{"-n", ns1, podns1.name, "--", "/bin/sh", "-c", pingCmdv6}
} else {
infwCR_icmp.src_cidr = hellopodIPns1 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
defer deleteinfwCR(oc, infwCR_icmp.name)
infwCR_icmp.createinfwICMP(oc)
cmd = []string{"-n", ns1, podns1.name, "--", "/bin/sh", "-c", pingCmdv4}
}
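//ping's exit status is intentionally ignored below; the Deny verdict is asserted by checking for 100% packet loss in the command output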
msg, _ := oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
o.Expect(msg).To(o.ContainSubstring("100% packet loss"))
//make sure events were logged for Deny events
infwDaemon := getinfwDaemonForNode(oc, nodeList.Items[1].Name)
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
g.By("create infw CR for ICMP again with both Allow actions")
if ipStackType == "ipv6single" {
infwCR_icmpv6.action_1 = "Allow"
infwCR_icmpv6.createinfwICMP(oc)
} else {
infwCR_icmp.action_1 = "Allow"
infwCR_icmp.createinfwICMP(oc)
}
msg, _ = oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
o.Expect(msg).NotTo(o.ContainSubstring("100% packet loss"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
e22b7e72-5a9a-4b07-a071-ddf89b102359
|
Author:anusaxen-Longduration-NonPreRelease-WRS-High-55410-V-BR.53-Check Ingress Firewall Allow/Deny functionality for SCTP [Serial]
|
['"context"', '"path/filepath"', '"strconv"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Author:anusaxen-Longduration-NonPreRelease-WRS-High-55410-V-BR.53-Check Ingress Firewall Allow/Deny functionality for SCTP [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
client_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[0].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp client pod in namespace")
createSCTPclientOnNode(oc, client_pod_pmtrs)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
server_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[1].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp server pod in namespace")
createSCTPserverOnNode(oc, server_pod_pmtrs)
err1 = waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err1, "sctpServerPod is not running")
//re-using SCTP testdata where nodePort value is hardcoded
nodePort := "30102"
//we need a port range to evaluate the rule properly; e.g. if the nodePort is 33000, port_range will hold 33000-33005
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range := nodePort + "-" + end_range
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//OVN geneve interface name
primaryInf := "genev_sys_6081"
//get Pod IPs depending on clustertype
sctpClientPodIP1, sctpClientPodIP2 := getPodIP(oc, oc.Namespace(), sctpClientPodname)
//we only need sctpServerPodIP1; on dual stack getPodIP returns the IPv6 address in the 1st variable, and on single stack the sole (IPv6 or IPv4) address
sctpServerPodIP1, _ := getPodIP(oc, oc.Namespace(), sctpServerPodName)
infwCR_single := infwCResource{
name: "infw-block-sctp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocol_1: "SCTP",
protocoltype1: "sctp",
range_1: port_range,
action_1: "Allow",
protocoltype2: "sctp",
protocol_2: "SCTP",
range_2: port_range,
action_2: "Allow",
template: infwCRtemplate,
}
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-block-sctp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "sctp",
protocol_1: "SCTP",
range_1: port_range,
action_1: "Allow",
protocoltype2: "sctp",
protocol_2: "SCTP",
range_2: port_range,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 sctpClient addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP2 + "/32"
infwCR_multiple.src_cidr2 = sctpClientPodIP1 + "/128"
defer deleteinfwCR(oc, infwCR_multiple.name)
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
}
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//normally the process should start immediately but we have seen 1-2 seconds delay using ncat-sctp under such circumstances so keeping 5 sec to make sure
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP1 will be either IPv6 or IPv4 according to cluster type (for dual stack it would be IPv6)
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP1+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
//normally the process should end immediately but we have seen 1-2 seconds delay using ncat-sctp under such circumstances so keeping 5 sec to make sure
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
//now lets make action 1 as Deny and make sure we expect error when we start test traffic
infwCR_single.action_1 = "Deny"
infwCR_multiple.action_1 = "Deny"
restartInfwDaemons(oc)
if ipStackType == "dualstack" {
g.By("create infw CR with multiple cidrs containing both IPv4 and IPv6 sctpClient addresses")
g.By("create Ingress node firewall Rule for dual stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP2 + "/32"
infwCR_multiple.src_cidr2 = sctpClientPodIP1 + "/128"
infwCR_multiple.createinfwCR_multiple_cidr(oc)
} else {
if ipStackType == "ipv6single" {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/128"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
infwCR_single.src_cidr1 = sctpClientPodIP1 + "/32"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
infwCR_single.createinfwCR(oc)
}
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//the process normally starts immediately, but we have seen a 1-2 second delay with ncat --sctp under these circumstances, so wait 5 seconds to be safe
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err = e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP1 will be either IPv6 or IPv4 according to cluster type (for dual stack it would be IPv6)
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP1+" 30102 --sctp; }")
o.Expect(err1).To(o.HaveOccurred()) //this traffic should be denied based on later created infw policy
//make sure Deny events were logged post daemons restart at line 664, Ref. OCPBUGS-11888
podNodeName, getNodeNameErr := exutil.GetPodNodeName(oc, oc.Namespace(), "sctpserver")
o.Expect(getNodeNameErr).NotTo(o.HaveOccurred())
infwDaemon := getinfwDaemonForNode(oc, podNodeName)
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
})
| |||||
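The port range used in the rules above is derived from the hardcoded SCTP nodePort by adding 5 to it. A minimal standalone sketch of that derivation; the helper name portRange is hypothetical and not part of the test suite, which inlines the same logic:

package main

import (
	"fmt"
	"strconv"
)

// portRange builds the inclusive "start-end" string the firewall rule expects,
// mirroring how the test expands nodePort 30102 into "30102-30107".
func portRange(basePort string, width int) (string, error) {
	start, err := strconv.Atoi(basePort)
	if err != nil {
		return "", fmt.Errorf("invalid port %q: %w", basePort, err)
	}
	return basePort + "-" + strconv.Itoa(start+width), nil
}

func main() {
	r, err := portRange("30102", 5)
	if err != nil {
		panic(err)
	}
	fmt.Println(r) // prints 30102-30107
}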
test case
|
openshift/openshift-tests-private
|
2d6bc58b-3649-406d-96da-00c79738c490
|
Longduration-NonPreRelease-Author:anusaxen-Medium-54973-Make sure events and metrics are logged for ingress-node-firewall-daemon [Serial]
|
['"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Longduration-NonPreRelease-Author:anusaxen-Medium-54973-Make sure events and metrics are logged for ingress-node-firewall-daemon [Serial]", func() {
var (
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
g.By("Events are being monitored in testcases wherever applicable so we will make sure metrics are being relayed to concerned port")
worker_node, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
infwDaemon := getinfwDaemonForNode(oc, worker_node)
cmd := "curl 127.0.0.1:39301/metrics"
output, err := execCommandInSpecificPod(oc, "openshift-ingress-node-firewall", infwDaemon, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingressnodefirewall"))
})
| |||||
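The 54973 case above confirms the ingress-node-firewall daemon exposes Prometheus metrics on 127.0.0.1:39301 by curling the endpoint from inside the daemon pod. A minimal sketch of the same check as plain Go, assuming the endpoint is reachable from wherever the snippet runs (in the test it only is from inside the pod); the helper name scrapeHasMetric is hypothetical:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// scrapeHasMetric fetches a Prometheus-style /metrics endpoint and reports
// whether any line contains the given substring, mirroring the
// "curl 127.0.0.1:39301/metrics" plus substring check done in the test.
func scrapeHasMetric(url, substr string) (bool, error) {
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return false, err
	}
	for _, line := range strings.Split(string(body), "\n") {
		if strings.Contains(line, substr) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	found, err := scrapeHasMetric("http://127.0.0.1:39301/metrics", "ingressnodefirewall")
	fmt.Println(found, err)
}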
test case
|
openshift/openshift-tests-private
|
95d00858-8735-41d9-855c-93b2210e4fe8
|
Author:anusaxen-High-55414-Check multiple CIDRS with multiple rules functionality with Ingress Firewall Node Operator [Serial]
|
['"context"', '"path/filepath"', '"strconv"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Author:anusaxen-High-55414-Check multiple CIDRS with multiple rules functionality with Ingress Firewall Node Operator [Serial]", func() {
var (
buildPruningBaseDirSCTP = exutil.FixturePath("testdata", "networking/sctp")
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpModule = filepath.Join(buildPruningBaseDirSCTP, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCR_multiple_cidr_template = filepath.Join(testDataDirInfw, "infw-multiple-cidr.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" {
g.Skip("This case requires single stack cluster")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
client_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[0].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp client pod in namespace")
createSCTPclientOnNode(oc, client_pod_pmtrs)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
server_pod_pmtrs := map[string]string{
"$nodename": nodeList.Items[1].Name,
"$namespace": oc.Namespace(),
}
g.By("creating sctp server pod in namespace")
createSCTPserverOnNode(oc, server_pod_pmtrs)
err1 = waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err1, "sctpServerPod is not running")
g.By("create a hello pod client in same namespace as of SCTP for TCP traffic check on same node as sctp client")
pod := pingPodResourceNode{
name: "hello-pod-client",
namespace: oc.Namespace(),
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
g.By("create a hello pod server in same namespace as of SCTP for TCP traffic check on same node as sctp server")
pod = pingPodResourceNode{
name: "hello-pod-server",
namespace: oc.Namespace(),
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
//re-using SCTP testdata where nodePort value is hardcoded
nodePort := "30102"
//we need a port range to evaluate the rule properly; for example, if the nodePort is 33000, port_range will hold the range 33000-33005
var intvar int
var end_range string
intvar, parseIntErr := strconv.Atoi(nodePort)
o.Expect(parseIntErr).NotTo(o.HaveOccurred())
end_range = strconv.Itoa(intvar + 5)
port_range_sctp := nodePort + "-" + end_range
port_range_tcp := "8080-8081"
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//OVN geneve interface name
primaryInf := "genev_sys_6081"
//get Pod IPs depending on clustertype
sctpClientPodIP, _ := getPodIP(oc, oc.Namespace(), sctpClientPodname)
//only sctpServerPodIP is needed: getPodIP returns the IPv6 address in the 1st var for dualstack and single stack IPv6 clusters, and the IPv4 address for single stack IPv4 clusters
sctpServerPodIP, _ := getPodIP(oc, oc.Namespace(), sctpServerPodName)
helloPodClientIP, _ := getPodIP(oc, oc.Namespace(), "hello-pod-client")
infwCR_multiple := infwCResource_multiple_cidr{
name: "infw-allow-sctp-tcp",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
src_cidr2: "",
protocoltype1: "sctp",
protocol_1: "SCTP",
range_1: port_range_sctp,
action_1: "Allow",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: port_range_tcp,
action_2: "Allow",
template: infwCR_multiple_cidr_template,
}
if ipStackType == "ipv6single" {
g.By("Create Custom Resource for IPv6 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/128"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/128"
} else {
g.By("Create Custom Resource for IPv4 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/32"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/32"
}
defer deleteinfwCR(oc, "--all")
infwCR_multiple.createinfwCR_multiple_cidr(oc)
//check sctp traffic as per allow rule
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//the process normally starts immediately, but we have seen a 1-2 second delay with ncat --sctp under these circumstances, so wait 5 seconds to be safe
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP1 will be either IPv6 or IPv4 according to cluster type
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
//check tcp traffic as per allow rule
CurlPod2PodPass(oc, oc.Namespace(), "hello-pod-client", oc.Namespace(), "hello-pod-server")
//delete infw-allow-sctp-tcp CR created above
//use the --all arg to delete all CRs to be sure; this use case has different CR names, so a defer delete with a specific CR name is not a great idea
deleteinfwCR(oc, "--all")
//Re-create CR with Deny rules now
infwCR_multiple.action_1 = "Deny"
infwCR_multiple.action_2 = "Deny"
infwCR_multiple.name = "infw-block-sctp-tcp"
if ipStackType == "ipv6single" {
g.By("Create Custom Resource for IPv6 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/128"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/128"
} else {
g.By("Create Custom Resource for IPv4 single stack")
infwCR_multiple.src_cidr1 = sctpClientPodIP + "/32"
infwCR_multiple.src_cidr2 = helloPodClientIP + "/32"
}
infwCR_multiple.createinfwCR_multiple_cidr(oc)
//check sctp traffic as per Deny rule
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err = oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
//the process normally starts immediately, but we have seen a 1-2 second delay with ncat --sctp under these circumstances, so wait 5 seconds to be safe
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err = e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
//sctpServerPodIP1 will be either IPv6 or IPv4 according to cluster type
g.By("sctpclient pod start to send sctp traffic")
_, err1 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'test traffic' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).To(o.HaveOccurred())
//check tcp traffic as per Deny rule
CurlPod2PodFail(oc, oc.Namespace(), "hello-pod-client", oc.Namespace(), "hello-pod-server")
})
| |||||
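In the single-stack variants above, the source CIDR for each rule is built by appending /32 to an IPv4 pod address or /128 to an IPv6 one. A small sketch of that conversion driven by the address family rather than the cluster stack type; the helper name hostCIDR is hypothetical and not part of the test helpers:

package main

import (
	"fmt"
	"net"
)

// hostCIDR turns a single pod IP into the host-sized CIDR used as the
// firewall rule source: /32 for IPv4, /128 for IPv6.
func hostCIDR(ip string) (string, error) {
	parsed := net.ParseIP(ip)
	if parsed == nil {
		return "", fmt.Errorf("not an IP address: %q", ip)
	}
	if parsed.To4() != nil {
		return ip + "/32", nil
	}
	return ip + "/128", nil
}

func main() {
	for _, ip := range []string{"10.128.2.15", "fd01:0:0:5::1a"} {
		cidr, err := hostCIDR(ip)
		fmt.Println(cidr, err)
	}
}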
test case
|
openshift/openshift-tests-private
|
a0bc0bfa-7965-4196-914e-f732bd70411e
|
Author:anusaxen-ROSA-High-73844-Check Ingress Node Firewall functionality for blocking SSH traffic [Serial]
|
['"context"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw.go
|
g.It("Author:anusaxen-ROSA-High-73844-Check Ingress Node Firewall functionality for blocking SSH traffic [Serial]", func() {
var (
testDataDirInfw = exutil.FixturePath("testdata", "networking/ingressnodefirewall")
infwCRtemplate = filepath.Join(testDataDirInfw, "infw.yaml")
infwCfgTemplate = filepath.Join(testDataDirInfw, "infw-config.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" {
g.Skip("This case requires single stack cluster IPv4/IPv6")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create Ingress node firewall config")
infwCfg := infwConfigResource{
namespace: "openshift-ingress-node-firewall",
nodelabel: "node-role.kubernetes.io/worker",
template: infwCfgTemplate,
}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
//get cluster default mgmt interface
primaryInf := getPrimaryNICname(oc)
infwCR_single := infwCResource{
name: "infw-block-ssh",
primary_inf: primaryInf,
nodelabel: "node-role.kubernetes.io/worker",
src_cidr1: "",
protocol_1: "TCP",
protocoltype1: "tcp",
range_1: "22", //ssh port
action_1: "Deny",
protocoltype2: "tcp",
protocol_2: "TCP",
range_2: "22",
action_2: "Allow",
template: infwCRtemplate,
}
if ipStackType == "ipv6single" {
//ssh traffic coming towards any worker node should be blocked
infwCR_single.src_cidr1 = "::/0"
g.By("create Ingress node firewall Rule Custom Resource for IPv6 single stack")
} else {
//ssh traffic coming towards any worker node should be blocked
infwCR_single.src_cidr1 = "0.0.0.0/0"
g.By("create Ingress node firewall Rule Custom Resource for IPv4 single stack")
}
defer deleteinfwCR(oc, infwCR_single.name)
infwCR_single.createinfwCR(oc)
//Identify the first master node to act as the ssh source for the worker node
firstMasterNode, err := exutil.GetFirstMasterNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
sshcmd := "ssh -o ConnectTimeout=1 core@" + nodeList.Items[0].Name
sshOutput, _ := exutil.DebugNodeWithChroot(oc, firstMasterNode, "/bin/bash", "-c", sshcmd)
o.Expect(strings.Contains(sshOutput, "Connection timed out")).Should(o.BeTrue())
//get corresponding infw daemon pod for targeted worker
infwDaemon := getinfwDaemonForNode(oc, nodeList.Items[0].Name)
//make sure events were logged for ssh Deny
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", "openshift-ingress-node-firewall", infwDaemon, "-c", "events").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ruleId 1 action Drop")).Should(o.BeTrue())
o.Expect(strings.Contains(output, "dstPort 22")).Should(o.BeTrue())
})
| |||||
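The 73844 case above verifies the deny by grepping the daemon's events container logs for the drop entry. A minimal sketch of that log check; the exact strings ("ruleId 1 action Drop", "dstPort 22") are taken from the test and should be treated as an assumption about the event wording rather than a stable interface:

package main

import (
	"fmt"
	"strings"
)

// sshDropLogged reports whether an ingress-node-firewall events log contains
// a drop event for destination port 22, matching what the test asserts after
// the blocked ssh attempt.
func sshDropLogged(eventsLog string) bool {
	return strings.Contains(eventsLog, "ruleId 1 action Drop") &&
		strings.Contains(eventsLog, "dstPort 22")
}

func main() {
	sample := "... ruleId 1 action Drop ... dstPort 22 ..."
	fmt.Println(sshDropLogged(sample)) // true
}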
test
|
openshift/openshift-tests-private
|
b8eace8f-2a03-4caf-b228-301077dd4dd9
|
ipv6_misc
|
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipv6_misc.go
|
package networking
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-networking] SDN misc", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-ipv6", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := exutil.CheckNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("This case requires OVNKubernetes as network plugin, skip the test as the cluster does not have OVN network plugin")
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-High-55193-Dual stack cluster fails on installation when multi-path routing entries exist. [Disruptive]", func() {
// Customer bug https://issues.redhat.com/browse/OCPBUGS-1318
ipStackType := checkIPStackType(oc)
g.By("Skip testing on ipv4 or ipv6 single stack cluster")
if ipStackType == "ipv4single" || ipStackType == "ipv6single" {
g.Skip("The case only can be run on dualstack cluster , skip for single stack cluster!!!")
}
g.By("Test on dualstack cluster")
if ipStackType == "dualstack" {
ns := "openshift-ovn-kubernetes"
g.By("Create multihop routes in one of ovnkubenode")
workerNode, nodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
ovnkubePod, podErr := exutil.GetPodName(oc, ns, "app=ovnkube-node", workerNode)
o.Expect(podErr).NotTo(o.HaveOccurred())
_, routeErr1 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r flush default")
o.Expect(routeErr1).NotTo(o.HaveOccurred())
defaultRoute1, routeErr2 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r show default")
o.Expect(routeErr2).NotTo(o.HaveOccurred())
o.Expect(defaultRoute1).To(o.ContainSubstring(""))
_, routeErr3 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r add default metric 48 nexthop via fe80::cee1:9402:8c35:be41 dev br-ex nexthop via fe80::cee1:9402:8c35:be42 dev br-ex")
o.Expect(routeErr3).NotTo(o.HaveOccurred())
defaultRoute2, routeErr4 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r show default")
o.Expect(routeErr4).NotTo(o.HaveOccurred())
o.Expect(defaultRoute2).To(o.ContainSubstring("nexthop via fe80::cee1:9402:8c35:be42 dev br-ex weight 1"))
o.Expect(defaultRoute2).To(o.ContainSubstring("nexthop via fe80::cee1:9402:8c35:be41 dev br-ex weight 1"))
g.By("Delete this ovnkubenode pod and restart a new one")
delErr := oc.WithoutNamespace().AsAdmin().Run("delete").Args("pod", ovnkubePod, "-n", ns).Execute()
o.Expect(delErr).NotTo(o.HaveOccurred())
podName, podErr1 := oc.AsAdmin().Run("get").Args("-n", ns, "pod", "-l=app=ovnkube-node", "--sort-by=metadata.creationTimestamp", "-o=jsonpath={.items[-1:].metadata.name}").Output()
o.Expect(podErr1).NotTo(o.HaveOccurred())
waitPodReady(oc, ns, podName)
g.By("Get correct log information about default gateway from the new ovnkubenode pod")
expectedString := "Found default gateway interface br-ex fe80::cee1:9402:8c35:be41"
podLogs, LogErr := checkLogMessageInPod(oc, ns, "ovnkube-node", podName, "'"+expectedString+"'"+"| tail -1")
o.Expect(LogErr).NotTo(o.HaveOccurred())
o.Expect(podLogs).To(o.ContainSubstring(expectedString))
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
7bce9ad0-5308-4f79-91d3-baca9bd90050
|
NonHyperShiftHOST-Author:weliang-High-55193-Dual stack cluster fails on installation when multi-path routing entries exist. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipv6_misc.go
|
g.It("NonHyperShiftHOST-Author:weliang-High-55193-Dual stack cluster fails on installation when multi-path routing entries exist. [Disruptive]", func() {
// Customer bug https://issues.redhat.com/browse/OCPBUGS-1318
ipStackType := checkIPStackType(oc)
g.By("Skip testing on ipv4 or ipv6 single stack cluster")
if ipStackType == "ipv4single" || ipStackType == "ipv6single" {
g.Skip("The case only can be run on dualstack cluster , skip for single stack cluster!!!")
}
g.By("Test on dualstack cluster")
if ipStackType == "dualstack" {
ns := "openshift-ovn-kubernetes"
g.By("Create multihop routes in one of ovnkubenode")
workerNode, nodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
ovnkubePod, podErr := exutil.GetPodName(oc, ns, "app=ovnkube-node", workerNode)
o.Expect(podErr).NotTo(o.HaveOccurred())
_, routeErr1 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r flush default")
o.Expect(routeErr1).NotTo(o.HaveOccurred())
defaultRoute1, routeErr2 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r show default")
o.Expect(routeErr2).NotTo(o.HaveOccurred())
o.Expect(defaultRoute1).To(o.ContainSubstring(""))
_, routeErr3 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r add default metric 48 nexthop via fe80::cee1:9402:8c35:be41 dev br-ex nexthop via fe80::cee1:9402:8c35:be42 dev br-ex")
o.Expect(routeErr3).NotTo(o.HaveOccurred())
defaultRoute2, routeErr4 := execCommandInSpecificPod(oc, ns, ovnkubePod, "ip -6 r show default")
o.Expect(routeErr4).NotTo(o.HaveOccurred())
o.Expect(defaultRoute2).To(o.ContainSubstring("nexthop via fe80::cee1:9402:8c35:be42 dev br-ex weight 1"))
o.Expect(defaultRoute2).To(o.ContainSubstring("nexthop via fe80::cee1:9402:8c35:be41 dev br-ex weight 1"))
g.By("Delete this ovnkubenode pod and restart a new one")
delErr := oc.WithoutNamespace().AsAdmin().Run("delete").Args("pod", ovnkubePod, "-n", ns).Execute()
o.Expect(delErr).NotTo(o.HaveOccurred())
podName, podErr1 := oc.AsAdmin().Run("get").Args("-n", ns, "pod", "-l=app=ovnkube-node", "--sort-by=metadata.creationTimestamp", "-o=jsonpath={.items[-1:].metadata.name}").Output()
o.Expect(podErr1).NotTo(o.HaveOccurred())
waitPodReady(oc, ns, podName)
g.By("Get correct log information about default gateway from the new ovnkubenode pod")
expectedString := "Found default gateway interface br-ex fe80::cee1:9402:8c35:be41"
podLogs, LogErr := checkLogMessageInPod(oc, ns, "ovnkube-node", podName, "'"+expectedString+"'"+"| tail -1")
o.Expect(LogErr).NotTo(o.HaveOccurred())
o.Expect(podLogs).To(o.ContainSubstring(expectedString))
}
})
| ||||||
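The 55193 case above reproduces the customer scenario by running a single "ip -6 r add default ... nexthop via ... nexthop via ..." command inside the ovnkube-node pod. A sketch of how such a command string could be assembled from a list of next hops; the helper name multipathDefaultRoute is hypothetical, and the metric and device are parameters only for illustration (the test hardcodes metric 48 and dev br-ex):

package main

import (
	"fmt"
	"strings"
)

// multipathDefaultRoute builds an "ip -6 route add" command with one nexthop
// clause per gateway, producing a multi-path IPv6 default route.
func multipathDefaultRoute(metric int, dev string, nexthops []string) string {
	parts := []string{fmt.Sprintf("ip -6 r add default metric %d", metric)}
	for _, nh := range nexthops {
		parts = append(parts, fmt.Sprintf("nexthop via %s dev %s", nh, dev))
	}
	return strings.Join(parts, " ")
}

func main() {
	fmt.Println(multipathDefaultRoute(48, "br-ex", []string{
		"fe80::cee1:9402:8c35:be41",
		"fe80::cee1:9402:8c35:be42",
	}))
}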
test
|
openshift/openshift-tests-private
|
a6031677-1bde-4730-b977-499bcb0a88cd
|
metrics
|
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
package networking
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN metrics", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-metrics", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-47524-Metrics for ovn-appctl stopwatch/show command.", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
prometheusURL := "localhost:29105/metrics"
metricName1 := "ovn_controller_if_status_mgr_run_total_samples"
metricName2 := "ovn_controller_if_status_mgr_run_long_term_avg"
metricName3 := "ovn_controller_bfd_run_total_samples"
metricName4 := "ovn_controller_bfd_run_long_term_avg"
metricName5 := "ovn_controller_flow_installation_total_samples"
metricName6 := "ovn_controller_flow_installation_long_term_avg"
metricName7 := "ovn_controller_if_status_mgr_run_total_samples"
metricName8 := "ovn_controller_if_status_mgr_run_long_term_avg"
metricName9 := "ovn_controller_if_status_mgr_update_total_samples"
metricName10 := "ovn_controller_if_status_mgr_update_long_term_avg"
metricName11 := "ovn_controller_flow_generation_total_samples"
metricName12 := "ovn_controller_flow_generation_long_term_avg"
metricName13 := "ovn_controller_pinctrl_run_total_samples"
metricName14 := "ovn_controller_pinctrl_run_long_term_avg"
metricName15 := "ovn_controller_ofctrl_seqno_run_total_samples"
metricName16 := "ovn_controller_ofctrl_seqno_run_long_term_avg"
metricName17 := "ovn_controller_patch_run_total_samples"
metricName18 := "ovn_controller_patch_run_long_term_avg"
metricName19 := "ovn_controller_ct_zone_commit_total_samples"
metricName20 := "ovn_controller_ct_zone_commit_long_term_avg"
metricName := []string{metricName1, metricName2, metricName3, metricName4, metricName5, metricName6, metricName7, metricName8, metricName9, metricName10, metricName11, metricName12, metricName13, metricName14, metricName15, metricName16, metricName17, metricName18, metricName19, metricName20}
for _, value := range metricName {
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, value)
if metricValue != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", value)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
}
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-47471-Record update to cache versus port binding.", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
metricName1 := "ovnkube_controller_pod_first_seen_lsp_created_duration_seconds_count"
metricName2 := "ovnkube_controller_pod_lsp_created_port_binding_duration_seconds_count"
metricName3 := "ovnkube_controller_pod_port_binding_port_binding_chassis_duration_seconds_count"
metricName4 := "ovnkube_controller_pod_port_binding_chassis_port_binding_up_duration_seconds_count"
prometheusURL := "localhost:29103/metrics"
metricName := []string{metricName1, metricName2, metricName3, metricName4}
for _, value := range metricName {
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, value)
if metricValue != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", value)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
}
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-45841-Add OVN flow count metric.", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
prometheusURL := "localhost:29105/metrics"
metricName := "ovn_controller_integration_bridge_openflow_total"
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
if metricValue != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-45688-Metrics for egress firewall. [Disruptive]", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/metrics")
egressFirewall = filepath.Join(buildPruningBaseDir, "OVN-Rules.yaml")
)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
var metricValue1 string
var metricValue2 string
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
metricName := "ovnkube_controller_num_egress_firewall_rules"
prometheusURL := "localhost:29103/metrics"
exutil.By("get the metrics of ovnkube_controller_num_egress_firewall_rules before configuration")
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue1 = getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
e2e.Logf("The output of the ovnkube_master_num_egress_firewall_rules metrics before applying egressfirewall rules is : %v", metricValue1)
if metricValue1 >= "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
exutil.By("create egressfirewall rules in OVN cluster")
fwErr := oc.AsAdmin().Run("create").Args("-n", ns, "-f", egressFirewall).Execute()
o.Expect(fwErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-n", ns, "-f", egressFirewall).Execute()
fwOutput, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("egressfirewall", "-n", ns).Output()
o.Expect(fwOutput).To(o.ContainSubstring("EgressFirewall Rules applied"))
metricsOutputAfter := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue2 = getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
e2e.Logf("The output of the ovnkube_master_num_egress_firewall_rules metrics after applying egressfirewall rules is : %v", metricValue1)
metricValue1Int, _ := strconv.Atoi(metricValue1)
metricValue2Int, _ := strconv.Atoi(metricValue2)
if metricValue2Int == metricValue1Int+3 {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutputAfter, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutputAfter))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-45842-Metrics for IPSec enabled/disabled", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
ipsecState := checkIPsec(oc)
e2e.Logf("The ipsec state is : %v", ipsecState)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
prometheusURL := "localhost:29103/metrics"
metricName := "ovnkube_controller_ipsec_enabled"
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
e2e.Logf("The output of the ovnkube_controller_ipsec_enabled metrics is : %v", metricValue)
if metricValue == "1" && (ipsecState == "{}" || ipsecState == "Full") {
e2e.Logf("The IPsec is enabled in the cluster")
return true, nil
} else if metricValue == "0" && (ipsecState == "Disabled" || ipsecState == "External") {
e2e.Logf("The IPsec is disabled in the cluster")
return true, nil
} else {
e2e.Failf("Testing fail to get the correct metrics of ovnkube_controller_ipsec_enabled")
return false, nil
}
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-45687-Metrics for egress router", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/metrics")
egressrouterPod = filepath.Join(buildPruningBaseDir, "egressrouter.yaml")
)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("create a test pod")
podErr1 := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", egressrouterPod, "-n", ns).Execute()
o.Expect(podErr1).NotTo(o.HaveOccurred())
podErr2 := waitForPodWithLabelReady(oc, oc.Namespace(), "app=egress-router-cni")
exutil.AssertWaitPollNoErr(podErr2, "egressrouterPod is not running")
podName := getPodName(oc, "openshift-multus", "app=multus-admission-controller")
output, err := oc.AsAdmin().Run("exec").Args("-n", "openshift-multus", podName[0], "--", "curl", "localhost:9091/metrics").OutputToFile("metrics.txt")
o.Expect(err).NotTo(o.HaveOccurred())
metricOutput, _ := exec.Command("bash", "-c", "cat "+output+" | grep egress-router | awk '{print $2}'").Output()
metricValue := strings.TrimSpace(string(metricOutput))
e2e.Logf("The output of the egress-router metrics is : %v", metricValue)
o.Expect(metricValue).To(o.ContainSubstring("1"))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-45685-Metrics for Metrics for egressIP. [Disruptive]", func() {
var (
ovncmName = "kube-rbac-proxy"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
)
platform := checkPlatform(oc)
if !strings.Contains(platform, "vsphere") {
g.Skip("Skip for un-expected platform, egreeIP testing need to be executed on a vsphere cluster!")
}
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
podName := getOVNKMasterPod(oc)
metricName := "ovnkube_clustermanager_num_egress_ips"
prometheusURL := "localhost:29108/metrics"
exutil.By("get the metrics of ovnkube_controller_num_egress_firewall_rules before configuration")
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
if metricValue == "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
exutil.By("Label EgressIP node")
var EgressNodeLabel = "k8s.ovn.org/egress-assignable"
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
if err != nil {
e2e.Logf("Unexpected error occurred: %v", err)
}
exutil.By("Apply EgressLabel Key on one node.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, nodeList.Items[0].Name, EgressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeList.Items[0].Name, EgressNodeLabel, "true")
exutil.By("Apply label to namespace")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Output()
_, err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create an egressip object")
sub1, _ := getDefaultSubnet(oc)
ips := findUnUsedIPs(oc, sub1, 2)
egressip1 := egressIPResource1{
name: "egressip-45685",
template: egressIPTemplate,
egressIP1: ips[0],
egressIP2: ips[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
exutil.By("get the metrics of ovnkube_controller_num_egress_firewall_rules after configuration")
metricsOutputAfter := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
if metricValue == "1" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutputAfter, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutputAfter))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-45689-Metrics for idling enable/disabled.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "metrics/metrics-pod.yaml")
testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
testPodName = "hello-pod"
)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("get controller-managert service ip address")
managertServiceIP := getControllerManagerLeaderIP(oc)
svcURL := net.JoinHostPort(managertServiceIP, "8443")
prometheusURL := "https://" + svcURL + "/metrics"
var metricNumber string
metricsErr := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output := getOVNMetrics(oc, prometheusURL)
metricOutput, _ := exec.Command("bash", "-c", "cat "+output+" | grep openshift_unidle_events_total | awk 'NR==3{print $2}'").Output()
metricNumber = strings.TrimSpace(string(metricOutput))
e2e.Logf("The output of openshift_unidle_events metrics is : %v", metricNumber)
if metricNumber != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics of openshift_unidle_events and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr))
exutil.By("create a service")
createResourceFromFile(oc, ns, testSvcFile)
ServiceOutput, serviceErr := oc.WithoutNamespace().Run("get").Args("service", "-n", ns).Output()
o.Expect(serviceErr).NotTo(o.HaveOccurred())
o.Expect(ServiceOutput).To(o.ContainSubstring("test-service"))
exutil.By("create a test pod")
createResourceFromFile(oc, ns, testPodFile)
podErr := waitForPodWithLabelReady(oc, ns, "name=hello-pod")
exutil.AssertWaitPollNoErr(podErr, "hello-pod is not running")
exutil.By("get test service ip address")
testServiceIP, _ := getSvcIP(oc, ns, "test-service") //this case checks metrics, not the service itself, so the test-service dual-stack address is not needed
dstURL := net.JoinHostPort(testServiceIP, "27017")
exutil.By("test-pod can curl service ip address:port")
_, svcerr1 := e2eoutput.RunHostCmd(ns, testPodName, "curl -connect-timeout 5 -s "+dstURL)
o.Expect(svcerr1).NotTo(o.HaveOccurred())
exutil.By("idle test-service")
_, idleerr := oc.Run("idle").Args("-n", ns, "test-service").Output()
o.Expect(idleerr).NotTo(o.HaveOccurred())
exutil.By("test pod can curl service address:port again to unidle the svc")
//Need to curl the service several times, otherwise it causes curl: (7) Failed to connect to 172.30.248.18 port 27017
//after 0 ms: Connection refused\ncommand terminated with exit code 7\n\nerror:\nexit status 7"
for i := 0; i < 3; i++ {
e2eoutput.RunHostCmd(ns, testPodName, "curl -connect-timeout 5 -s "+dstURL)
}
_, svcerr2 := e2eoutput.RunHostCmd(ns, testPodName, "curl -connect-timeout 5 -s "+dstURL)
o.Expect(svcerr2).NotTo(o.HaveOccurred())
//Because of Bug 2064786, the metrics of openshift_unidle_events_total are not always available
//so curl several times to get the metrics of openshift_unidle_events_total
metricsOutput := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output := getOVNMetrics(oc, prometheusURL)
metricOutput, _ := exec.Command("bash", "-c", "cat "+output+" | grep openshift_unidle_events_total | awk 'NR==3{print $2}'").Output()
metricValue := strings.TrimSpace(string(metricOutput))
e2e.Logf("The output of openshift_unidle_events metrics is : %v", metricValue)
if !strings.Contains(metricValue, metricNumber) {
return true, nil
}
e2e.Logf("Can't get correct metrics of openshift_unidle_events and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-52072- Add mechanism to record duration for k8 kinds.", func() {
var (
namespace = "openshift-ovn-kubernetes"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
leaderNodeIP := getPodIPv4(oc, namespace, podName)
ip := net.ParseIP(leaderNodeIP)
var prometheusURL string
if ip.To4() == nil {
prometheusURL = "https://[" + leaderNodeIP + "]:9103/metrics"
} else {
prometheusURL = "https://" + leaderNodeIP + ":9103/metrics"
}
metricName1 := "ovnkube_controller_network_programming_ovn_duration_seconds_bucket"
metricName2 := "ovnkube_controller_network_programming_duration_seconds_bucket"
checkovnkubeMasterNetworkProgrammingetrics(oc, prometheusURL, metricName1)
checkovnkubeMasterNetworkProgrammingetrics(oc, prometheusURL, metricName2)
})
g.It("Author:qiowang-Medium-53969-Verify OVN controller SB DB connection status metric works [Disruptive]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovn_controller_southbound_database_connected"
)
nodes, getNodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath='{.items[*].metadata.name}'").Output()
nodeName := strings.Split(strings.Trim(nodes, "'"), " ")[0]
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
podName, getPodNameErr := exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
exutil.By("1. Restart pod " + podName + " in " + namespace + " to make the pod logs clear")
delPodErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", podName, "-n", namespace, "--ignore-not-found=true").Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
podName, getPodNameErr = exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
waitPodReady(oc, namespace, podName)
exutil.By("2. Get the metrics of " + metricName + " when ovn controller connected to SB DB")
prometheusURL := "localhost:29105/metrics"
containerName := "kube-rbac-proxy-ovn-metrics"
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
if metricValue == "1" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
exutil.By("3. remove ovnsb_db.sock and restart ovn controller process to disconnect socket from ovn controller to SB DB")
defer func() {
deferErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", podName, "-n", namespace, "--ignore-not-found=true").Execute()
o.Expect(deferErr).NotTo(o.HaveOccurred())
podName, getPodNameErr = exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
waitPodReady(oc, namespace, podName)
}()
_, rmErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, "-c", "ovn-controller", podName, "--", "rm", "-f", "/var/run/ovn/ovnsb_db.sock").Output()
o.Expect(rmErr).NotTo(o.HaveOccurred())
getPid, getErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, "-c", "ovn-controller", podName, "--", "cat", "/var/run/ovn/ovn-controller.pid").Output()
o.Expect(getErr).NotTo(o.HaveOccurred())
pid := strings.Split(getPid, "\n")[0]
_, killErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, "-c", "ovn-controller", podName, "--", "kill", "-9", pid).Output()
o.Expect(killErr).NotTo(o.HaveOccurred())
exutil.By("4. Waiting for ovn controller disconnected to SB DB")
_, getLogErr := exutil.WaitAndGetSpecificPodLogs(oc, namespace, "ovn-controller", podName, "\"/var/run/ovn/ovnsb_db.sock: continuing to reconnect in the background\"")
o.Expect(getLogErr).NotTo(o.HaveOccurred())
exutil.By("5. Get the metrics of " + metricName + " when ovn controller disconnected to SB DB")
metricsOutput1 := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
if metricValue1 == "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput1, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput1))
})
g.It("Author:qiowang-Medium-60539-Verify metrics ovs_vswitchd_interfaces_total. [Serial]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovs_vswitchd_interfaces_total"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
delta = 3
)
nodes, getNodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath='{.items[*].metadata.name}'").Output()
nodeName := strings.Split(strings.Trim(nodes, "'"), " ")[0]
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
podName, getPodNameErr := exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
exutil.By("1. Get the metrics of " + metricName + " before creating new pod on the node")
prometheusURL := "localhost:29105/metrics"
containerName := "kube-rbac-proxy-ovn-metrics"
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
exutil.By("2. Create test pods and scale test pods to 10")
ns := oc.Namespace()
createResourceFromFile(oc, ns, testPodFile)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name=test-pods"), fmt.Sprintf("Waiting for pod with label name=test-pods become ready timeout"))
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("rc/test-rc", "-n", ns, "-p", "{\"spec\":{\"template\":{\"spec\":{\"nodeName\":\""+nodeName+"\"}}}}", "--type=merge").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
scaleErr := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc/test-rc", "--replicas=0", "-n", ns).Execute()
o.Expect(scaleErr).NotTo(o.HaveOccurred())
scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc/test-rc", "--replicas=10", "-n", ns).Execute()
o.Expect(scaleErr).NotTo(o.HaveOccurred())
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name=test-pods"), fmt.Sprintf("Waiting for pod with label name=test-pods become ready timeout after scale up"))
exutil.By("3. Get the metrics of " + metricName + " after creating new pod on the node")
metricValue1Int, _ := strconv.Atoi(metricValue1)
expectedIncFloor := metricValue1Int + 10 - delta
expectedIncCeil := metricValue1Int + 10 + delta
e2e.Logf("The expected value of the %s is : %v to %v", metricName, expectedIncFloor, expectedIncCeil)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue2 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
metricValue2Int, _ := strconv.Atoi(metricValue2)
if metricValue2Int >= expectedIncFloor && metricValue2Int <= expectedIncCeil {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
exutil.By("4. Delete the pod on the node")
scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc/test-rc", "--replicas=0", "-n", ns).Execute()
o.Expect(scaleErr).NotTo(o.HaveOccurred())
delErr := waitForPodWithLabelGone(oc, ns, "name=test-pods")
o.Expect(delErr).NotTo(o.HaveOccurred())
exutil.By("5. Get the metrics of " + metricName + " after deleting the pod on the node")
expectedDecFloor := metricValue1Int - delta
expectedDecCeil := metricValue1Int + delta
e2e.Logf("The expected value of the %s is : %v to %v", metricName, expectedDecFloor, expectedDecCeil)
metricDecOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue3 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
metricValue3Int, _ := strconv.Atoi(metricValue3)
if metricValue3Int >= expectedDecFloor && metricValue3Int <= expectedDecCeil {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricDecOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricDecOutput))
})
g.It("NonPreRelease-Longduration-Author:qiowang-Medium-60708-Verify metrics ovnkube_resource_retry_failures_total. [Serial] [Slow]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovnkube_resource_retry_failures_total"
egressNodeLabel = "k8s.ovn.org/egress-assignable"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
)
exutil.By("1. Get the metrics of " + metricName + " before resource retry failure occur")
prometheusURL := "localhost:29108/metrics"
ovnMasterPodName := getOVNKMasterPod(oc)
containerName := "kube-rbac-proxy"
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
exutil.By("2. Configure egressip with invalid ip address to trigger resource retry")
exutil.By("2.1 Label EgressIP node")
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, nodeName, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeName, egressNodeLabel, "true")
exutil.By("2.2 Create new namespace and apply label")
oc.SetupProject()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "name-").Output()
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "name=test").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("2.3 Create egressip object with invalid ip address")
egressipName := "egressip-" + getRandomString()
egressip := egressIPResource1{
name: egressipName,
template: egressIPTemplate,
egressIP1: "a.b.c.d",
egressIP2: "a.b.0.1",
}
defer egressip.deleteEgressIPObject1(oc)
egressip.createEgressIPObject1(oc)
exutil.By("3. Waiting for ovn resource retry failure")
targetLog := egressipName + ": exceeded number of failed attempts"
checkErr := wait.Poll(2*time.Minute, 16*time.Minute, func() (bool, error) {
podLogs, logErr := exutil.GetSpecificPodLogs(oc, namespace, "ovnkube-cluster-manager", ovnMasterPodName, "'"+targetLog+"'")
if len(podLogs) == 0 || logErr != nil {
e2e.Logf("did not get expected podLogs, or have err: %v, try again", logErr)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("fail to get expected log in pod %v, err: %v", ovnMasterPodName, checkErr))
exutil.By("4. Get the metrics of " + metricName + " again when resource retry failure occur")
metricValue1Int, _ := strconv.Atoi(metricValue1)
expectedIncValue := strconv.Itoa(metricValue1Int + 1)
e2e.Logf("The expected value of the %s is : %v", metricName, expectedIncValue)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue2 := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
if metricValue2 == expectedIncValue {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
})
g.It("NonHyperShiftHOST-Author:qiowang-Medium-60192-Verify metrics for egress ip unreachable and re-balance total [Disruptive] [Slow]", func() {
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "aws") || strings.Contains(platform, "gcp") || strings.Contains(platform, "openstack") || strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "azure") || strings.Contains(platform, "nutanix")
if !acceptedPlatform {
g.Skip("Test cases should be run on AWS/GCP/Azure/Openstack/Vsphere/BareMetal/Nutanix cluster with ovn network plugin, skip for other platforms !!")
}
var (
metricName1 = "ovnkube_clustermanager_egress_ips_rebalance_total"
metricName2 = "ovnkube_clustermanager_egress_ips_node_unreachable_total"
egressNodeLabel = "k8s.ovn.org/egress-assignable"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
)
exutil.By("1. Get list of nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
exutil.By("2. Configure egressip")
exutil.By("2.1 Label one EgressIP node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
exutil.By("2.2 Create new namespace and apply label")
oc.SetupProject()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "org-").Execute()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "org=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
exutil.By("2.3 Create egressip object")
ipStackType := checkIPStackType(oc)
var freeIPs []string
if ipStackType == "ipv6single" {
freeIPs = findFreeIPv6s(oc, egressNodes[0], 1)
} else {
freeIPs = findFreeIPs(oc, egressNodes[0], 1)
}
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-60192",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "purple",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
exutil.By("2.4. Check egressip is assigned to the egress node")
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
egressipAssignedNode1 := egressIPMaps1[0]["node"]
e2e.Logf("egressip is assigned to:%v", egressipAssignedNode1)
o.Expect(egressipAssignedNode1).To(o.ContainSubstring(egressNodes[0]))
exutil.By("3. Get the metrics before egressip re-balance")
prometheusURL := "localhost:29108/metrics"
ovnMasterPodName := getOVNKMasterPod(oc)
containerName := "kube-rbac-proxy"
metric1BeforeReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName1)
metric2BeforeReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName2)
exutil.By("4. Label one more EgressIP node and remove label from the previous one to trigger egressip rebalance")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
o.Eventually(func() bool {
egressIPMaps2 := getAssignedEIPInEIPObject(oc, egressip1.name)
return len(egressIPMaps2) == 1 && egressIPMaps2[0]["node"] == egressNodes[1]
}, "300s", "10s").Should(o.BeTrue(), "egressIP was not failover to the new egress node!")
e2e.Logf("egressip is assigned to:%v", egressNodes[1])
exutil.By("5. Get the metrics after egressip re-balance")
metric1ValueInt, parseIntErr1 := strconv.Atoi(metric1BeforeReboot)
o.Expect(parseIntErr1).NotTo(o.HaveOccurred())
expectedMetric1Value := strconv.Itoa(metric1ValueInt + 1)
e2e.Logf("The expected value of the %s is : %v", metricName1, expectedMetric1Value)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metric1AfterReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName1)
if metric1AfterReboot == expectedMetric1Value {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s, try again", metricName1)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
exutil.By("6. Reboot the egressip assigned node, to trigger egressip node unreachable")
defer checkNodeStatus(oc, egressNodes[1], "Ready")
rebootNode(oc, egressNodes[1])
checkNodeStatus(oc, egressNodes[1], "NotReady")
checkNodeStatus(oc, egressNodes[1], "Ready")
exutil.By("7. Get the metrics after egressip node unreachable")
metric2ValueInt, parseIntErr2 := strconv.Atoi(metric2BeforeReboot)
o.Expect(parseIntErr2).NotTo(o.HaveOccurred())
expectedMetric2Value := strconv.Itoa(metric2ValueInt + 1)
e2e.Logf("The expected value of the %s is : %v", metricName2, expectedMetric2Value)
metricIncOutput = wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metric2AfterReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName2)
if metric2AfterReboot == expectedMetric2Value {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s, try again", metricName2)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
})
g.It("Author:qiowang-Medium-60704-Verify metrics ovs_vswitchd_interface_up_wait_seconds_total. [Serial]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovs_vswitchd_interface_up_wait_seconds_total"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
nodes, getNodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath='{.items[*].metadata.name}'").Output()
nodeName := strings.Split(strings.Trim(nodes, "'"), " ")[0]
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
podName, getPodNameErr := exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
exutil.By("1. Get the metrics of " + metricName + " before creating new pods on the node")
prometheusURL := "localhost:29105/metrics"
containerName := "kube-rbac-proxy-ovn-metrics"
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
exutil.By("2. Create test pods and scale test pods to 30")
ns := oc.Namespace()
createResourceFromFile(oc, ns, testPodFile)
podReadyErr1 := waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(podReadyErr1, "this pod with label name=test-pods not ready")
_, scaleUpErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("replicationcontroller/test-rc", "-n", ns, "-p", "{\"spec\":{\"replicas\":30,\"template\":{\"spec\":{\"nodeSelector\":{\"kubernetes.io/hostname\":\""+nodeName+"\"}}}}}", "--type=merge").Output()
o.Expect(scaleUpErr).NotTo(o.HaveOccurred())
podReadyErr2 := waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(podReadyErr2, "this pod with label name=test-pods not all ready")
exutil.By("3. Get the metrics of " + metricName + " after creating new pods on the node")
metricValue1Float, parseErr1 := strconv.ParseFloat(metricValue1, 64)
o.Expect(parseErr1).NotTo(o.HaveOccurred())
e2e.Logf("The expected value of the %s should be greater than %v", metricName, metricValue1)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue2 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
metricValue2Float, parseErr2 := strconv.ParseFloat(metricValue2, 64)
o.Expect(parseErr2).NotTo(o.HaveOccurred())
if metricValue2Float > metricValue1Float {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
})
g.It("Author:qiowang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-64077-Verify metrics for ipsec enabled/disabled when configure it at runtime [Disruptive] [Slow]", func() {
var (
metricName = "ovnkube_controller_ipsec_enabled"
)
ipsecState := checkIPsec(oc)
if ipsecState == "{}" || ipsecState == "Full" || ipsecState == "External" {
g.Skip("Skip the testing in the ipsec enabled clusters!!!")
}
exutil.By("1. Enable IPsec at runtime")
defer configIPSecAtRuntime(oc, "disabled")
enableErr := configIPSecAtRuntime(oc, "full")
o.Expect(enableErr).NotTo(o.HaveOccurred())
exutil.By("2. Check metrics for IPsec enabled/disabled after enabling at runtime")
prometheusURL := "localhost:29103/metrics"
containerName := "kube-rbac-proxy-node"
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
e2e.Logf("The expected value of the %s is 1", metricName)
ipsecEnabled := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValueAfterEnabled := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
if metricValueAfterEnabled == "1" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s when enabled IPSec and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(ipsecEnabled, fmt.Sprintf("Fail to get metric when enabled IPSec and the error is:%s", ipsecEnabled))
//Add one more step check to cover bug https://issues.redhat.com/browse/OCPBUGS-29305
exutil.By("3. Verify no openssl error in ipsec pods ds")
output, ipsecDSErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ds", "ovn-ipsec-host", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(ipsecDSErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "checkedn")).NotTo(o.BeTrue())
o.Expect(strings.Contains(output, "checkend")).To(o.BeTrue())
exutil.By("4. Disable IPsec at runtime")
disableErr := configIPSecAtRuntime(oc, "disabled")
o.Expect(disableErr).NotTo(o.HaveOccurred())
exutil.By("5. Check metrics for IPsec enabled/disabled after disabling at runtime")
ovnMasterPodName = getOVNKMasterOVNkubeNode(oc)
e2e.Logf("The expected value of the %s is 0", metricName)
ipsecDisabled := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValueAfterDisabled := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
if metricValueAfterDisabled == "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s when disabled IPSec and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(ipsecDisabled, fmt.Sprintf("Fail to get metric when disabled IPSec and the error is:%s", ipsecDisabled))
})
// author [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-High-72893-IPSec state can be shown in prometheus endpoint.", func() {
metricQuery := "openshift:openshift_network_operator_ipsec_state:info"
exutil.By(fmt.Sprintf("Check that the metric %s is exposed to telemetry", metricQuery))
expectedExposedMetric := fmt.Sprintf(`{__name__=\"%s\"}`, metricQuery)
telemetryConfig, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", "openshift-monitoring", "telemetry-config", "-o=jsonpath={.data}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(telemetryConfig).To(o.ContainSubstring(expectedExposedMetric),
"Metric %s, is not exposed to telemetry", metricQuery)
mon, err := exutil.NewPrometheusMonitor(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating new prometheus monitor")
exutil.By(fmt.Sprintf("Verify the metric %s displays the right value", metricQuery))
queryResult, err := mon.SimpleQuery(metricQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", metricQuery)
jsonResult := gjson.Parse(queryResult)
e2e.Logf(jsonResult.String())
status := jsonResult.Get("status").String()
o.Expect(status).Should(o.Equal("success"),
"Query %s execution failed: %s", metricQuery, status)
is_legacy_api := gjson.Parse(queryResult).Get("data.result.0.metric.is_legacy_api").String()
mode := gjson.Parse(queryResult).Get("data.result.0.metric.mode").String()
metricValue := gjson.Parse(queryResult).Get("data.result.0.value.1").String()
o.Expect(metricValue).Should(o.Equal("1"))
ipsecState := checkIPsec(oc)
switch ipsecState {
case "Full":
o.Expect(is_legacy_api).Should(o.Equal("false"))
o.Expect(mode).Should(o.Equal("Full"))
case "External":
o.Expect(is_legacy_api).Should(o.Equal("false"))
o.Expect(mode).Should(o.Equal("External"))
case "Disabled":
o.Expect(is_legacy_api).Should(o.Equal("false"))
o.Expect(mode).Should(o.Equal("Disabled"))
case "{}":
o.Expect(is_legacy_api).Should(o.Equal("true"))
o.Expect(mode).Should(o.Equal("Full"))
default:
o.Expect(is_legacy_api).Should(o.Equal("N/A - ipsec not supported (non-OVN network)"))
o.Expect(mode).Should(o.Equal("Disabled"))
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
4833dee2-68f9-4b2b-a093-b328fec0a96c
|
NonHyperShiftHOST-Author:weliang-Medium-47524-Metrics for ovn-appctl stopwatch/show command.
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-47524-Metrics for ovn-appctl stopwatch/show command.", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
prometheusURL := "localhost:29105/metrics"
metricName1 := "ovn_controller_if_status_mgr_run_total_samples"
metricName2 := "ovn_controller_if_status_mgr_run_long_term_avg"
metricName3 := "ovn_controller_bfd_run_total_samples"
metricName4 := "ovn_controller_bfd_run_long_term_avg"
metricName5 := "ovn_controller_flow_installation_total_samples"
metricName6 := "ovn_controller_flow_installation_long_term_avg"
metricName7 := "ovn_controller_if_status_mgr_run_total_samples"
metricName8 := "ovn_controller_if_status_mgr_run_long_term_avg"
metricName9 := "ovn_controller_if_status_mgr_update_total_samples"
metricName10 := "ovn_controller_if_status_mgr_update_long_term_avg"
metricName11 := "ovn_controller_flow_generation_total_samples"
metricName12 := "ovn_controller_flow_generation_long_term_avg"
metricName13 := "ovn_controller_pinctrl_run_total_samples"
metricName14 := "ovn_controller_pinctrl_run_long_term_avg"
metricName15 := "ovn_controller_ofctrl_seqno_run_total_samples"
metricName16 := "ovn_controller_ofctrl_seqno_run_long_term_avg"
metricName17 := "ovn_controller_patch_run_total_samples"
metricName18 := "ovn_controller_patch_run_long_term_avg"
metricName19 := "ovn_controller_ct_zone_commit_total_samples"
metricName20 := "ovn_controller_ct_zone_commit_long_term_avg"
metricName := []string{metricName1, metricName2, metricName3, metricName4, metricName5, metricName6, metricName7, metricName8, metricName9, metricName10, metricName11, metricName12, metricName13, metricName14, metricName15, metricName16, metricName17, metricName18, metricName19, metricName20}
for _, value := range metricName {
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, value)
if metricValue != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", value)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
}
})
| |||||
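Note: the stopwatch metrics in the case above are all read through the same helper and the same retry loop. A minimal, self-contained sketch of that polling pattern follows, assuming only a fetch callback that returns the metric value as a string (the shape of getOVNMetricsInSpecificContainer is inferred from its usage in these cases, not from its definition):
package main
import (
	"fmt"
	"time"
	"k8s.io/apimachinery/pkg/util/wait"
)
// waitForMetric polls fetch() on the given interval until it returns a non-empty
// value or the timeout expires; it mirrors the retry loops used in the cases above.
func waitForMetric(fetch func() string, interval, timeout time.Duration) (string, error) {
	var value string
	err := wait.Poll(interval, timeout, func() (bool, error) {
		value = fetch()
		if value != "" {
			return true, nil
		}
		// Metric not scraped yet; keep retrying until the timeout.
		return false, nil
	})
	if err != nil {
		return "", fmt.Errorf("metric never became available: %w", err)
	}
	return value, nil
}
func main() {
	// Hypothetical fetch callback used only to make the sketch runnable.
	val, err := waitForMetric(func() string { return "42" }, time.Second, 10*time.Second)
	fmt.Println(val, err)
}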
test case
|
openshift/openshift-tests-private
|
358939a5-0547-4f59-b3a9-324a8e638283
|
NonHyperShiftHOST-Author:weliang-Medium-47471-Record update to cache versus port binding.
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-47471-Record update to cache versus port binding.", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
metricName1 := "ovnkube_controller_pod_first_seen_lsp_created_duration_seconds_count"
metricName2 := "ovnkube_controller_pod_lsp_created_port_binding_duration_seconds_count"
metricName3 := "ovnkube_controller_pod_port_binding_port_binding_chassis_duration_seconds_count"
metricName4 := "ovnkube_controller_pod_port_binding_chassis_port_binding_up_duration_seconds_count"
prometheusURL := "localhost:29103/metrics"
metricName := []string{metricName1, metricName2, metricName3, metricName4}
for _, value := range metricName {
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, value)
if metricValue != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", value)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
4bc21103-6a6a-4d76-a04c-63721806c2fd
|
NonHyperShiftHOST-Author:weliang-Medium-45841-Add OVN flow count metric.
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-45841-Add OVN flow count metric.", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
prometheusURL := "localhost:29105/metrics"
metricName := "ovn_controller_integration_bridge_openflow_total"
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
if metricValue != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
cbaf32dc-884d-4167-a399-f688a1b74bd8
|
NonHyperShiftHOST-Author:weliang-Medium-45688-Metrics for egress firewall. [Disruptive]
|
['"fmt"', '"path/filepath"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-45688-Metrics for egress firewall. [Disruptive]", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/metrics")
egressFirewall = filepath.Join(buildPruningBaseDir, "OVN-Rules.yaml")
)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
var metricValue1 string
var metricValue2 string
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
metricName := "ovnkube_controller_num_egress_firewall_rules"
prometheusURL := "localhost:29103/metrics"
exutil.By("get the metrics of ovnkube_controller_num_egress_firewall_rules before configuration")
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue1 = getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
e2e.Logf("The output of the ovnkube_master_num_egress_firewall_rules metrics before applying egressfirewall rules is : %v", metricValue1)
if metricValue1 >= "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
exutil.By("create egressfirewall rules in OVN cluster")
fwErr := oc.AsAdmin().Run("create").Args("-n", ns, "-f", egressFirewall).Execute()
o.Expect(fwErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("-n", ns, "-f", egressFirewall).Execute()
fwOutput, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("egressfirewall", "-n", ns).Output()
o.Expect(fwOutput).To(o.ContainSubstring("EgressFirewall Rules applied"))
metricsOutputAfter := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue2 = getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
e2e.Logf("The output of the ovnkube_master_num_egress_firewall_rules metrics after applying egressfirewall rules is : %v", metricValue1)
metricValue1Int, _ := strconv.Atoi(metricValue1)
metricValue2Int, _ := strconv.Atoi(metricValue2)
if metricValue2Int == metricValue1Int+3 {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutputAfter, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutputAfter))
})
| |||||
test case
|
openshift/openshift-tests-private
|
f746c43a-0f85-46e5-8b7a-98733a8659e1
|
NonHyperShiftHOST-Author:weliang-Medium-45842-Metrics for IPSec enabled/disabled
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-45842-Metrics for IPSec enabled/disabled", func() {
var (
namespace = "openshift-ovn-kubernetes"
ovncmName = "kube-rbac-proxy-ovn-metrics"
podLabel = "app=ovnkube-node"
)
ipsecState := checkIPsec(oc)
e2e.Logf("The ipsec state is : %v", ipsecState)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
prometheusURL := "localhost:29103/metrics"
metricName := "ovnkube_controller_ipsec_enabled"
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
e2e.Logf("The output of the ovnkube_controller_ipsec_enabled metrics is : %v", metricValue)
if metricValue == "1" && (ipsecState == "{}" || ipsecState == "Full") {
e2e.Logf("The IPsec is enabled in the cluster")
return true, nil
} else if metricValue == "0" && (ipsecState == "Disabled" || ipsecState == "External") {
e2e.Logf("The IPsec is disabled in the cluster")
return true, nil
} else {
e2e.Failf("Testing fail to get the correct metrics of ovnkube_controller_ipsec_enabled")
return false, nil
}
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
30097a8d-f20a-4222-bc29-7f73d5db4828
|
NonHyperShiftHOST-Author:weliang-Medium-45687-Metrics for egress router
|
['"os/exec"', '"path/filepath"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-45687-Metrics for egress router", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/metrics")
egressrouterPod = filepath.Join(buildPruningBaseDir, "egressrouter.yaml")
)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("create a test pod")
podErr1 := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", egressrouterPod, "-n", ns).Execute()
o.Expect(podErr1).NotTo(o.HaveOccurred())
podErr2 := waitForPodWithLabelReady(oc, oc.Namespace(), "app=egress-router-cni")
exutil.AssertWaitPollNoErr(podErr2, "egressrouterPod is not running")
podName := getPodName(oc, "openshift-multus", "app=multus-admission-controller")
output, err := oc.AsAdmin().Run("exec").Args("-n", "openshift-multus", podName[0], "--", "curl", "localhost:9091/metrics").OutputToFile("metrics.txt")
o.Expect(err).NotTo(o.HaveOccurred())
metricOutput, _ := exec.Command("bash", "-c", "cat "+output+" | grep egress-router | awk '{print $2}'").Output()
metricValue := strings.TrimSpace(string(metricOutput))
e2e.Logf("The output of the egress-router metrics is : %v", metricValue)
o.Expect(metricValue).To(o.ContainSubstring("1"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
e786e264-647a-4058-b136-a96fe98052dd
|
NonHyperShiftHOST-Author:weliang-Medium-45685-Metrics for Metrics for egressIP. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-45685-Metrics for Metrics for egressIP. [Disruptive]", func() {
var (
ovncmName = "kube-rbac-proxy"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
)
platform := checkPlatform(oc)
if !strings.Contains(platform, "vsphere") {
g.Skip("Skip for un-expected platform, egreeIP testing need to be executed on a vsphere cluster!")
}
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
podName := getOVNKMasterPod(oc)
metricName := "ovnkube_clustermanager_num_egress_ips"
prometheusURL := "localhost:29108/metrics"
exutil.By("get the metrics of ovnkube_controller_num_egress_firewall_rules before configuration")
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
if metricValue == "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
exutil.By("Label EgressIP node")
var EgressNodeLabel = "k8s.ovn.org/egress-assignable"
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
if err != nil {
e2e.Logf("Unexpected error occurred: %v", err)
}
exutil.By("Apply EgressLabel Key on one node.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, nodeList.Items[0].Name, EgressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeList.Items[0].Name, EgressNodeLabel, "true")
exutil.By("Apply label to namespace")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Output()
_, err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create an egressip object")
sub1, _ := getDefaultSubnet(oc)
ips := findUnUsedIPs(oc, sub1, 2)
egressip1 := egressIPResource1{
name: "egressip-45685",
template: egressIPTemplate,
egressIP1: ips[0],
egressIP2: ips[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
exutil.By("get the metrics of ovnkube_controller_num_egress_firewall_rules after configuration")
metricsOutputAfter := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, ovncmName, podName, prometheusURL, metricName)
if metricValue == "1" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutputAfter, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutputAfter))
})
| |||||
test case
|
openshift/openshift-tests-private
|
c113ab9f-ab93-4329-b7af-91cbc46e5218
|
NonHyperShiftHOST-Author:weliang-Medium-45689-Metrics for idling enable/disabled.
|
['"fmt"', '"net"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-45689-Metrics for idling enable/disabled.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "metrics/metrics-pod.yaml")
testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
testPodName = "hello-pod"
)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("get controller-managert service ip address")
managertServiceIP := getControllerManagerLeaderIP(oc)
svcURL := net.JoinHostPort(managertServiceIP, "8443")
prometheusURL := "https://" + svcURL + "/metrics"
var metricNumber string
metricsErr := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output := getOVNMetrics(oc, prometheusURL)
metricOutput, _ := exec.Command("bash", "-c", "cat "+output+" | grep openshift_unidle_events_total | awk 'NR==3{print $2}'").Output()
metricNumber = strings.TrimSpace(string(metricOutput))
e2e.Logf("The output of openshift_unidle_events metrics is : %v", metricNumber)
if metricNumber != "" {
return true, nil
}
e2e.Logf("Can't get correct metrics of openshift_unidle_events and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr))
exutil.By("create a service")
createResourceFromFile(oc, ns, testSvcFile)
ServiceOutput, serviceErr := oc.WithoutNamespace().Run("get").Args("service", "-n", ns).Output()
o.Expect(serviceErr).NotTo(o.HaveOccurred())
o.Expect(ServiceOutput).To(o.ContainSubstring("test-service"))
exutil.By("create a test pod")
createResourceFromFile(oc, ns, testPodFile)
podErr := waitForPodWithLabelReady(oc, ns, "name=hello-pod")
exutil.AssertWaitPollNoErr(podErr, "hello-pod is not running")
exutil.By("get test service ip address")
testServiceIP, _ := getSvcIP(oc, ns, "test-service") //This case checks metrics rather than svc behavior, so the test-service dual-stack address is not needed
dstURL := net.JoinHostPort(testServiceIP, "27017")
exutil.By("test-pod can curl service ip address:port")
_, svcerr1 := e2eoutput.RunHostCmd(ns, testPodName, "curl --connect-timeout 5 -s "+dstURL)
o.Expect(svcerr1).NotTo(o.HaveOccurred())
exutil.By("idle test-service")
_, idleerr := oc.Run("idle").Args("-n", ns, "test-service").Output()
o.Expect(idleerr).NotTo(o.HaveOccurred())
exutil.By("test pod can curl service address:port again to unidle the svc")
//Need to curl the service several times, otherwise it causes: curl: (7) Failed to connect to 172.30.248.18 port 27017
//after 0 ms: Connection refused\ncommand terminated with exit code 7\n\nerror:\nexit status 7
for i := 0; i < 3; i++ {
e2eoutput.RunHostCmd(ns, testPodName, "curl --connect-timeout 5 -s "+dstURL)
}
_, svcerr2 := e2eoutput.RunHostCmd(ns, testPodName, "curl --connect-timeout 5 -s "+dstURL)
o.Expect(svcerr2).NotTo(o.HaveOccurred())
//Because of Bug 2064786, the metrics of openshift_unidle_events_total cannot always be retrieved on the first attempt
//Need to curl several times to get the metrics of openshift_unidle_events_total
metricsOutput := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output := getOVNMetrics(oc, prometheusURL)
metricOutput, _ := exec.Command("bash", "-c", "cat "+output+" | grep openshift_unidle_events_total | awk 'NR==3{print $2}'").Output()
metricValue := strings.TrimSpace(string(metricOutput))
e2e.Logf("The output of openshift_unidle_events metrics is : %v", metricValue)
if !strings.Contains(metricValue, metricNumber) {
return true, nil
}
e2e.Logf("Can't get correct metrics of openshift_unidle_events and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
80afb893-2cb3-43a5-9ef7-556582d3ad91
|
NonHyperShiftHOST-Author:weliang-Medium-52072- Add mechanism to record duration for k8 kinds.
|
['"net"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-52072- Add mechanism to record duration for k8 kinds.", func() {
var (
namespace = "openshift-ovn-kubernetes"
podLabel = "app=ovnkube-node"
)
podName := getLeaderInfo(oc, namespace, podLabel, "ovnkubernetes")
leaderNodeIP := getPodIPv4(oc, namespace, podName)
ip := net.ParseIP(leaderNodeIP)
var prometheusURL string
if ip.To4() == nil {
prometheusURL = "https://[" + leaderNodeIP + "]:9103/metrics"
} else {
prometheusURL = "https://" + leaderNodeIP + ":9103/metrics"
}
metricName1 := "ovnkube_controller_network_programming_ovn_duration_seconds_bucket"
metricName2 := "ovnkube_controller_network_programming_duration_seconds_bucket"
checkovnkubeMasterNetworkProgrammingetrics(oc, prometheusURL, metricName1)
checkovnkubeMasterNetworkProgrammingetrics(oc, prometheusURL, metricName2)
})
| |||||
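One detail worth noting in case 52072 above: the IPv6 branch exists only to add square brackets around the host. The standard library's net.JoinHostPort does that automatically (the idling case 45689 already uses it), so the same URL could be built without the To4() check. A small sketch, offered as an equivalent alternative rather than the repository's actual helper:
package main
import (
	"fmt"
	"net"
)
// metricsURL builds the scrape URL for either address family; JoinHostPort
// emits "[fd01::1]:9103" for IPv6 literals and "10.0.0.1:9103" for IPv4.
func metricsURL(host string) string {
	return "https://" + net.JoinHostPort(host, "9103") + "/metrics"
}
func main() {
	fmt.Println(metricsURL("10.0.0.1")) // https://10.0.0.1:9103/metrics
	fmt.Println(metricsURL("fd01::1"))  // https://[fd01::1]:9103/metrics
}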
test case
|
openshift/openshift-tests-private
|
fea95ab0-8526-49ab-8131-b2090de39327
|
Author:qiowang-Medium-53969-Verify OVN controller SB DB connection status metric works [Disruptive]
|
['"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("Author:qiowang-Medium-53969-Verify OVN controller SB DB connection status metric works [Disruptive]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovn_controller_southbound_database_connected"
)
nodes, getNodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath='{.items[*].metadata.name}'").Output()
nodeName := strings.Split(strings.Trim(nodes, "'"), " ")[0]
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
podName, getPodNameErr := exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
exutil.By("1. Restart pod " + podName + " in " + namespace + " to make the pod logs clear")
delPodErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", podName, "-n", namespace, "--ignore-not-found=true").Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
podName, getPodNameErr = exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
waitPodReady(oc, namespace, podName)
exutil.By("2. Get the metrics of " + metricName + " when ovn controller connected to SB DB")
prometheusURL := "localhost:29105/metrics"
containerName := "kube-rbac-proxy-ovn-metrics"
metricsOutput := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
if metricValue == "1" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput))
exutil.By("3. remove ovnsb_db.sock and restart ovn controller process to disconnect socket from ovn controller to SB DB")
defer func() {
deferErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", podName, "-n", namespace, "--ignore-not-found=true").Execute()
o.Expect(deferErr).NotTo(o.HaveOccurred())
podName, getPodNameErr = exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
waitPodReady(oc, namespace, podName)
}()
_, rmErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, "-c", "ovn-controller", podName, "--", "rm", "-f", "/var/run/ovn/ovnsb_db.sock").Output()
o.Expect(rmErr).NotTo(o.HaveOccurred())
getPid, getErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, "-c", "ovn-controller", podName, "--", "cat", "/var/run/ovn/ovn-controller.pid").Output()
o.Expect(getErr).NotTo(o.HaveOccurred())
pid := strings.Split(getPid, "\n")[0]
_, killErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, "-c", "ovn-controller", podName, "--", "kill", "-9", pid).Output()
o.Expect(killErr).NotTo(o.HaveOccurred())
exutil.By("4. Waiting for ovn controller disconnected to SB DB")
_, getLogErr := exutil.WaitAndGetSpecificPodLogs(oc, namespace, "ovn-controller", podName, "\"/var/run/ovn/ovnsb_db.sock: continuing to reconnect in the background\"")
o.Expect(getLogErr).NotTo(o.HaveOccurred())
exutil.By("5. Get the metrics of " + metricName + " when ovn controller disconnected to SB DB")
metricsOutput1 := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
if metricValue1 == "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricsOutput1, fmt.Sprintf("Fail to get metric and the error is:%s", metricsOutput1))
})
| |||||
test case
|
openshift/openshift-tests-private
|
fdb14cb3-3800-4709-be92-4bbdbddd0453
|
Author:qiowang-Medium-60539-Verify metrics ovs_vswitchd_interfaces_total. [Serial]
|
['"fmt"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("Author:qiowang-Medium-60539-Verify metrics ovs_vswitchd_interfaces_total. [Serial]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovs_vswitchd_interfaces_total"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
delta = 3
)
nodes, getNodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath='{.items[*].metadata.name}'").Output()
nodeName := strings.Split(strings.Trim(nodes, "'"), " ")[0]
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
podName, getPodNameErr := exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
exutil.By("1. Get the metrics of " + metricName + " before creating new pod on the node")
prometheusURL := "localhost:29105/metrics"
containerName := "kube-rbac-proxy-ovn-metrics"
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
exutil.By("2. Create test pods and scale test pods to 10")
ns := oc.Namespace()
createResourceFromFile(oc, ns, testPodFile)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name=test-pods"), "Waiting for pods with label name=test-pods to become ready timed out")
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("rc/test-rc", "-n", ns, "-p", "{\"spec\":{\"template\":{\"spec\":{\"nodeName\":\""+nodeName+"\"}}}}", "--type=merge").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
scaleErr := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc/test-rc", "--replicas=0", "-n", ns).Execute()
o.Expect(scaleErr).NotTo(o.HaveOccurred())
scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc/test-rc", "--replicas=10", "-n", ns).Execute()
o.Expect(scaleErr).NotTo(o.HaveOccurred())
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name=test-pods"), "Waiting for pods with label name=test-pods to become ready timed out after scale up")
exutil.By("3. Get the metrics of " + metricName + " after creating new pod on the node")
metricValue1Int, _ := strconv.Atoi(metricValue1)
expectedIncFloor := metricValue1Int + 10 - delta
expectedIncCeil := metricValue1Int + 10 + delta
e2e.Logf("The expected value of the %s is : %v to %v", metricName, expectedIncFloor, expectedIncCeil)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue2 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
metricValue2Int, _ := strconv.Atoi(metricValue2)
if metricValue2Int >= expectedIncFloor && metricValue2Int <= expectedIncCeil {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
exutil.By("4. Delete the pod on the node")
scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc/test-rc", "--replicas=0", "-n", ns).Execute()
o.Expect(scaleErr).NotTo(o.HaveOccurred())
delErr := waitForPodWithLabelGone(oc, ns, "name=test-pods")
o.Expect(delErr).NotTo(o.HaveOccurred())
exutil.By("5. Get the metrics of " + metricName + " after deleting the pod on the node")
expectedDecFloor := metricValue1Int - delta
expectedDecCeil := metricValue1Int + delta
e2e.Logf("The expected value of the %s is : %v to %v", metricName, expectedDecFloor, expectedDecCeil)
metricDecOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue3 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
metricValue3Int, _ := strconv.Atoi(metricValue3)
if metricValue3Int >= expectedDecFloor && metricValue3Int <= expectedDecCeil {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricDecOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricDecOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
c738f0cc-717a-492f-a2cc-8db2b3de17cc
|
NonPreRelease-Longduration-Author:qiowang-Medium-60708-Verify metrics ovnkube_resource_retry_failures_total. [Serial] [Slow]
|
['"fmt"', '"path/filepath"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonPreRelease-Longduration-Author:qiowang-Medium-60708-Verify metrics ovnkube_resource_retry_failures_total. [Serial] [Slow]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovnkube_resource_retry_failures_total"
egressNodeLabel = "k8s.ovn.org/egress-assignable"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
)
exutil.By("1. Get the metrics of " + metricName + " before resource retry failure occur")
prometheusURL := "localhost:29108/metrics"
ovnMasterPodName := getOVNKMasterPod(oc)
containerName := "kube-rbac-proxy"
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
exutil.By("2. Configure egressip with invalid ip address to trigger resource retry")
exutil.By("2.1 Label EgressIP node")
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, nodeName, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeName, egressNodeLabel, "true")
exutil.By("2.2 Create new namespace and apply label")
oc.SetupProject()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "name-").Output()
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "name=test").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("2.3 Create egressip object with invalid ip address")
egressipName := "egressip-" + getRandomString()
egressip := egressIPResource1{
name: egressipName,
template: egressIPTemplate,
egressIP1: "a.b.c.d",
egressIP2: "a.b.0.1",
}
defer egressip.deleteEgressIPObject1(oc)
egressip.createEgressIPObject1(oc)
exutil.By("3. Waiting for ovn resource retry failure")
targetLog := egressipName + ": exceeded number of failed attempts"
checkErr := wait.Poll(2*time.Minute, 16*time.Minute, func() (bool, error) {
podLogs, logErr := exutil.GetSpecificPodLogs(oc, namespace, "ovnkube-cluster-manager", ovnMasterPodName, "'"+targetLog+"'")
if len(podLogs) == 0 || logErr != nil {
e2e.Logf("did not get expected podLogs, or have err: %v, try again", logErr)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("fail to get expected log in pod %v, err: %v", ovnMasterPodName, checkErr))
exutil.By("4. Get the metrics of " + metricName + " again when resource retry failure occur")
metricValue1Int, _ := strconv.Atoi(metricValue1)
expectedIncValue := strconv.Itoa(metricValue1Int + 1)
e2e.Logf("The expected value of the %s is : %v", metricName, expectedIncValue)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue2 := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
if metricValue2 == expectedIncValue {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
f2de788b-aadc-4c18-914d-2e006e8532b4
|
NonHyperShiftHOST-Author:qiowang-Medium-60192-Verify metrics for egress ip unreachable and re-balance total [Disruptive] [Slow]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("NonHyperShiftHOST-Author:qiowang-Medium-60192-Verify metrics for egress ip unreachable and re-balance total [Disruptive] [Slow]", func() {
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "aws") || strings.Contains(platform, "gcp") || strings.Contains(platform, "openstack") || strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "azure") || strings.Contains(platform, "nutanix")
if !acceptedPlatform {
g.Skip("Test cases should be run on AWS/GCP/Azure/Openstack/Vsphere/BareMetal/Nutanix cluster with ovn network plugin, skip for other platforms !!")
}
var (
metricName1 = "ovnkube_clustermanager_egress_ips_rebalance_total"
metricName2 = "ovnkube_clustermanager_egress_ips_node_unreachable_total"
egressNodeLabel = "k8s.ovn.org/egress-assignable"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
)
exutil.By("1. Get list of nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
exutil.By("2. Configure egressip")
exutil.By("2.1 Label one EgressIP node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
exutil.By("2.2 Create new namespace and apply label")
oc.SetupProject()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "org-").Execute()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "org=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
exutil.By("2.3 Create egressip object")
ipStackType := checkIPStackType(oc)
var freeIPs []string
if ipStackType == "ipv6single" {
freeIPs = findFreeIPv6s(oc, egressNodes[0], 1)
} else {
freeIPs = findFreeIPs(oc, egressNodes[0], 1)
}
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-60192",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "purple",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
exutil.By("2.4. Check egressip is assigned to the egress node")
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
egressipAssignedNode1 := egressIPMaps1[0]["node"]
e2e.Logf("egressip is assigned to:%v", egressipAssignedNode1)
o.Expect(egressipAssignedNode1).To(o.ContainSubstring(egressNodes[0]))
exutil.By("3. Get the metrics before egressip re-balance")
prometheusURL := "localhost:29108/metrics"
ovnMasterPodName := getOVNKMasterPod(oc)
containerName := "kube-rbac-proxy"
metric1BeforeReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName1)
metric2BeforeReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName2)
exutil.By("4. Label one more EgressIP node and remove label from the previous one to trigger egressip rebalance")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
o.Eventually(func() bool {
egressIPMaps2 := getAssignedEIPInEIPObject(oc, egressip1.name)
return len(egressIPMaps2) == 1 && egressIPMaps2[0]["node"] == egressNodes[1]
}, "300s", "10s").Should(o.BeTrue(), "egressIP was not failover to the new egress node!")
e2e.Logf("egressip is assigned to:%v", egressNodes[1])
exutil.By("5. Get the metrics after egressip re-balance")
metric1ValueInt, parseIntErr1 := strconv.Atoi(metric1BeforeReboot)
o.Expect(parseIntErr1).NotTo(o.HaveOccurred())
expectedMetric1Value := strconv.Itoa(metric1ValueInt + 1)
e2e.Logf("The expected value of the %s is : %v", metricName1, expectedMetric1Value)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metric1AfterReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName1)
if metric1AfterReboot == expectedMetric1Value {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s, try again", metricName1)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
exutil.By("6. Reboot the egressip assigned node, to trigger egressip node unreachable")
defer checkNodeStatus(oc, egressNodes[1], "Ready")
rebootNode(oc, egressNodes[1])
checkNodeStatus(oc, egressNodes[1], "NotReady")
checkNodeStatus(oc, egressNodes[1], "Ready")
exutil.By("7. Get the metrics after egressip node unreachable")
metric2ValueInt, parseIntErr2 := strconv.Atoi(metric2BeforeReboot)
o.Expect(parseIntErr2).NotTo(o.HaveOccurred())
expectedMetric2Value := strconv.Itoa(metric2ValueInt + 1)
e2e.Logf("The expected value of the %s is : %v", metricName2, expectedMetric2Value)
metricIncOutput = wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metric2AfterReboot := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName2)
if metric2AfterReboot == expectedMetric2Value {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s, try again", metricName2)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
5b9df26e-ff14-4c86-9092-4ba8f9a58d56
|
Author:qiowang-Medium-60704-Verify metrics ovs_vswitchd_interface_up_wait_seconds_total. [Serial]
|
['"fmt"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("Author:qiowang-Medium-60704-Verify metrics ovs_vswitchd_interface_up_wait_seconds_total. [Serial]", func() {
var (
namespace = "openshift-ovn-kubernetes"
metricName = "ovs_vswitchd_interface_up_wait_seconds_total"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
nodes, getNodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath='{.items[*].metadata.name}'").Output()
nodeName := strings.Split(strings.Trim(nodes, "'"), " ")[0]
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
podName, getPodNameErr := exutil.GetPodName(oc, namespace, "app=ovnkube-node", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
exutil.By("1. Get the metrics of " + metricName + " before creating new pods on the node")
prometheusURL := "localhost:29105/metrics"
containerName := "kube-rbac-proxy-ovn-metrics"
metricValue1 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
exutil.By("2. Create test pods and scale test pods to 30")
ns := oc.Namespace()
createResourceFromFile(oc, ns, testPodFile)
podReadyErr1 := waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(podReadyErr1, "this pod with label name=test-pods not ready")
_, scaleUpErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("replicationcontroller/test-rc", "-n", ns, "-p", "{\"spec\":{\"replicas\":30,\"template\":{\"spec\":{\"nodeSelector\":{\"kubernetes.io/hostname\":\""+nodeName+"\"}}}}}", "--type=merge").Output()
o.Expect(scaleUpErr).NotTo(o.HaveOccurred())
podReadyErr2 := waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(podReadyErr2, "this pod with label name=test-pods not all ready")
exutil.By("3. Get the metrics of " + metricName + " after creating new pods on the node")
metricValue1Float, parseErr1 := strconv.ParseFloat(metricValue1, 64)
o.Expect(parseErr1).NotTo(o.HaveOccurred())
e2e.Logf("The expected value of the %s should be greater than %v", metricName, metricValue1)
metricIncOutput := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValue2 := getOVNMetricsInSpecificContainer(oc, containerName, podName, prometheusURL, metricName)
metricValue2Float, parseErr2 := strconv.ParseFloat(metricValue2, 64)
o.Expect(parseErr2).NotTo(o.HaveOccurred())
if metricValue2Float > metricValue1Float {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(metricIncOutput, fmt.Sprintf("Fail to get metric and the error is:%s", metricIncOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
6ce3e994-c350-47d2-98ff-269e83fd573e
|
Author:qiowang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-64077-Verify metrics for ipsec enabled/disabled when configure it at runtime [Disruptive] [Slow]
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("Author:qiowang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-64077-Verify metrics for ipsec enabled/disabled when configure it at runtime [Disruptive] [Slow]", func() {
var (
metricName = "ovnkube_controller_ipsec_enabled"
)
ipsecState := checkIPsec(oc)
if ipsecState == "{}" || ipsecState == "Full" || ipsecState == "External" {
g.Skip("Skip the testing in the ipsec enabled clusters!!!")
}
exutil.By("1. Enable IPsec at runtime")
defer configIPSecAtRuntime(oc, "disabled")
enableErr := configIPSecAtRuntime(oc, "full")
o.Expect(enableErr).NotTo(o.HaveOccurred())
exutil.By("2. Check metrics for IPsec enabled/disabled after enabling at runtime")
prometheusURL := "localhost:29103/metrics"
containerName := "kube-rbac-proxy-node"
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
e2e.Logf("The expected value of the %s is 1", metricName)
ipsecEnabled := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValueAfterEnabled := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
if metricValueAfterEnabled == "1" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s when enabled IPSec and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(ipsecEnabled, fmt.Sprintf("Fail to get metric when enabled IPSec and the error is:%s", ipsecEnabled))
//Add one more step check to cover bug https://issues.redhat.com/browse/OCPBUGS-29305
exutil.By("3. Verify no openssl error in ipsec pods ds")
output, ipsecDSErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ds", "ovn-ipsec-host", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(ipsecDSErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "checkedn")).NotTo(o.BeTrue())
o.Expect(strings.Contains(output, "checkend")).To(o.BeTrue())
exutil.By("4. Disable IPsec at runtime")
disableErr := configIPSecAtRuntime(oc, "disabled")
o.Expect(disableErr).NotTo(o.HaveOccurred())
exutil.By("5. Check metrics for IPsec enabled/disabled after disabling at runtime")
ovnMasterPodName = getOVNKMasterOVNkubeNode(oc)
e2e.Logf("The expected value of the %s is 0", metricName)
ipsecDisabled := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
metricValueAfterDisabled := getOVNMetricsInSpecificContainer(oc, containerName, ovnMasterPodName, prometheusURL, metricName)
if metricValueAfterDisabled == "0" {
return true, nil
}
e2e.Logf("Can't get correct metrics value of %s when disabled IPSec and try again", metricName)
return false, nil
})
exutil.AssertWaitPollNoErr(ipsecDisabled, fmt.Sprintf("Fail to get metric when disabled IPSec and the error is:%s", ipsecDisabled))
})
| |||||
test case
|
openshift/openshift-tests-private
|
874a7968-1338-4829-ba38-b7312ee51ff0
|
Author:huirwang-NonHyperShiftHOST-High-72893-IPSec state can be shown in prometheus endpoint.
|
['"fmt"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metrics.go
|
g.It("Author:huirwang-NonHyperShiftHOST-High-72893-IPSec state can be shown in prometheus endpoint.", func() {
metricQuery := "openshift:openshift_network_operator_ipsec_state:info"
exutil.By(fmt.Sprintf("Check that the metric %s is exposed to telemetry", metricQuery))
expectedExposedMetric := fmt.Sprintf(`{__name__=\"%s\"}`, metricQuery)
telemetryConfig, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", "openshift-monitoring", "telemetry-config", "-o=jsonpath={.data}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(telemetryConfig).To(o.ContainSubstring(expectedExposedMetric),
"Metric %s, is not exposed to telemetry", metricQuery)
mon, err := exutil.NewPrometheusMonitor(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating new prometheus monitor")
exutil.By(fmt.Sprintf("Verify the metric %s displays the right value", metricQuery))
queryResult, err := mon.SimpleQuery(metricQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", metricQuery)
jsonResult := gjson.Parse(queryResult)
e2e.Logf(jsonResult.String())
status := jsonResult.Get("status").String()
o.Expect(status).Should(o.Equal("success"),
"Query %s execution failed: %s", metricQuery, status)
is_legacy_api := gjson.Parse(queryResult).Get("data.result.0.metric.is_legacy_api").String()
mode := gjson.Parse(queryResult).Get("data.result.0.metric.mode").String()
metricValue := gjson.Parse(queryResult).Get("data.result.0.value.1").String()
o.Expect(metricValue).Should(o.Equal("1"))
ipsecState := checkIPsec(oc)
switch ipsecState {
case "Full":
o.Expect(is_legacy_api).Should(o.Equal("false"))
o.Expect(mode).Should(o.Equal("Full"))
case "External":
o.Expect(is_legacy_api).Should(o.Equal("false"))
o.Expect(mode).Should(o.Equal("External"))
case "Disabled":
o.Expect(is_legacy_api).Should(o.Equal("false"))
o.Expect(mode).Should(o.Equal("Disabled"))
case "{}":
o.Expect(is_legacy_api).Should(o.Equal("true"))
o.Expect(mode).Should(o.Equal("Full"))
default:
o.Expect(is_legacy_api).Should(o.Equal("N/A - ipsec not supported (non-OVN network)"))
o.Expect(mode).Should(o.Equal("Disabled"))
}
})
| |||||
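Case 72893 above depends on the layout of a Prometheus instant-query response. A minimal sketch of pulling the same fields with gjson is shown below; the sample JSON is illustrative only (hand-written for the Disabled mode), not output captured from a cluster:
package main
import (
	"fmt"
	"github.com/tidwall/gjson"
)
// sample is an illustrative instant-query response, not real cluster data.
const sample = `{"status":"success","data":{"result":[{"metric":{"mode":"Disabled","is_legacy_api":"false"},"value":[1700000000,"1"]}]}}`
func main() {
	result := gjson.Parse(sample)
	fmt.Println(result.Get("status").String())                             // success
	fmt.Println(result.Get("data.result.0.metric.mode").String())          // Disabled
	fmt.Println(result.Get("data.result.0.metric.is_legacy_api").String()) // false
	fmt.Println(result.Get("data.result.0.value.1").String())              // 1 (the sample value)
}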
test
|
openshift/openshift-tests-private
|
a1edfd6f-a3c5-4bee-b56b-925e4101a889
|
multihoming
|
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = g.Describe("[sig-networking] SDN multihoming", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-multihoming", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
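// Multihoming in these cases relies on OVN-Kubernetes secondary networks (NADs rendered into OVN logical
// switches with layer2 topology), so non-OVN clusters are skipped above.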
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-60505-Multihoming Verify the ip4 connectivity between multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv4network60505"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/24",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Check if the network-attach-defintion is created")
if checkNAD(oc, ns1, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
g.By("Check if the new OVN switch is created")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).ShouldNot(o.Equal(""))
o.Eventually(func() bool {
return checkOVNSwitch(oc, nadName, ovnMasterPodName)
}, 20*time.Second, 5*time.Second).Should(o.BeTrue(), "The correct OVN switch is not created")
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
g.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
g.By("Check if the new OVN switch ports is created")
listSWCmd := "ovn-nbctl show | grep port | grep " + nadName + " "
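// ovn-nbctl runs inside the ovnkube node pod; grepping the logical switch ports for the NAD name should
// show one port per pod attached to this secondary network.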
podname := []string{pod1Name[0], pod2Name[0], pod3Name[0]}
o.Eventually(func() bool {
listOutput, _ := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listSWCmd)
return checkOVNswitchPorts(podname, listOutput)
}, 20*time.Second, 5*time.Second).Should(o.BeTrue(), "The correct OVN switch ports are not created")
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Check if the new OVN switch ports are deleted after deleting the pods")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("all", "--all", "-n", ns1).Execute()).NotTo(o.HaveOccurred())
// After the pods are deleted, it can take several seconds for their switch ports to be removed
o.Eventually(func() bool {
listOutput, _ := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listSWCmd)
return checkOVNswitchPorts(podname, listOutput)
}, 20*time.Second, 5*time.Second).ShouldNot(o.BeTrue(), "The correct OVN switch ports are not deleted")
g.By("Check if the network-attach-defintion is deleted")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()).NotTo(o.HaveOccurred())
if !checkNAD(oc, ns1, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is deleted!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not deleted!", nadName)
}
g.By("Check if the new created OVN switch is deleted")
o.Eventually(func() bool {
return checkOVNSwitch(oc, nadName, ovnMasterPodName)
}, 20*time.Second, 5*time.Second).ShouldNot(o.BeTrue(), "The correct OVN switch is not deleted")
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-60506-Multihoming Verify the ipv6 connectivity between multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv6network60506"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "fd00:dead:beef::0/64",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod1Name[0])
e2e.Logf("The v6 address of pod1 is: %v", pod1IPv6)
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod2Name[0])
e2e.Logf("The v6 address of pod2 is: %v", pod2IPv6)
g.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod3Name[0])
e2e.Logf("The v6 address of pod3 is: %v", pod3IPv6)
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv6, "net1", pod2.podenvname)
g.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv6, "net1", pod3.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv6, "net1", pod1.podenvname)
g.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv6, "net1", pod3.podenvname)
g.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv6, "net1", pod1.podenvname)
g.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv6, "net1", pod2.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-60507-Multihoming Verify the dualstack connectivity between multihoming pods", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-60508-Multihoming Verify ipv4 address excludeSubnets for multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2excludeipv4network60508"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.10.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "192.168.10.0/30,192.168.10.6/32",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
o.Eventually(func() string {
podStatus, _ := getPodStatus(oc, ns1, pod3Name[0])
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", pod3Name[0]))
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
if strings.Contains(pod1IPv4, "192.168.10.1") || strings.Contains(pod1IPv4, "192.168.10.2") || strings.Contains(pod1IPv4, "192.168.10.3") || strings.Contains(pod1IPv4, "192.168.10.6") {
e2e.Failf("Pod: %s get a wrong excluded ipv4 address", pod1Name[0])
}
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
if strings.Contains(pod2IPv4, "192.168.10.1") || strings.Contains(pod2IPv4, "192.168.10.2") || strings.Contains(pod2IPv4, "192.168.10.3") || strings.Contains(pod2IPv4, "192.168.10.6") {
e2e.Failf("Pod: %s get a wrong excluded ipv4 address", pod2Name[0])
}
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-60509-Multihoming Verify ipv6 address excludeSubnets for multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2excludeipv6network60509"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "fd00:dead:beef:1::0/126",
nswithnadname: nsWithnad,
excludeSubnets: "fd00:dead:beef:1::0/127",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
o.Eventually(func() string {
podStatus, _ := getPodStatus(oc, ns1, pod3Name[0])
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", pod3Name[0]))
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod1Name[0])
e2e.Logf("The v6 address of pod1 is: %v", pod1IPv6)
if !strings.Contains(pod1IPv6, "fd00:dead:beef:1::2") && !strings.Contains(pod1IPv6, "fd00:dead:beef:1::3") {
e2e.Failf("Pod: %s does not get correct ipv6 address", pod1Name[0])
}
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod2Name[0])
e2e.Logf("The v6 address of pod2 is: %v", pod2IPv6)
if !strings.Contains(pod1IPv6, "fd00:dead:beef:1::2") && !strings.Contains(pod1IPv6, "fd00:dead:beef:1::3") {
e2e.Failf("Pod: %s does not get correct ipv6 address", pod2Name[0])
}
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv6, "net1", pod2.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv6, "net1", pod1.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-62548-Multihoming Verify multihoming pods with multiple attachments to the different OVN-K networks", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName1 := "layer2dualstacknetwork1"
nsWithnad1 := ns1 + "/" + nadName1
nadName2 := "layer2dualstacknetwork2"
nsWithnad2 := ns1 + "/" + nadName2
nadName3 := nadName1 + "," + nadName2
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create two custom resource network-attach-defintions in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName1, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName1,
subnets: "192.168.100.0/24,fd00:dead:beef::0/64",
nswithnadname: nsWithnad1,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName2, "-n", ns1).Execute()
nad1ns2 := multihomingNAD{
namespace: ns1,
nadname: nadName2,
subnets: "192.168.110.0/24,fd00:dead:beee::0/64",
nswithnadname: nsWithnad2,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns2.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintions in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName3,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintions in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName3,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintions in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName3,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's net1 interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, ns1, pod1Name[0], "net1")
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
g.By("Get IPs from the pod2's net1 interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, ns1, pod2Name[0], "net1")
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
g.By("Get IPs from the pod3's net1 interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3Net1IPv4, pod3Net1IPv6 := getPodMultiNetworks(oc, ns1, pod3Name[0], "net1")
e2e.Logf("The v4 address of pod3's net1 is: %v", pod3Net1IPv4)
e2e.Logf("The v6 address of pod3's net1 is: %v", pod3Net1IPv6)
g.By("Checking net1 connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
g.By("Checking net1 connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
g.By("Get IPs from the pod1's net2 interface")
pod1Net2IPv4, pod1Net2IPv6 := getPodMultiNetworks(oc, ns1, pod1Name[0], "net2")
e2e.Logf("The v4 address of pod1's net2 is: %v", pod1Net2IPv4, pod1.podenvname)
e2e.Logf("The v6 address of pod1's net2 is: %v", pod1Net2IPv6, pod1.podenvname)
g.By("Get IPs from the pod2's net2 interface")
pod2Net2IPv4, pod2Net2IPv6 := getPodMultiNetworks(oc, ns1, pod2Name[0], "net2")
e2e.Logf("The v4 address of pod2's net2 is: %v", pod2Net2IPv4, pod2.podenvname)
e2e.Logf("The v6 address of pod2's net2 is: %v", pod2Net2IPv6, pod2.podenvname)
g.By("Get IPs from the pod3's net2 interface")
pod3Net2IPv4, pod3Net2IPv6 := getPodMultiNetworks(oc, ns1, pod3Name[0], "net2")
e2e.Logf("The v4 address of pod3's net2 is: %v", pod3Net2IPv4)
e2e.Logf("The v6 address of pod3's net2 is: %v", pod3Net2IPv6)
g.By("Checking net2 connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net2IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net2IPv6, "net2", pod2.podenvname)
g.By("Checking net2 connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net2IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net2IPv6, "net2", pod3.podenvname)
g.By("Checking net2 connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net2IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net2IPv6, "net2", pod1.podenvname)
g.By("Checking net2 connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net2IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net2IPv6, "net2", pod3.podenvname)
g.By("Checking net2 connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net2IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net2IPv6, "net2", pod1.podenvname)
g.By("Checking net2 connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net2IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net2IPv6, "net2", pod2.podenvname)
// Pods must have no connectivity across the two separate OVN-K layer2 networks, so curls between net1 and net2 addresses are expected to fail
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net1IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net1IPv6, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net2IPv6, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net1IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net1IPv6, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net2IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net2IPv6, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net1IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net1IPv6, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net2IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net2IPv6, "net1", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net1IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net1IPv6, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net2IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net2IPv6, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net1IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net1IPv6, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net2IPv6, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net1IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net1IPv6, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net2IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net2IPv6, "net1", pod1.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60511-Multihoming Verify the dualstack connectivity between multihoming pods after deleting ovn-northbound-leader pod. [Disruptive]", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
g.By("Delete ovn-northbound-leader pod")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnMasterPodName, "-n", "openshift-ovn-kubernetes").Execute()).NotTo(o.HaveOccurred())
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60512-Multihoming Verify the dualstack connectivity between multihoming pods after deleting all ovnkube-master pods. [Disruptive]", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
g.By("Delete all ovnkube-control-plane pods")
ovnMasterPodNames := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
for _, ovnPod := range ovnMasterPodNames {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnPod, "-n", "openshift-ovn-kubernetes").Execute()).NotTo(o.HaveOccurred())
}
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60516-Multihoming Verify the dualstack connectivity between multihoming pods after deleting all ovnkube-node pods. [Disruptive]", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
g.By("Delete all ovnkube-node pods")
ovnNodePodNames := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
for _, ovnPod := range ovnNodePodNames {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnPod, "-n", "openshift-ovn-kubernetes").Execute()).NotTo(o.HaveOccurred())
}
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-60564-Multihoming Verify the connectivity between multihoming pods without setting subnets", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv4network60564"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create a pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Verify the pod will fail to get IP from it's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
getPodMultiNetworkFail(oc, ns1, pod1Name[0])
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-63186-Multihoming Verify the connectivity between multihoming pods with static IP", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingStaticPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-staticpod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv4network63186"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
ip1 := "192.168.10.10" + "/" + "24"
pod1 := testMultihomingStaticPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
macaddress: "02:03:04:05:06:01",
ipaddress: ip1,
template: multihomingStaticPodTemplate,
}
pod1.createTestMultihomingStaticPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
ip2 := "192.168.10.20" + "/" + "24"
pod2 := testMultihomingStaticPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
macaddress: "02:03:04:05:06:02",
ipaddress: ip2,
template: multihomingStaticPodTemplate,
}
pod2.createTestMultihomingStaticPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
ip3 := "192.168.10.30" + "/" + "24"
pod3 := testMultihomingStaticPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
macaddress: "02:03:04:05:06:03",
ipaddress: ip3,
template: multihomingStaticPodTemplate,
}
pod3.createTestMultihomingStaticPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
g.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv4, "net1", pod2.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-60510-Multihoming Verify multihoming pods with multiple attachments to the same OVN-K networks", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingSharenetNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-sharenet-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName1 := "layer2dualstacknetwork1"
nsWithnad1 := ns1 + "/" + nadName1
nadName2 := "layer2dualstacknetwork2"
nsWithnad2 := ns1 + "/" + nadName2
sharenet := "192.168.100.0/24,fd00:dead:beef::0/64"
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create two custom resource network-attach-defintions in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName1, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName1,
subnets: sharenet,
nswithnadname: nsWithnad1,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName2, "-n", ns1).Execute()
nad1ns2 := multihomingSharenetNAD{
namespace: ns1,
nadname: nadName2,
subnets: sharenet,
nswithnadname: nsWithnad2,
excludeSubnets: "",
topology: value,
sharenetname: nadName1,
template: multihomingSharenetNADTemplate,
}
nad1ns2.createMultihomingSharenetNAD(oc)
g.By("Create 1st pod consuming first network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName1,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming second network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName2,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming second network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName2,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's net1 interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, ns1, pod1Name[0], "net1")
e2e.Logf("The v4 address of pod1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1 is: %v", pod1Net1IPv6)
g.By("Get IPs from the pod2's net1 interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, ns1, pod2Name[0], "net1")
e2e.Logf("The v4 address of pod2 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2 is: %v", pod2Net1IPv6)
g.By("Get IPs from the pod3's net1 interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3Net1IPv4, pod3Net1IPv6 := getPodMultiNetworks(oc, ns1, pod3Name[0], "net1")
e2e.Logf("The v4 address of pod3 is: %v", pod3Net1IPv4)
e2e.Logf("The v6 address of pod3 is: %v", pod3Net1IPv6)
g.By("Checking net1 connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
g.By("Checking net1 connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64810-Multihoming verify ingress-ipblock policy. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
ipBlockIngressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-ingress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "ipblockingress64810"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
var podName, podLabel, podenvName, nodeLocation string
pod := []testMultihomingPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
//Create the pods in different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name="+podLabel)).NotTo(o.HaveOccurred())
}
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
exutil.By("Get IPs from the pod4's secondary interface")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod4IPv4, _ := getPodMultiNetwork(oc, ns1, pod4Name[0])
e2e.Logf("The v4 address of pod4 is: %v", pod4IPv4)
exutil.By("Get IPs from the pod5's secondary interface")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod5IPv4, _ := getPodMultiNetwork(oc, ns1, pod5Name[0])
e2e.Logf("The v4 address of pod5 is: %v", pod5IPv4)
exutil.By("Get IPs from the pod6's secondary interface")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
pod6IPv4, _ := getPodMultiNetwork(oc, ns1, pod6Name[0])
e2e.Logf("The v4 address of pod6 is: %v", pod6IPv4)
// Unlike multus/whereabouts, the six pods do not necessarily receive IP addresses in creation order,
// so map each pod name (and its env name) to a deterministic testpod name based on the IP it actually received.
type podInfor struct {
podName string
podenvName string
}
podData := map[string]podInfor{
pod1IPv4: {podName: pod1Name[0], podenvName: pod[0].podenvname},
pod2IPv4: {podName: pod2Name[0], podenvName: pod[1].podenvname},
pod3IPv4: {podName: pod3Name[0], podenvName: pod[2].podenvname},
pod4IPv4: {podName: pod4Name[0], podenvName: pod[3].podenvname},
pod5IPv4: {podName: pod5Name[0], podenvName: pod[4].podenvname},
pod6IPv4: {podName: pod6Name[0], podenvName: pod[5].podenvname},
}
testpod1IP := "192.168.100.1"
testpod1Name := podData[testpod1IP].podName
testpod1envName := podData[testpod1IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod1 are: %v, %v, %v", testpod1IP, testpod1Name, testpod1envName)
testpod2IP := "192.168.100.2"
testpod2Name := podData[testpod2IP].podName
testpod2envName := podData[testpod2IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod2 are: %v, %v, %v", testpod2IP, testpod2Name, testpod2envName)
testpod3IP := "192.168.100.3"
testpod3Name := podData[testpod3IP].podName
testpod3envName := podData[testpod3IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod3 are: %v, %v, %v", testpod3IP, testpod3Name, testpod3envName)
testpod4IP := "192.168.100.4"
testpod4Name := podData[testpod4IP].podName
testpod4envName := podData[testpod4IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod4 are: %v, %v, %v", testpod4IP, testpod4Name, testpod4envName)
testpod5IP := "192.168.100.5"
testpod5Name := podData[testpod5IP].podName
testpod5envName := podData[testpod5IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod5 are: %v, %v, %v", testpod5IP, testpod5Name, testpod5envName)
testpod6IP := "192.168.100.6"
testpod6Name := podData[testpod6IP].podName
testpod6envName := podData[testpod6IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod6 are: %v, %v, %v", testpod6IP, testpod6Name, testpod6envName)
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("Create ingress ipblock to block the traffic from the pods in the range of 192.168.100.4 to 192.168.100.6")
ipIngressBlock := multihomingIPBlock{
name: "ipblock-ingress",
template: ipBlockIngressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
ipIngressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-ingress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
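// The multi-networkpolicy should be rendered as an OVN ACL whose match contains the ipBlock CIDR; poll the
// NB DB on the ovnkube node pod until it appears.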
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check only the pods which get 192.168.100.4 to 192.168.100.6 can not communicate to others after applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-ingress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
e2e.Logf("Delete all the pods and NAD for topology: %v ----------------------------", value)
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "--all", "-n", ns1).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
_, delNADErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Output()
o.Expect(delNADErr).NotTo(o.HaveOccurred())
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64811-Multihoming verify egress-ipblock policy. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
ipBlockEgressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-egress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
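// Toggling useMultiNetworkPolicy makes the network operator roll out a change; the True.*True.*False and True.*False.*False regexes below are matched against the network clusteroperator status (Available/Progressing/Degraded) while the change rolls out and after it settles.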
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}")
patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "ipblockingress64811"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
var podName, podLabel, podenvName, nodeLocation string
pod := []testMultihomingPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
//Create the pods in different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name="+podLabel)).NotTo(o.HaveOccurred())
}
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
exutil.By("Get IPs from the pod4's secondary interface")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod4IPv4, _ := getPodMultiNetwork(oc, ns1, pod4Name[0])
e2e.Logf("The v4 address of pod4 is: %v", pod4IPv4)
exutil.By("Get IPs from the pod5's secondary interface")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod5IPv4, _ := getPodMultiNetwork(oc, ns1, pod5Name[0])
e2e.Logf("The v4 address of pod5 is: %v", pod5IPv4)
exutil.By("Get IPs from the pod6's secondary interface")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
pod6IPv4, _ := getPodMultiNetwork(oc, ns1, pod6Name[0])
e2e.Logf("The v4 address of pod6 is: %v", pod6IPv4)
// Unlike multus/whereabouts, the six pods do not always receive IP addresses in pod-name order, so remap the
// existing pod names to the testpod names below according to the IP address each pod actually received
type podInfor struct {
podName string
podenvName string
}
podData := map[string]podInfor{
pod1IPv4: {podName: pod1Name[0], podenvName: pod[0].podenvname},
pod2IPv4: {podName: pod2Name[0], podenvName: pod[1].podenvname},
pod3IPv4: {podName: pod3Name[0], podenvName: pod[2].podenvname},
pod4IPv4: {podName: pod4Name[0], podenvName: pod[3].podenvname},
pod5IPv4: {podName: pod5Name[0], podenvName: pod[4].podenvname},
pod6IPv4: {podName: pod6Name[0], podenvName: pod[5].podenvname},
}
testpod1IP := "192.168.100.1"
testpod1Name := podData[testpod1IP].podName
testpod1envName := podData[testpod1IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod1 are: %v, %v, %v", testpod1IP, testpod1Name, testpod1envName)
testpod2IP := "192.168.100.2"
testpod2Name := podData[testpod2IP].podName
testpod2envName := podData[testpod2IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod2 are: %v, %v, %v", testpod2IP, testpod2Name, testpod2envName)
testpod3IP := "192.168.100.3"
testpod3Name := podData[testpod3IP].podName
testpod3envName := podData[testpod3IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod3 are: %v, %v, %v", testpod3IP, testpod3Name, testpod3envName)
testpod4IP := "192.168.100.4"
testpod4Name := podData[testpod4IP].podName
testpod4envName := podData[testpod4IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod4 are: %v, %v, %v", testpod4IP, testpod4Name, testpod4envName)
testpod5IP := "192.168.100.5"
testpod5Name := podData[testpod5IP].podName
testpod5envName := podData[testpod5IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod5 are: %v, %v, %v", testpod5IP, testpod5Name, testpod5envName)
testpod6IP := "192.168.100.6"
testpod6Name := podData[testpod6IP].podName
testpod6envName := podData[testpod6IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod6 are: %v, %v, %v", testpod6IP, testpod6Name, testpod6envName)
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("Create a egress ipblock to block the traffic to the pods in the range of 192.168.100.4 to 192.168.100.6")
ipEgressBlock := multihomingIPBlock{
name: "ipblock-egress",
template: ipBlockEgressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Execute()
ipEgressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-egress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check all pods can communicate to 192.168.100.1-3 but can not communicate to 192.168.100.4-6 after applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodFail(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
e2e.Logf("Delete all the pods and NAD for topology: %v ----------------------------", value)
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "--all", "-n", ns1).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
_, delNADErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Output()
o.Expect(delNADErr).NotTo(o.HaveOccurred())
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64812-Multihoming verify ingressandegress-ipblock policy. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
ipBlockTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-ingressandegress-template.yaml")
ipv4Cidr = "192.168.100.6/32"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}")
patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "ingressandegress"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
var podName, podLabel, podenvName, nodeLocation string
pod := []testMultihomingPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
//Create the pods in different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name="+podLabel)).NotTo(o.HaveOccurred())
}
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
exutil.By("Get IPs from the pod4's secondary interface")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod4IPv4, _ := getPodMultiNetwork(oc, ns1, pod4Name[0])
e2e.Logf("The v4 address of pod4 is: %v", pod4IPv4)
exutil.By("Get IPs from the pod5's secondary interface")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod5IPv4, _ := getPodMultiNetwork(oc, ns1, pod5Name[0])
e2e.Logf("The v4 address of pod5 is: %v", pod5IPv4)
exutil.By("Get IPs from the pod6's secondary interface")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
pod6IPv4, _ := getPodMultiNetwork(oc, ns1, pod6Name[0])
e2e.Logf("The v4 address of pod6 is: %v", pod6IPv4)
// Unlike multus/whereabouts, the six pods do not always receive IP addresses in pod-name order, so remap the
// existing pod names to the testpod names below according to the IP address each pod actually received
type podInfor struct {
podName string
podenvName string
}
podData := map[string]podInfor{
pod1IPv4: {podName: pod1Name[0], podenvName: pod[0].podenvname},
pod2IPv4: {podName: pod2Name[0], podenvName: pod[1].podenvname},
pod3IPv4: {podName: pod3Name[0], podenvName: pod[2].podenvname},
pod4IPv4: {podName: pod4Name[0], podenvName: pod[3].podenvname},
pod5IPv4: {podName: pod5Name[0], podenvName: pod[4].podenvname},
pod6IPv4: {podName: pod6Name[0], podenvName: pod[5].podenvname},
}
testpod1IP := "192.168.100.1"
testpod1Name := podData[testpod1IP].podName
testpod1envName := podData[testpod1IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod1 are: %v, %v, %v", testpod1IP, testpod1Name, testpod1envName)
testpod2IP := "192.168.100.2"
testpod2Name := podData[testpod2IP].podName
testpod2envName := podData[testpod2IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod2 are: %v, %v, %v", testpod2IP, testpod2Name, testpod2envName)
testpod3IP := "192.168.100.3"
testpod3Name := podData[testpod3IP].podName
testpod3envName := podData[testpod3IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod3 are: %v, %v, %v", testpod3IP, testpod3Name, testpod3envName)
testpod4IP := "192.168.100.4"
testpod4Name := podData[testpod4IP].podName
testpod4envName := podData[testpod4IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod4 are: %v, %v, %v", testpod4IP, testpod4Name, testpod4envName)
testpod5IP := "192.168.100.5"
testpod5Name := podData[testpod5IP].podName
testpod5envName := podData[testpod5IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod5 are: %v, %v, %v", testpod5IP, testpod5Name, testpod5envName)
testpod6IP := "192.168.100.6"
testpod6Name := podData[testpod6IP].podName
testpod6envName := podData[testpod6IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod6 are: %v, %v, %v", testpod6IP, testpod6Name, testpod6envName)
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod6IP, "net1", testpod6envName)
exutil.By("Create a egress ipblock to allow only ip4.src == 192.168.100.5 to ip4.dst == 192.168.100.6")
ingressandegress := multihomingIPBlock{
name: "ingressandegress",
template: ipBlockTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ingressandegress", "-n", ns1).Execute()
ingressandegress.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ingressandegress"))
exutil.By("Check a ACL rule is created for ip4.src == 192.168.100.5/32")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.src == 192.168.100.5/32"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check a ACL rule is created for ip4.dst == 192.168.100.6/32")
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.dst == 192.168.100.6/32"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check only ip4.src == 192.168.100.5 to ip4.dst == 192.168.100.6 will be allowed after applying policy")
CurlMultusPod2PodFail(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodFail(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodFail(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod6IP, "net1", testpod6envName)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ingressandegress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.src == 192.168.100.5/32"), fmt.Sprintf("Failed to delete policy on the cluster"))
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.dst == 192.168.100.6/32"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod6IP, "net1", testpod6envName)
e2e.Logf("Delete all the pods and NAD for topology: %v ----------------------------", value)
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "--all", "-n", ns1).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
_, delNADErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Output()
o.Expect(delNADErr).NotTo(o.HaveOccurred())
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-65002-Multihoming verify ingress-ipblock policy with static IP. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-staticpod-template.yaml")
ipBlockIngressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-ingress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}")
patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}")
reloadState := "True.*True.*False"
normalState := "True.*False.*False"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Get the name of testing namespace")
ns1 := oc.Namespace()
nadName := "ipblockingress65002"
nsWithnad := ns1 + "/" + nadName
topology := "layer2"
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: topology,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", ns1).Execute()
var podName, podLabel, podenvName, nodeLocation, macAddress, ipAddress string
pod := []testMultihomingStaticPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
macAddress = "02:03:04:05:06:0" + strconv.Itoa(i)
ipAddress = "192.168.100." + strconv.Itoa(i) + "/" + "29"
//Create the pods in different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingStaticPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
macaddress: macAddress,
ipaddress: ipAddress,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingStaticPod(oc)
}
exutil.By("Check all pods are online")
for i := 1; i < 7; i++ {
podLabel = "multihoming-pod" + strconv.Itoa(i)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns1, "name="+podLabel), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", podLabel))
}
exutil.By("Get pod's name from each pod")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("Create ingress ipblock to block the traffic from the pods in the range of 192.168.100.4 to 192.168.100.6")
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-ingress", "-n", ns1).Output()
ipIngressBlock := multihomingIPBlock{
name: "ipblock-ingress",
template: ipBlockIngressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
ipIngressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-ingress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check all pods can communicate to 192.168.100.1-3 but can not communicate to 192.168.100.4-6 after applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-ingress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
exutil.By("All curl should pass again after deleting policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-65003-Multihoming verify egress-ipblock policy with static IP. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-staticpod-template.yaml")
ipBlockEgressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-egress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}")
patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}")
reloadState := "True.*True.*False"
normalState := "True.*False.*False"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Get the name of a namespace")
ns1 := oc.Namespace()
nadName := "ipblockegress65003"
nsWithnad := ns1 + "/" + nadName
topology := "layer2"
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: topology,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", ns1).Execute()
var podName, podLabel, podenvName, nodeLocation, macAddress, ipAddress string
pod := []testMultihomingStaticPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
macAddress = "02:03:04:05:06:0" + strconv.Itoa(i)
ipAddress = "192.168.100." + strconv.Itoa(i) + "/" + "29"
//Create the pods in different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingStaticPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
macaddress: macAddress,
ipaddress: ipAddress,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingStaticPod(oc)
}
exutil.By("Check all pods are online")
for i := 1; i < 7; i++ {
podLabel = "multihoming-pod" + strconv.Itoa(i)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns1, "name="+podLabel), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", podLabel))
}
exutil.By("Get pod's name from each pod")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("Create a egress ipblock to block the traffic to the pods in the range of 192.168.100.4 to 192.168.100.6")
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Execute()
ipEgressBlock := multihomingIPBlock{
name: "ipblock-egress",
template: ipBlockEgressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
ipEgressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-egress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check all pods can communicate to 192.168.100.1-3 but can not communicate to 192.168.100.4-6 after applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
dd66cecc-447f-4b1e-b6ca-8df0c295a5b1
|
NonHyperShiftHOST-Author:weliang-Medium-60505-Multihoming Verify the ip4 connectivity between multihoming pods
|
['"context"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-60505-Multihoming Verify the ip4 connectivity between multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv4network60505"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/24",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Check if the network-attach-defintion is created")
if checkNAD(oc, ns1, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
g.By("Check if the new OVN switch is created")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).ShouldNot(o.Equal(""))
o.Eventually(func() bool {
return checkOVNSwitch(oc, nadName, ovnMasterPodName)
}, 20*time.Second, 5*time.Second).Should(o.BeTrue(), "The correct OVN switch is not created")
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
g.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
g.By("Check if the new OVN switch ports is created")
listSWCmd := "ovn-nbctl show | grep port | grep " + nadName + " "
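// Secondary-network logical switch ports carry the NAD name, so grep the NB "show" output for them and expect one port per pod.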
podname := []string{pod1Name[0], pod2Name[0], pod3Name[0]}
o.Eventually(func() bool {
listOutput, _ := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listSWCmd)
return checkOVNswitchPorts(podname, listOutput)
}, 20*time.Second, 5*time.Second).Should(o.BeTrue(), "The correct OVN switch ports are not created")
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Check if the new OVN switch ports are deleted after deleting the pods")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("all", "--all", "-n", ns1).Execute()).NotTo(o.HaveOccurred())
//After deleting pods, it will take several seconds to delete the switch ports
o.Eventually(func() bool {
listOutput, _ := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listSWCmd)
return checkOVNswitchPorts(podname, listOutput)
}, 20*time.Second, 5*time.Second).ShouldNot(o.BeTrue(), "The correct OVN switch ports are not deleted")
g.By("Check if the network-attach-defintion is deleted")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()).NotTo(o.HaveOccurred())
if !checkNAD(oc, ns1, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is deleted!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not deleted!", nadName)
}
g.By("Check if the new created OVN switch is deleted")
o.Eventually(func() bool {
return checkOVNSwitch(oc, nadName, ovnMasterPodName)
}, 20*time.Second, 5*time.Second).ShouldNot(o.BeTrue(), "The correct OVN switch is not deleted")
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
6917ccb8-8e70-4350-b7ab-6fb3b12efa65
|
NonHyperShiftHOST-Author:weliang-Medium-60506-Multihoming Verify the ipv6 connectivity between multihoming pods
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-60506-Multihoming Verify the ipv6 connectivity between multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv6network60506"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "fd00:dead:beef::0/64",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod1Name[0])
e2e.Logf("The v6 address of pod1 is: %v", pod1IPv6)
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod2Name[0])
e2e.Logf("The v6 address of pod2 is: %v", pod2IPv6)
g.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod3Name[0])
e2e.Logf("The v6 address of pod3 is: %v", pod3IPv6)
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv6, "net1", pod2.podenvname)
g.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv6, "net1", pod3.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv6, "net1", pod1.podenvname)
g.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv6, "net1", pod3.podenvname)
g.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv6, "net1", pod1.podenvname)
g.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv6, "net1", pod2.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
b877d8da-eb04-45f4-9a90-dc862145a53d
|
NonHyperShiftHOST-Author:weliang-Medium-60507-Multihoming Verify the dualstack connectivity between multihoming pods
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-60507-Multihoming Verify the dualstack connectivity between multihoming pods", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
}()
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
89a5a8f2-6ab8-4703-a671-2bfe9ec423d5
|
NonHyperShiftHOST-Author:weliang-Medium-60508-Multihoming Verify ipv4 address excludeSubnets for multihoming pods
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-60508-Multihoming Verify ipv4 address excludeSubnets for multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2excludeipv4network60508"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.10.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "192.168.10.0/30,192.168.10.6/32",
topology: value,
template: multihomingNADTemplate,
}
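// The /29 covers 192.168.10.0-.7; excluding 192.168.10.0/30 (.0-.3) and .6 leaves only .4 and .5 assignable, so the third pod below is expected to stay Pending.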
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
o.Eventually(func() string {
podStatus, _ := getPodStatus(oc, ns1, pod3Name[0])
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", pod3Name[0]))
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
if strings.Contains(pod1IPv4, "192.168.10.1") || strings.Contains(pod1IPv4, "192.168.10.2") || strings.Contains(pod1IPv4, "192.168.10.3") || strings.Contains(pod1IPv4, "192.168.10.6") {
e2e.Failf("Pod: %s get a wrong excluded ipv4 address", pod1Name[0])
}
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
if strings.Contains(pod2IPv4, "192.168.10.1") || strings.Contains(pod2IPv4, "192.168.10.2") || strings.Contains(pod2IPv4, "192.168.10.3") || strings.Contains(pod2IPv4, "192.168.10.6") {
e2e.Failf("Pod: %s get a wrong excluded ipv4 address", pod2Name[0])
}
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
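Note: the excludeSubnets assertions in the test above rely on substring matching against a hand-listed set of excluded addresses. A more general way to verify that an assigned address avoids every excluded CIDR is to parse the ranges directly. The sketch below is illustrative only and is not part of the repository's helpers; the function name ipOutsideExcludedSubnets is hypothetical.
package main

import (
    "fmt"
    "net/netip"
    "strings"
)

// ipOutsideExcludedSubnets reports whether addr (e.g. "192.168.10.5") falls
// outside every CIDR listed in excludeSubnets
// (e.g. "192.168.10.0/30,192.168.10.6/32").
func ipOutsideExcludedSubnets(addr, excludeSubnets string) (bool, error) {
    ip, err := netip.ParseAddr(addr)
    if err != nil {
        return false, fmt.Errorf("bad address %q: %w", addr, err)
    }
    for _, cidr := range strings.Split(excludeSubnets, ",") {
        prefix, err := netip.ParsePrefix(strings.TrimSpace(cidr))
        if err != nil {
            return false, fmt.Errorf("bad CIDR %q: %w", cidr, err)
        }
        if prefix.Contains(ip) {
            return false, nil
        }
    }
    return true, nil
}

func main() {
    ok, err := ipOutsideExcludedSubnets("192.168.10.5", "192.168.10.0/30,192.168.10.6/32")
    fmt.Println(ok, err) // true <nil>
}
The same check works for the IPv6 excludeSubnets case, since netip parses both address families.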
test case
|
openshift/openshift-tests-private
|
8eb941ed-bdfe-4f02-93a9-b0a0b71a8258
|
NonHyperShiftHOST-Author:weliang-Medium-60509-Multihoming Verify ipv6 address excludeSubnets for multihoming pods
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-60509-Multihoming Verify ipv6 address excludeSubnets for multihoming pods", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2excludeipv6network60509"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "fd00:dead:beef:1::0/126",
nswithnadname: nsWithnad,
excludeSubnets: "fd00:dead:beef:1::0/127",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
o.Eventually(func() string {
podStatus, _ := getPodStatus(oc, ns1, pod3Name[0])
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", pod3Name[0]))
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod1Name[0])
e2e.Logf("The v6 address of pod1 is: %v", pod1IPv6)
if !strings.Contains(pod1IPv6, "fd00:dead:beef:1::2") && !strings.Contains(pod1IPv6, "fd00:dead:beef:1::3") {
e2e.Failf("Pod: %s does not get correct ipv6 address", pod1Name[0])
}
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv6 := getPodMultiNetworkIPv6(oc, ns1, pod2Name[0])
e2e.Logf("The v6 address of pod2 is: %v", pod2IPv6)
if !strings.Contains(pod2IPv6, "fd00:dead:beef:1::2") && !strings.Contains(pod2IPv6, "fd00:dead:beef:1::3") {
e2e.Failf("Pod: %s did not get a correct IPv6 address", pod2Name[0])
}
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv6, "net1", pod2.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv6, "net1", pod1.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
1bd4c9f4-8c24-4af9-aa43-310a7fd33102
|
NonHyperShiftHOST-Author:weliang-Medium-62548-Multihoming Verify multihoming pods with multiple attachments to the different OVN-K networks
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-62548-Multihoming Verify multihoming pods with multiple attachments to the different OVN-K networks", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName1 := "layer2dualstacknetwork1"
nsWithnad1 := ns1 + "/" + nadName1
nadName2 := "layer2dualstacknetwork2"
nsWithnad2 := ns1 + "/" + nadName2
nadName3 := nadName1 + "," + nadName2
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create two custom resource network-attach-defintions in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName1, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName1,
subnets: "192.168.100.0/24,fd00:dead:beef::0/64",
nswithnadname: nsWithnad1,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName2, "-n", ns1).Execute()
nad1ns2 := multihomingNAD{
namespace: ns1,
nadname: nadName2,
subnets: "192.168.110.0/24,fd00:dead:beee::0/64",
nswithnadname: nsWithnad2,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns2.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintions in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName3,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintions in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName3,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintions in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName3,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's net1 interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, ns1, pod1Name[0], "net1")
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
g.By("Get IPs from the pod2's net1 interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, ns1, pod2Name[0], "net1")
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
g.By("Get IPs from the pod3's net1 interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3Net1IPv4, pod3Net1IPv6 := getPodMultiNetworks(oc, ns1, pod3Name[0], "net1")
e2e.Logf("The v4 address of pod3's net1 is: %v", pod3Net1IPv4)
e2e.Logf("The v6 address of pod3's net1 is: %v", pod3Net1IPv6)
g.By("Checking net1 connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
g.By("Checking net1 connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
g.By("Get IPs from the pod1's net2 interface")
pod1Net2IPv4, pod1Net2IPv6 := getPodMultiNetworks(oc, ns1, pod1Name[0], "net2")
e2e.Logf("The v4 address of pod1's net2 is: %v", pod1Net2IPv4, pod1.podenvname)
e2e.Logf("The v6 address of pod1's net2 is: %v", pod1Net2IPv6, pod1.podenvname)
g.By("Get IPs from the pod2's net2 interface")
pod2Net2IPv4, pod2Net2IPv6 := getPodMultiNetworks(oc, ns1, pod2Name[0], "net2")
e2e.Logf("The v4 address of pod2's net2 is: %v", pod2Net2IPv4, pod2.podenvname)
e2e.Logf("The v6 address of pod2's net2 is: %v", pod2Net2IPv6, pod2.podenvname)
g.By("Get IPs from the pod3's net2 interface")
pod3Net2IPv4, pod3Net2IPv6 := getPodMultiNetworks(oc, ns1, pod3Name[0], "net2")
e2e.Logf("The v4 address of pod3's net2 is: %v", pod3Net2IPv4)
e2e.Logf("The v6 address of pod3's net2 is: %v", pod3Net2IPv6)
g.By("Checking net2 connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net2IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net2IPv6, "net2", pod2.podenvname)
g.By("Checking net2 connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net2IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net2IPv6, "net2", pod3.podenvname)
g.By("Checking net2 connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net2IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net2IPv6, "net2", pod1.podenvname)
g.By("Checking net2 connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net2IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net2IPv6, "net2", pod3.podenvname)
g.By("Checking net2 connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net2IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net2IPv6, "net2", pod1.podenvname)
g.By("Checking net2 connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net2IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net2IPv6, "net2", pod2.podenvname)
//Verify that pods have no connectivity across the two OVN-K networks in the layer2 topology
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net1IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net1IPv6, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod2Net2IPv6, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net1IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net1IPv6, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net2IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod1Name[0], pod3Net2IPv6, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net1IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net1IPv6, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net2IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod1Net2IPv6, "net1", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net1IPv4, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net1IPv6, "net2", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net2IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], pod3Net2IPv6, "net1", pod3.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net1IPv4, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net1IPv6, "net2", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod2Net2IPv6, "net1", pod2.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net1IPv4, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net1IPv6, "net2", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net2IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], pod1Net2IPv6, "net1", pod1.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
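For reference, helpers such as getPodMultiNetworks in the test above return the per-interface addresses of a pod's secondary attachments. Multus publishes this information in the pod's k8s.v1.cni.cncf.io/network-status annotation; the minimal parser below is a sketch of how those IPs could be extracted from that JSON and is not the repository's implementation (the type and function names are hypothetical).
package main

import (
    "encoding/json"
    "fmt"
)

// networkStatus mirrors one entry of the k8s.v1.cni.cncf.io/network-status
// annotation that Multus writes on a pod.
type networkStatus struct {
    Name      string   `json:"name"`
    Interface string   `json:"interface"`
    IPs       []string `json:"ips"`
    Mac       string   `json:"mac"`
}

// ipsForInterface returns the addresses reported for the given interface
// (for example "net1" or "net2") in the annotation value.
func ipsForInterface(annotation, iface string) ([]string, error) {
    var statuses []networkStatus
    if err := json.Unmarshal([]byte(annotation), &statuses); err != nil {
        return nil, err
    }
    for _, s := range statuses {
        if s.Interface == iface {
            return s.IPs, nil
        }
    }
    return nil, fmt.Errorf("interface %s not found in network-status", iface)
}

func main() {
    annotation := `[{"name":"ns1/layer2dualstacknetwork1","interface":"net1","ips":["192.168.100.5","fd00:dead:beef::5"],"mac":"0a:58:c0:a8:64:05"}]`
    ips, err := ipsForInterface(annotation, "net1")
    fmt.Println(ips, err)
}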
test case
|
openshift/openshift-tests-private
|
0430d2e8-56e0-4b21-b90f-67f8405f7100
|
Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60511-Multihoming Verify the dualstack connectivity between multihoming pods after deleting ovn-northbound-leader pod. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60511-Multihoming Verify the dualstack connectivity between multihoming pods after deleting ovn-northbound-leader pod. [Disruptive]", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
g.By("Delete ovn-northbound-leader pod")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnMasterPodName, "-n", "openshift-ovn-kubernetes").Execute()).NotTo(o.HaveOccurred())
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
74513007-bffa-4548-b655-a3fa1dfcc001
|
Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60512-Multihoming Verify the dualstack connectivity between multihoming pods after deleting all ovnkube-master pods. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60512-Multihoming Verify the dualstack connectivity between multihoming pods after deleting all ovnkube-master pods. [Disruptive]", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
g.By("Delete all ovnkube-control-plane pods")
ovnMasterPodNames := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
for _, ovnPod := range ovnMasterPodNames {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnPod, "-n", "openshift-ovn-kubernetes").Execute()).NotTo(o.HaveOccurred())
}
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
de022daf-8f98-4b7b-b999-4edd46953bcb
|
Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60516-Multihoming Verify the dualstack connectivity between multihoming pods after deleting all ovnkube-node pods. [Disruptive]
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("Longduration-NonPreRelease-NonHyperShiftHOST-Author:weliang-Medium-60516-Multihoming Verify the dualstack connectivity between multihoming pods after deleting all ovnkube-node pods. [Disruptive]", func() {
var podName, podEnvName, podIPv4, podIPv6 []string
var ovnMasterPodName, ns, nadName string
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName = multihomingBeforeCheck(oc, value)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
g.By("Delete all ovnkube-node pods")
ovnNodePodNames := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
for _, ovnPod := range ovnNodePodNames {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnPod, "-n", "openshift-ovn-kubernetes").Execute()).NotTo(o.HaveOccurred())
}
multihomingAfterCheck(oc, podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns, nadName)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
023e34f4-8501-4a8d-82d1-d14cfe539d3d
|
NonHyperShiftHOST-Author:weliang-Medium-60564-Multihoming Verify the connectivity between multihoming pods without setting subnets
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-60564-Multihoming Verify the connectivity between multihoming pods without setting subnets", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv4network60564"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create a pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Verify the pod will fail to get IP from it's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
getPodMultiNetworkFail(oc, ns1, pod1Name[0])
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
fa35c033-ff6c-4efd-93b7-689c121508fa
|
NonHyperShiftHOST-Author:weliang-Medium-63186-Multihoming Verify the connectivity between multihoming pods with static IP
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-63186-Multihoming Verify the connectivity between multihoming pods with static IP", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingStaticPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-staticpod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2ipv4network63186"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
g.By("Create 1st pod consuming above network-attach-defintion in ns1")
ip1 := "192.168.10.10" + "/" + "24"
pod1 := testMultihomingStaticPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
macaddress: "02:03:04:05:06:01",
ipaddress: ip1,
template: multihomingStaticPodTemplate,
}
pod1.createTestMultihomingStaticPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming above network-attach-defintion in ns1")
ip2 := "192.168.10.20" + "/" + "24"
pod2 := testMultihomingStaticPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
macaddress: "02:03:04:05:06:02",
ipaddress: ip2,
template: multihomingStaticPodTemplate,
}
pod2.createTestMultihomingStaticPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming above network-attach-defintion in ns1")
ip3 := "192.168.10.30" + "/" + "24"
pod3 := testMultihomingStaticPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
macaddress: "02:03:04:05:06:03",
ipaddress: ip3,
template: multihomingStaticPodTemplate,
}
pod3.createTestMultihomingStaticPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
g.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
g.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
g.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
g.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv4, "net1", pod3.podenvname)
g.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv4, "net1", pod1.podenvname)
g.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv4, "net1", pod2.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
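The static-IP pods above are rendered from multihoming-staticpod-template.yaml, whose contents are not shown here. In general, a fixed address and MAC for a secondary attachment can be requested through the pod's k8s.v1.cni.cncf.io/networks annotation; the snippet below sketches that annotation value, under the assumption that the template expands the ipaddress and macaddress fields into it.
package main

import (
    "encoding/json"
    "fmt"
)

// networkSelection is the element format accepted by the
// k8s.v1.cni.cncf.io/networks pod annotation.
type networkSelection struct {
    Name string   `json:"name"`
    IPs  []string `json:"ips,omitempty"`
    Mac  string   `json:"mac,omitempty"`
}

func main() {
    // Request the layer2ipv4network63186 attachment with the same static
    // address and MAC used for multihoming-pod-1 in the test above.
    selection := []networkSelection{{
        Name: "layer2ipv4network63186",
        IPs:  []string{"192.168.10.10/24"},
        Mac:  "02:03:04:05:06:01",
    }}
    value, _ := json.Marshal(selection)
    fmt.Printf("k8s.v1.cni.cncf.io/networks: '%s'\n", value)
}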
test case
|
openshift/openshift-tests-private
|
8d84b850-e1d0-46e4-b038-7979359d0947
|
NonHyperShiftHOST-Author:weliang-Medium-60510-Multihoming Verify multihoming pods with multiple attachments to the same OVN-K networks
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-60510-Multihoming Verify multihoming pods with multiple attachments to the same OVN-K networks", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingSharenetNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-sharenet-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a test namespace")
ns1 := oc.Namespace()
nadName1 := "layer2dualstacknetwork1"
nsWithnad1 := ns1 + "/" + nadName1
nadName2 := "layer2dualstacknetwork2"
nsWithnad2 := ns1 + "/" + nadName2
sharenet := "192.168.100.0/24,fd00:dead:beef::0/64"
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
g.By("Create two custom resource network-attach-defintions in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName1, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName1,
subnets: sharenet,
nswithnadname: nsWithnad1,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName2, "-n", ns1).Execute()
nad1ns2 := multihomingSharenetNAD{
namespace: ns1,
nadname: nadName2,
subnets: sharenet,
nswithnadname: nsWithnad2,
excludeSubnets: "",
topology: value,
sharenetname: nadName1,
template: multihomingSharenetNADTemplate,
}
nad1ns2.createMultihomingSharenetNAD(oc)
g.By("Create 1st pod consuming first network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName1,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
g.By("Create 2nd pod consuming second network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName2,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
g.By("Create 3rd pod consuming second network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName2,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
g.By("Get IPs from the pod1's net1 interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, ns1, pod1Name[0], "net1")
e2e.Logf("The v4 address of pod1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1 is: %v", pod1Net1IPv6)
g.By("Get IPs from the pod2's net1 interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, ns1, pod2Name[0], "net1")
e2e.Logf("The v4 address of pod2 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2 is: %v", pod2Net1IPv6)
g.By("Get IPs from the pod3's net1 interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3Net1IPv4, pod3Net1IPv6 := getPodMultiNetworks(oc, ns1, pod3Name[0], "net1")
e2e.Logf("The v4 address of pod3 is: %v", pod3Net1IPv4)
e2e.Logf("The v6 address of pod3 is: %v", pod3Net1IPv6)
g.By("Checking net1 connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
g.By("Checking net1 connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3Net1IPv6, "net1", pod3.podenvname)
g.By("Checking net1 connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1Net1IPv6, "net1", pod1.podenvname)
g.By("Checking net1 connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2Net1IPv6, "net1", pod2.podenvname)
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
22e26f08-4cfb-47d7-b5e8-63c0efbedc24
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64810-Multihoming verify ingress-ipblock policy. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64810-Multihoming verify ingress-ipblock policy. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
ipBlockIngressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-ingress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should be back to normal after disabling useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should be back to normal after enabling useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "ipblockingress64810"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
var podName, podLabel, podenvName, nodeLocation string
pod := []testMultihomingPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
//Create the pods in different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name="+podLabel)).NotTo(o.HaveOccurred())
}
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
exutil.By("Get IPs from the pod4's secondary interface")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod4IPv4, _ := getPodMultiNetwork(oc, ns1, pod4Name[0])
e2e.Logf("The v4 address of pod4 is: %v", pod4IPv4)
exutil.By("Get IPs from the pod5's secondary interface")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod5IPv4, _ := getPodMultiNetwork(oc, ns1, pod5Name[0])
e2e.Logf("The v4 address of pod5 is: %v", pod5IPv4)
exutil.By("Get IPs from the pod6's secondary interface")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
pod6IPv4, _ := getPodMultiNetwork(oc, ns1, pod6Name[0])
e2e.Logf("The v4 address of pod6 is: %v", pod6IPv4)
// Unlike multus/whereabouts, the six pods will not always receive IP addresses in creation order, so remap the
// existing pod names to the test-pod names ordered by IP address
type podInfor struct {
podName string
podenvName string
}
podData := map[string]podInfor{
pod1IPv4: {podName: pod1Name[0], podenvName: pod[0].podenvname},
pod2IPv4: {podName: pod2Name[0], podenvName: pod[1].podenvname},
pod3IPv4: {podName: pod3Name[0], podenvName: pod[2].podenvname},
pod4IPv4: {podName: pod4Name[0], podenvName: pod[3].podenvname},
pod5IPv4: {podName: pod5Name[0], podenvName: pod[4].podenvname},
pod6IPv4: {podName: pod6Name[0], podenvName: pod[5].podenvname},
}
testpod1IP := "192.168.100.1"
testpod1Name := podData[testpod1IP].podName
testpod1envName := podData[testpod1IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod1 are: %v, %v, %v", testpod1IP, testpod1Name, testpod1envName)
testpod2IP := "192.168.100.2"
testpod2Name := podData[testpod2IP].podName
testpod2envName := podData[testpod2IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod2 are: %v, %v, %v", testpod2IP, testpod2Name, testpod2envName)
testpod3IP := "192.168.100.3"
testpod3Name := podData[testpod3IP].podName
testpod3envName := podData[testpod3IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod3 are: %v, %v, %v", testpod3IP, testpod3Name, testpod3envName)
testpod4IP := "192.168.100.4"
testpod4Name := podData[testpod4IP].podName
testpod4envName := podData[testpod4IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod4 are: %v, %v, %v", testpod4IP, testpod4Name, testpod4envName)
testpod5IP := "192.168.100.5"
testpod5Name := podData[testpod5IP].podName
testpod5envName := podData[testpod5IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod5 are: %v, %v, %v", testpod5IP, testpod5Name, testpod5envName)
testpod6IP := "192.168.100.6"
testpod6Name := podData[testpod6IP].podName
testpod6envName := podData[testpod6IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod6 are: %v, %v, %v", testpod6IP, testpod6Name, testpod6envName)
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("Create ingress ipblock to block the traffic from the pods in the range of 192.168.100.4 to 192.168.100.6")
ipIngressBlock := multihomingIPBlock{
name: "ipblock-ingress",
template: ipBlockIngressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
ipIngressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-ingress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check only the pods which get 192.168.100.4 to 192.168.100.6 can not communicate to others after applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-ingress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
e2e.Logf("Delete all the pods and NAD for topology: %v ----------------------------", value)
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "--all", "-n", ns1).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
_, delNADErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Output()
o.Expect(delNADErr).NotTo(o.HaveOccurred())
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
| |||||
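The map-based reordering in the test above assumes the six pods land exactly on 192.168.100.1 through 192.168.100.6. A more general way to obtain a deterministic order, regardless of which address each pod receives, is to sort the pods by their parsed secondary-interface IP; the sketch below illustrates that idea and is not part of the test code (the podInfo type and sortByIP function are hypothetical).
package main

import (
    "fmt"
    "net/netip"
    "sort"
)

// podInfo pairs a pod's name with the IPv4 address of its secondary interface.
type podInfo struct {
    Name string
    IP   string
}

// sortByIP orders pods by their secondary-interface address so that the first
// test pod always refers to the lowest address, and so on. Addresses are
// assumed to be valid; invalid ones would sort as the zero Addr.
func sortByIP(pods []podInfo) {
    sort.Slice(pods, func(i, j int) bool {
        a, _ := netip.ParseAddr(pods[i].IP)
        b, _ := netip.ParseAddr(pods[j].IP)
        return a.Less(b)
    })
}

func main() {
    pods := []podInfo{
        {"multihoming-pod-3", "192.168.100.4"},
        {"multihoming-pod-1", "192.168.100.2"},
        {"multihoming-pod-2", "192.168.100.1"},
    }
    sortByIP(pods)
    fmt.Println(pods) // lowest address first
}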
test case
|
openshift/openshift-tests-private
|
9ffa28ad-bb42-429e-890c-c50e4ffb9a74
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64811-Multihoming verify egress-ipblock policy. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64811-Multihoming verify egress-ipblock policy. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
ipBlockEgressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-egress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should be back to normal after disabling useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should be back to normal after enabling useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "ipblockingress64811"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
var podName, podLabel, podenvName, nodeLocation string
pod := []testMultihomingPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
//Create the pods in different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name="+podLabel)).NotTo(o.HaveOccurred())
}
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
exutil.By("Get IPs from the pod4's secondary interface")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod4IPv4, _ := getPodMultiNetwork(oc, ns1, pod4Name[0])
e2e.Logf("The v4 address of pod4 is: %v", pod4IPv4)
exutil.By("Get IPs from the pod5's secondary interface")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod5IPv4, _ := getPodMultiNetwork(oc, ns1, pod5Name[0])
e2e.Logf("The v4 address of pod5 is: %v", pod5IPv4)
exutil.By("Get IPs from the pod6's secondary interface")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
pod6IPv4, _ := getPodMultiNetwork(oc, ns1, pod6Name[0])
e2e.Logf("The v4 address of pod6 is: %v", pod6IPv4)
// Unlike multus/whereabouts, the six pods will not always receive IP addresses in creation order, so remap the
// existing pod names to the test-pod names ordered by IP address
type podInfor struct {
podName string
podenvName string
}
podData := map[string]podInfor{
pod1IPv4: {podName: pod1Name[0], podenvName: pod[0].podenvname},
pod2IPv4: {podName: pod2Name[0], podenvName: pod[1].podenvname},
pod3IPv4: {podName: pod3Name[0], podenvName: pod[2].podenvname},
pod4IPv4: {podName: pod4Name[0], podenvName: pod[3].podenvname},
pod5IPv4: {podName: pod5Name[0], podenvName: pod[4].podenvname},
pod6IPv4: {podName: pod6Name[0], podenvName: pod[5].podenvname},
}
testpod1IP := "192.168.100.1"
testpod1Name := podData[testpod1IP].podName
testpod1envName := podData[testpod1IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod1 are: %v, %v, %v", testpod1IP, testpod1Name, testpod1envName)
testpod2IP := "192.168.100.2"
testpod2Name := podData[testpod2IP].podName
testpod2envName := podData[testpod2IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod2 are: %v, %v, %v", testpod2IP, testpod2Name, testpod2envName)
testpod3IP := "192.168.100.3"
testpod3Name := podData[testpod3IP].podName
testpod3envName := podData[testpod3IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod3 are: %v, %v, %v", testpod3IP, testpod3Name, testpod3envName)
testpod4IP := "192.168.100.4"
testpod4Name := podData[testpod4IP].podName
testpod4envName := podData[testpod4IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod4 are: %v, %v, %v", testpod4IP, testpod4Name, testpod4envName)
testpod5IP := "192.168.100.5"
testpod5Name := podData[testpod5IP].podName
testpod5envName := podData[testpod5IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod5 are: %v, %v, %v", testpod5IP, testpod5Name, testpod5envName)
testpod6IP := "192.168.100.6"
testpod6Name := podData[testpod6IP].podName
testpod6envName := podData[testpod6IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod6 are: %v, %v, %v", testpod6IP, testpod6Name, testpod6envName)
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("Create a egress ipblock to block the traffic to the pods in the range of 192.168.100.4 to 192.168.100.6")
ipEgressBlock := multihomingIPBlock{
name: "ipblock-egress",
template: ipBlockEgressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Execute()
ipEgressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-egress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check all pods can communicate to 192.168.100.1-3 but can not communicate to 192.168.100.4-6 after applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodFail(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod6Name, testpod5IP, "net1", testpod5envName)
e2e.Logf("Delete all the pods and NAD for topology: %v ----------------------------", value)
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "--all", "-n", ns1).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
_, delNADErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Output()
o.Expect(delNADErr).NotTo(o.HaveOccurred())
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
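// Note (illustrative addition): the connectivity assertions above rely on the suite's
// CurlMultusPod2PodPass/CurlMultusPod2PodFail helpers. Below is a minimal sketch of how such a
// check could be built; the helper name, target port (8080) and curl options are assumptions
// made for illustration, not the repository's actual implementation.
func curlOverSecondaryNet(oc *exutil.CLI, ns, srcPod, dstIP, expected string) bool {
// Exec curl inside the source pod against the destination pod's secondary-network (net1) address.
out, err := oc.AsAdmin().Run("exec").Args("-n", ns, srcPod, "--", "curl", "-s", "--connect-timeout", "5", dstIP+":8080").Output()
if err != nil {
e2e.Logf("curl from %s to %s failed: %v", srcPod, dstIP, err)
return false
}
// The test pods are expected to echo their podenvname; report whether it was served.
return strings.Contains(out, expected)
}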
| |||||
test case
|
openshift/openshift-tests-private
|
d37e4a7e-bf7e-4820-b16b-07d6b81d709b
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64812-Multihoming verify ingressandegress-ipblock policy. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-64812-Multihoming verify ingressandegress-ipblock policy. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
ipBlockTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-ingressandegress-template.yaml")
ipv4Cidr = "192.168.100.6/32"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}")
patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
reloadState := "True.*True.*False"
waitForNetworkOperatorState(oc, 10, 15, reloadState)
normalState := "True.*False.*False"
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "ingressandegress"
nsWithnad := ns1 + "/" + nadName
topology := []string{"layer2"}
for _, value := range topology {
e2e.Logf("Start testing the network topology: %v ----------------------------", value)
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/29",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: value,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
var podName, podLabel, podenvName, nodeLocation string
pod := []testMultihomingPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
// Create the pods on different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name="+podLabel)).NotTo(o.HaveOccurred())
}
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, _ := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, _ := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4)
exutil.By("Get IPs from the pod4's secondary interface")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod4IPv4, _ := getPodMultiNetwork(oc, ns1, pod4Name[0])
e2e.Logf("The v4 address of pod4 is: %v", pod4IPv4)
exutil.By("Get IPs from the pod5's secondary interface")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod5IPv4, _ := getPodMultiNetwork(oc, ns1, pod5Name[0])
e2e.Logf("The v4 address of pod5 is: %v", pod5IPv4)
exutil.By("Get IPs from the pod6's secondary interface")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
pod6IPv4, _ := getPodMultiNetwork(oc, ns1, pod6Name[0])
e2e.Logf("The v4 address of pod6 is: %v", pod6IPv4)
// Unlike multus/whereabouts, the six pods do not always receive IP addresses in pod-name order,
// so remap the existing pod names to the testpod names ordered by IP address.
type podInfor struct {
podName string
podenvName string
}
podData := map[string]podInfor{
pod1IPv4: {podName: pod1Name[0], podenvName: pod[0].podenvname},
pod2IPv4: {podName: pod2Name[0], podenvName: pod[1].podenvname},
pod3IPv4: {podName: pod3Name[0], podenvName: pod[2].podenvname},
pod4IPv4: {podName: pod4Name[0], podenvName: pod[3].podenvname},
pod5IPv4: {podName: pod5Name[0], podenvName: pod[4].podenvname},
pod6IPv4: {podName: pod6Name[0], podenvName: pod[5].podenvname},
}
testpod1IP := "192.168.100.1"
testpod1Name := podData[testpod1IP].podName
testpod1envName := podData[testpod1IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod1 are: %v, %v, %v", testpod1IP, testpod1Name, testpod1envName)
testpod2IP := "192.168.100.2"
testpod2Name := podData[testpod2IP].podName
testpod2envName := podData[testpod2IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod2 are: %v, %v, %v", testpod2IP, testpod2Name, testpod2envName)
testpod3IP := "192.168.100.3"
testpod3Name := podData[testpod3IP].podName
testpod3envName := podData[testpod3IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod3 are: %v, %v, %v", testpod3IP, testpod3Name, testpod3envName)
testpod4IP := "192.168.100.4"
testpod4Name := podData[testpod4IP].podName
testpod4envName := podData[testpod4IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod4 are: %v, %v, %v", testpod4IP, testpod4Name, testpod4envName)
testpod5IP := "192.168.100.5"
testpod5Name := podData[testpod5IP].podName
testpod5envName := podData[testpod5IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod5 are: %v, %v, %v", testpod5IP, testpod5Name, testpod5envName)
testpod6IP := "192.168.100.6"
testpod6Name := podData[testpod6IP].podName
testpod6envName := podData[testpod6IP].podenvName
e2e.Logf("The podIP, podName and podenvName of testpod6 are: %v, %v, %v", testpod6IP, testpod6Name, testpod6envName)
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod6IP, "net1", testpod6envName)
exutil.By("Create a egress ipblock to allow only ip4.src == 192.168.100.5 to ip4.dst == 192.168.100.6")
ingressandegress := multihomingIPBlock{
name: "ingressandegress",
template: ipBlockTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ingressandegress", "-n", ns1).Execute()
ingressandegress.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ingressandegress"))
exutil.By("Check a ACL rule is created for ip4.src == 192.168.100.5/32")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.src == 192.168.100.5/32"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check a ACL rule is created for ip4.dst == 192.168.100.6/32")
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.dst == 192.168.100.6/32"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check only ip4.src == 192.168.100.5 to ip4.dst == 192.168.100.6 will be allowed after applying policy")
CurlMultusPod2PodFail(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodFail(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodFail(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodFail(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodFail(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodFail(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod6IP, "net1", testpod6envName)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ingressandegress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.src == 192.168.100.5/32"), fmt.Sprintf("Failed to delete policy on the cluster"))
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.dst == 192.168.100.6/32"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod1Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod2Name, testpod5IP, "net1", testpod5envName)
CurlMultusPod2PodPass(oc, ns1, testpod3Name, testpod6IP, "net1", testpod6envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod1IP, "net1", testpod1envName)
CurlMultusPod2PodPass(oc, ns1, testpod4Name, testpod2IP, "net1", testpod2envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod3IP, "net1", testpod3envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod4IP, "net1", testpod4envName)
CurlMultusPod2PodPass(oc, ns1, testpod5Name, testpod6IP, "net1", testpod6envName)
e2e.Logf("Delete all the pods and NAD for topology: %v ----------------------------", value)
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "--all", "-n", ns1).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
_, delNADErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Output()
o.Expect(delNADErr).NotTo(o.HaveOccurred())
e2e.Logf("End testing the network topology: %v ----------------------------", value)
}
})
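// Note (illustrative addition): the o.Eventually blocks above poll the OVN northbound ACL table
// until the expected match string appears or disappears. The same pattern can be factored into a
// wait.Poll based helper; this is a sketch only and not an existing function in the repository
// (the time, strings, wait, exutil and e2e imports are assumed from the surrounding file).
func waitForACLMatch(oc *exutil.CLI, ovnPod, match string) error {
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
return wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
// List all ACLs from the ovnkube-node pod and look for the expected match expression.
out, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, listACLCmd)
if err != nil {
e2e.Logf("waiting for ACL %q: %v", match, err)
return false, nil
}
return strings.Contains(out, match), nil
})
}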
| |||||
test case
|
openshift/openshift-tests-private
|
c7487932-aef0-41d2-b535-62070f2b434d
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-65002-Multihoming verify ingress-ipblock policy with static IP. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-65002-Multihoming verify ingress-ipblock policy with static IP. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-staticpod-template.yaml")
ipBlockIngressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-ingress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}")
patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}")
reloadState := "True.*True.*False"
normalState := "True.*False.*False"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Get the name of testing namespace")
ns1 := oc.Namespace()
nadName := "ipblockingress65002"
nsWithnad := ns1 + "/" + nadName
topology := "layer2"
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: topology,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", ns1).Execute()
var podName, podLabel, podenvName, nodeLocation, macAddress, ipAddress string
pod := []testMultihomingStaticPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
macAddress = "02:03:04:05:06:0" + strconv.Itoa(i)
ipAddress = "192.168.100." + strconv.Itoa(i) + "/" + "29"
// Create the pods on different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingStaticPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
macaddress: macAddress,
ipaddress: ipAddress,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingStaticPod(oc)
}
exutil.By("Check all pods are online")
for i := 1; i < 7; i++ {
podLabel = "multihoming-pod" + strconv.Itoa(i)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns1, "name="+podLabel), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", podLabel))
}
exutil.By("Get pod's name from each pod")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("Create ingress ipblock to block the traffic from the pods in the range of 192.168.100.4 to 192.168.100.6")
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-ingress", "-n", ns1).Output()
ipIngressBlock := multihomingIPBlock{
name: "ipblock-ingress",
template: ipBlockIngressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
ipIngressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-ingress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check all pods can communicate to 192.168.100.1-3 but can not communicate to 192.168.100.4-6 after applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-ingress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.src == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
exutil.By("All curl should pass again after deleting policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
})
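// Note (illustrative addition): the policy CIDR 192.168.100.0/30 covers only the addresses
// .0 through .3, which is why the statically addressed pods at .4-.6 fall outside the allowed
// source range and have their traffic dropped. A self-contained check of that membership:
package main

import (
"fmt"
"net"
)

func main() {
_, cidr, _ := net.ParseCIDR("192.168.100.0/30")
for i := 1; i <= 6; i++ {
ip := net.ParseIP(fmt.Sprintf("192.168.100.%d", i))
// Prints true for .1-.3 and false for .4-.6.
fmt.Printf("%s in %s: %v\n", ip, cidr, cidr.Contains(ip))
}
}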
| |||||
test case
|
openshift/openshift-tests-private
|
ce8ff512-f9aa-4e85-abaa-81d0432aa8d7
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-65003-Multihoming verify egress-ipblock policy with static IP. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multihoming.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:weliang-Medium-65003-Multihoming verify egress-ipblock policy with static IP. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-staticpod-template.yaml")
ipBlockEgressTemplate = filepath.Join(buildPruningBaseDir, "ipBlock-egress-template.yaml")
ipv4Cidr = "192.168.100.0/30"
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes")
}
exutil.By("Enable useMultiNetworkPolicy in the cluster")
patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}")
patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}")
reloadState := "True.*True.*False"
normalState := "True.*False.*False"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("NetworkOperatorStatus should back to normal after enable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 15, reloadState)
waitForNetworkOperatorState(oc, 10, 15, normalState)
exutil.By("Get the name of a namespace")
ns1 := oc.Namespace()
nadName := "ipblockegress65003"
nsWithnad := ns1 + "/" + nadName
topology := "layer2"
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: topology,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create six testing pods consuming above network-attach-defintion in ns1")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", ns1).Execute()
var podName, podLabel, podenvName, nodeLocation, macAddress, ipAddress string
pod := []testMultihomingStaticPod{}
for i := 1; i < 7; i++ {
podName = "multihoming-pod-" + strconv.Itoa(i)
podLabel = "multihoming-pod" + strconv.Itoa(i)
podenvName = "Hello multihoming-pod-" + strconv.Itoa(i)
macAddress = "02:03:04:05:06:0" + strconv.Itoa(i)
ipAddress = "192.168.100." + strconv.Itoa(i) + "/" + "29"
// Create the pods on different nodes.
if i < 4 {
nodeLocation = nodeList.Items[0].Name
} else {
nodeLocation = nodeList.Items[1].Name
}
p := testMultihomingStaticPod{
name: podName,
namespace: ns1,
podlabel: podLabel,
nadname: nadName,
nodename: nodeLocation,
podenvname: podenvName,
macaddress: macAddress,
ipaddress: ipAddress,
template: multihomingPodTemplate,
}
pod = append(pod, p)
p.createTestMultihomingStaticPod(oc)
}
exutil.By("Check all pods are online")
for i := 1; i < 7; i++ {
podLabel = "multihoming-pod" + strconv.Itoa(i)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns1, "name="+podLabel), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", podLabel))
}
exutil.By("Get pod's name from each pod")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod4Name := getPodName(oc, ns1, "name=multihoming-pod4")
pod5Name := getPodName(oc, ns1, "name=multihoming-pod5")
pod6Name := getPodName(oc, ns1, "name=multihoming-pod6")
exutil.By("All curls should pass before applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("Create a egress ipblock to block the traffic to the pods in the range of 192.168.100.4 to 192.168.100.6")
defer oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Execute()
ipEgressBlock := multihomingIPBlock{
name: "ipblock-egress",
template: ipBlockEgressTemplate,
cidr: ipv4Cidr,
namespace: ns1,
policyfor: nsWithnad,
}
ipEgressBlock.createMultihomingipBlockIngressObject(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("ipblock-egress"))
exutil.By("Check a ACL rule is created for 192.168.100.0/30")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listACLCmd := "ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl"
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL applied, %v", listErr)
}
return listOutput
}, "60s", "10s").Should(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to apply policy on the cluster"))
exutil.By("Check all pods can communicate to 192.168.100.1-3 but can not communicate to 192.168.100.4-6 after applying policy")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodFail(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
exutil.By("All curl should pass again after deleting policy")
_, policydelerr := oc.AsAdmin().Run("delete").Args("multi-networkpolicy", "ipblock-egress", "-n", ns1).Output()
o.Expect(policydelerr).NotTo(o.HaveOccurred())
ovnMasterPodNewName := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodNewName, listACLCmd)
if listErr != nil {
e2e.Logf("Wait for policy ACL deleted, %v", listErr)
}
return listOutput
}, "60s", "10s").ShouldNot(o.ContainSubstring("ip4.dst == 192.168.100.0/30"), fmt.Sprintf("Failed to delete policy on the cluster"))
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], "192.168.100.5", "net1", pod[4].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], "192.168.100.6", "net1", pod[5].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.1", "net1", pod[0].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod4Name[0], "192.168.100.2", "net1", pod[1].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.3", "net1", pod[2].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod5Name[0], "192.168.100.4", "net1", pod[3].podenvname)
CurlMultusPod2PodPass(oc, ns1, pod6Name[0], "192.168.100.5", "net1", pod[4].podenvname)
})
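// Note (illustrative addition): the multihoming-staticpod-template consumed above is assumed to
// request the fixed addresses through the Multus network-selection annotation, whose
// NetworkSelectionElement supports "ips" and "mac" fields. The snippet below sketches that
// mechanism only; it is not the actual content of the repository's template.
package main

import "fmt"

func main() {
nadName, ns := "ipblockegress65003", "example-ns" // the namespace here is a placeholder value
ann := fmt.Sprintf(`[{"name":"%s","namespace":"%s","ips":["192.168.100.1/29"],"mac":"02:03:04:05:06:01"}]`, nadName, ns)
// The value would be set on the pod as the k8s.v1.cni.cncf.io/networks annotation.
fmt.Println(ann)
}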
| |||||
test
|
openshift/openshift-tests-private
|
ef7fd3b3-5b14-4d0a-9254-336998ba7053
|
node_identity
|
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/node_identity.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = g.Describe("[sig-networking] SDN node-identity", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-node-identity", exutil.KubeConfigPath())
notFountMsg = fmt.Sprintf("\"network-node-identity.openshift.io\" not found")
opNamespace = "openshift-network-operator"
cmName = "network-node-identity"
)
g.BeforeEach(func() {
// Check network node identity webhook is enabled on cluster
webhook, err := checkNodeIdentityWebhook(oc)
networkType := checkNetworkType(oc)
if err != nil || strings.Contains(webhook, notFountMsg) || !strings.Contains(networkType, "ovn") {
g.Skip("The cluster does not have node identity webhook enabled or OVN network plugin, skipping tests")
}
e2e.Logf("The Node Identity webhook enabled on the cluster : %s", webhook)
o.Expect(strings.Split(webhook, " ")).Should(o.HaveLen(2))
})
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:asood-High-68157-Node identity validating webhook can be disabled and enabled successfully [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
patchEnableWebhook = fmt.Sprintf("{\"data\":{\"enabled\":\"true\"}}")
)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create config map to disable webhook")
_, err := disableNodeIdentityWebhook(oc, opNamespace, cmName)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
patchResourceAsAdmin(oc, "configmap/"+cmName, patchEnableWebhook, opNamespace)
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", cmName, "-n", opNamespace).Execute()
webhook, err := checkNodeIdentityWebhook(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Split(webhook, " ")).Should(o.HaveLen(2))
}()
exutil.By("NetworkOperatorStatus should back to normal after webhook is disabled")
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
exutil.By("Verify the webhook is disabled")
webhook, _ := checkNodeIdentityWebhook(oc)
o.Expect(strings.Contains(webhook, notFountMsg)).To(o.BeTrue())
exutil.By("Verify pod is successfully scheduled on a node without the validating webhook")
pod1 := pingPodResource{
name: "hello-pod-1",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Enable the webhook again")
patchResourceAsAdmin(oc, "configmap/"+cmName, patchEnableWebhook, opNamespace)
exutil.By("NetworkOperatorStatus should back to normal after webhook is enabled")
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
webhook, err = checkNodeIdentityWebhook(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Split(webhook, " ")).Should(o.HaveLen(2))
exutil.By("Verify pod is successfully scheduled on a node after the webhook is enabled")
pod2 := pingPodResource{
name: "hello-pod-2",
namespace: ns,
template: pingPodTemplate,
}
pod2.createPingPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
})
g.It("NonHyperShiftHOST-Author:asood-High-68156-ovnkube-node should be modifying annotations on its own node and pods only.[Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
caseID = "68156"
kubeconfigFilePath = "/tmp/kubeconfig-" + caseID
userContext = "default-context"
)
exutil.By("Get list of nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
workerNodeCount := len(nodeList.Items)
o.Expect(workerNodeCount == 0).ShouldNot(o.BeTrue())
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By(fmt.Sprintf("Get ovnkube-node pod name for a node %s", nodeList.Items[0].Name))
ovnKubeNodePodName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ovnKubeNodePodName).NotTo(o.BeEmpty())
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("node", nodeList.Items[0].Name, "k8s.ovn.org/node-mgmt-port-").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
_, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", "rm -f /tmp/*.yaml")
o.Expect(cmdErr).NotTo(o.HaveOccurred())
_, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("rm -f %s", kubeconfigFilePath))
o.Expect(cmdErr).NotTo(o.HaveOccurred())
}()
exutil.By(fmt.Sprintf("Create a kubeconfig file on the node %s", nodeList.Items[0].Name))
o.Expect(generateKubeConfigFileForContext(oc, nodeList.Items[0].Name, ovnKubeNodePodName, kubeconfigFilePath, userContext)).To(o.BeTrue())
exutil.By("Verify pod is successfully scheduled on a node")
podns := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns.createPingPodNode(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Generate YAML for the pod and save it on node")
_, podFileErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; oc -n %s get pod %s -o json > /tmp/%s-%s.yaml", kubeconfigFilePath, podns.namespace, podns.name, podns.name, caseID))
o.Expect(podFileErr).NotTo(o.HaveOccurred())
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("Generate YAML for the node %s and save it on node", nodeList.Items[i].Name))
_, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; oc get node %s -o json > /tmp/node-%s-%s.yaml", kubeconfigFilePath, nodeList.Items[i].Name, caseID, strconv.Itoa(i)))
o.Expect(cmdErr).NotTo(o.HaveOccurred())
//single node cluster case
if workerNodeCount == 1 {
break
}
}
exutil.By("Verify the annotation can be added to the node where ovnkube-node is impersonated")
patchNodePayload := `[{"op": "add", "path": "/metadata/annotations/k8s.ovn.org~1node-mgmt-port", "value":"{\"PfId\":1, \"FuncId\":1}"}]`
patchNodeCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl patch -f /tmp/node-%s-0.yaml --type='json' --subresource=status -p='%s'", kubeconfigFilePath, caseID, patchNodePayload)
cmdOutput, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; %s", kubeconfigFilePath, patchNodeCmd))
o.Expect(cmdErr).NotTo(o.HaveOccurred())
e2e.Logf(cmdOutput)
if workerNodeCount > 1 {
exutil.By("Verify the annotation cannot be added to the node where ovnkube-node is not impersonated")
patchNodeCmd = fmt.Sprintf("export KUBECONFIG=%s; kubectl patch -f /tmp/node-%s-1.yaml --type='json' --subresource=status -p='%s'", kubeconfigFilePath, caseID, patchNodePayload)
_, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; %s", kubeconfigFilePath, patchNodeCmd))
o.Expect(cmdErr).To(o.HaveOccurred())
}
exutil.By("Verify ovnkube-node is not allowed to add the annotation to pod")
patchPodDisallowedPayload := `[{"op": "add", "path": "/metadata/annotations/description", "value":"{\"hello-pod\"}"}]`
patchPodCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl -n %s patch -f /tmp/%s-%s.yaml --type='json' --subresource=status -p='%s'", kubeconfigFilePath, podns.namespace, podns.name, caseID, patchPodDisallowedPayload)
_, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; %s", kubeconfigFilePath, patchPodCmd))
o.Expect(cmdErr).To(o.HaveOccurred())
})
})
var _ = g.Describe("[sig-networking] SDN node-identity", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("node", exutil.KubeConfigPath())
)
g.It("Longduration-NonPreRelease-Author:asood-Critical-68690-When adding nodes, the overlapped node-subnet should not be allocated. [Disruptive]", func() {
exutil.By("1. Create a new machineset, get the new node created\n")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-68690"
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSet(oc)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
for i := 0; i < 2; i++ {
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[i])
e2e.Logf("Node with name %v added to cluster", nodeName)
}
exutil.By("2. Check host subnet is not over lapping for the nodes\n")
nodeList, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
similarSubnetNodesFound, _ := findNodesWithSameSubnet(oc, nodeList)
o.Expect(similarSubnetNodesFound).To(o.BeFalse())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
2ab6c24c-4a22-4b05-84d8-363a4afe17b0
|
NonHyperShiftHOST-Longduration-NonPreRelease-Author:asood-High-68157-Node identity validating webhook can be disabled and enabled successfully [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/node_identity.go
|
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:asood-High-68157-Node identity validating webhook can be disabled and enabled successfully [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
patchEnableWebhook = fmt.Sprintf("{\"data\":{\"enabled\":\"true\"}}")
)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create config map to disable webhook")
_, err := disableNodeIdentityWebhook(oc, opNamespace, cmName)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
patchResourceAsAdmin(oc, "configmap/"+cmName, patchEnableWebhook, opNamespace)
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", cmName, "-n", opNamespace).Execute()
webhook, err := checkNodeIdentityWebhook(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Split(webhook, " ")).Should(o.HaveLen(2))
}()
exutil.By("NetworkOperatorStatus should back to normal after webhook is disabled")
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
exutil.By("Verify the webhook is disabled")
webhook, _ := checkNodeIdentityWebhook(oc)
o.Expect(strings.Contains(webhook, notFountMsg)).To(o.BeTrue())
exutil.By("Verify pod is successfully scheduled on a node without the validating webhook")
pod1 := pingPodResource{
name: "hello-pod-1",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Enable the webhook again")
patchResourceAsAdmin(oc, "configmap/"+cmName, patchEnableWebhook, opNamespace)
exutil.By("NetworkOperatorStatus should back to normal after webhook is enabled")
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
webhook, err = checkNodeIdentityWebhook(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Split(webhook, " ")).Should(o.HaveLen(2))
exutil.By("Verify pod is successfully scheduled on a node after the webhook is enabled")
pod2 := pingPodResource{
name: "hello-pod-2",
namespace: ns,
template: pingPodTemplate,
}
pod2.createPingPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
})
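// Note (illustrative addition): checkNodeIdentityWebhook is defined elsewhere in the suite; judging
// by notFountMsg above, the resource it inspects is the network-node-identity.openshift.io
// ValidatingWebhookConfiguration. A hedged sketch of such a lookup follows; only the resource name
// is taken from the test above, and the jsonpath fields are assumptions.
func lookupNodeIdentityWebhook(oc *exutil.CLI) (string, error) {
// Query the validating webhook configuration installed by the node-identity feature.
return oc.AsAdmin().WithoutNamespace().Run("get").Args("validatingwebhookconfiguration",
"network-node-identity.openshift.io", "-o=jsonpath={.metadata.name} {.metadata.uid}").Output()
}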
| |||||
test case
|
openshift/openshift-tests-private
|
13ebff2f-785d-4879-9469-8c09af99ecfb
|
NonHyperShiftHOST-Author:asood-High-68156-ovnkube-node should be modifying annotations on its own node and pods only.[Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/node_identity.go
|
g.It("NonHyperShiftHOST-Author:asood-High-68156-ovnkube-node should be modifying annotations on its own node and pods only.[Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
caseID = "68156"
kubeconfigFilePath = "/tmp/kubeconfig-" + caseID
userContext = "default-context"
)
exutil.By("Get list of nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
workerNodeCount := len(nodeList.Items)
o.Expect(workerNodeCount == 0).ShouldNot(o.BeTrue())
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By(fmt.Sprintf("Get ovnkube-node pod name for a node %s", nodeList.Items[0].Name))
ovnKubeNodePodName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ovnKubeNodePodName).NotTo(o.BeEmpty())
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("node", nodeList.Items[0].Name, "k8s.ovn.org/node-mgmt-port-").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
_, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", "rm -f /tmp/*.yaml")
o.Expect(cmdErr).NotTo(o.HaveOccurred())
_, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("rm -f %s", kubeconfigFilePath))
o.Expect(cmdErr).NotTo(o.HaveOccurred())
}()
exutil.By(fmt.Sprintf("Create a kubeconfig file on the node %s", nodeList.Items[0].Name))
o.Expect(generateKubeConfigFileForContext(oc, nodeList.Items[0].Name, ovnKubeNodePodName, kubeconfigFilePath, userContext)).To(o.BeTrue())
exutil.By("Verify pod is successfully scheduled on a node")
podns := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns.createPingPodNode(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Generate YAML for the pod and save it on node")
_, podFileErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; oc -n %s get pod %s -o json > /tmp/%s-%s.yaml", kubeconfigFilePath, podns.namespace, podns.name, podns.name, caseID))
o.Expect(podFileErr).NotTo(o.HaveOccurred())
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("Generate YAML for the node %s and save it on node", nodeList.Items[i].Name))
_, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; oc get node %s -o json > /tmp/node-%s-%s.yaml", kubeconfigFilePath, nodeList.Items[i].Name, caseID, strconv.Itoa(i)))
o.Expect(cmdErr).NotTo(o.HaveOccurred())
//single node cluster case
if workerNodeCount == 1 {
break
}
}
exutil.By("Verify the annotation can be added to the node where ovnkube-node is impersonated")
patchNodePayload := `[{"op": "add", "path": "/metadata/annotations/k8s.ovn.org~1node-mgmt-port", "value":"{\"PfId\":1, \"FuncId\":1}"}]`
patchNodeCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl patch -f /tmp/node-%s-0.yaml --type='json' --subresource=status -p='%s'", kubeconfigFilePath, caseID, patchNodePayload)
cmdOutput, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; %s", kubeconfigFilePath, patchNodeCmd))
o.Expect(cmdErr).NotTo(o.HaveOccurred())
e2e.Logf(cmdOutput)
if workerNodeCount > 1 {
exutil.By("Verify the annotation cannot be added to the node where ovnkube-node is not impersonated")
patchNodeCmd = fmt.Sprintf("export KUBECONFIG=%s; kubectl patch -f /tmp/node-%s-1.yaml --type='json' --subresource=status -p='%s'", kubeconfigFilePath, caseID, patchNodePayload)
_, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; %s", kubeconfigFilePath, patchNodeCmd))
o.Expect(cmdErr).To(o.HaveOccurred())
}
exutil.By("Verify ovnkube-node is not allowed to add the annotation to pod")
patchPodDisallowedPayload := `[{"op": "add", "path": "/metadata/annotations/description", "value":"{\"hello-pod\"}"}]`
patchPodCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl -n %s patch -f /tmp/%s-%s.yaml --type='json' --subresource=status -p='%s'", kubeconfigFilePath, podns.namespace, podns.name, caseID, patchPodDisallowedPayload)
_, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePodName, "ovnkube-controller", fmt.Sprintf("export KUBECONFIG=%s; %s", kubeconfigFilePath, patchPodCmd))
o.Expect(cmdErr).To(o.HaveOccurred())
})
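// Note (illustrative addition): the patch path above uses k8s.ovn.org~1node-mgmt-port because
// JSON Pointer (RFC 6901) escapes "~" as "~0" and "/" as "~1"; the literal annotation key is
// k8s.ovn.org/node-mgmt-port. A self-contained illustration of the escaping:
package main

import (
"fmt"
"strings"
)

func main() {
key := "k8s.ovn.org/node-mgmt-port"
// RFC 6901: escape "~" as "~0" first, then "/" as "~1".
escaped := strings.NewReplacer("~", "~0", "/", "~1").Replace(key)
fmt.Println("/metadata/annotations/" + escaped) // /metadata/annotations/k8s.ovn.org~1node-mgmt-port
}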
| |||||
test case
|
openshift/openshift-tests-private
|
bc7ac85f-39bd-4b58-8c93-ef29ff173962
|
Longduration-NonPreRelease-Author:asood-Critical-68690-When adding nodes, the overlapped node-subnet should not be allocated. [Disruptive]
|
['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/node_identity.go
|
g.It("Longduration-NonPreRelease-Author:asood-Critical-68690-When adding nodes, the overlapped node-subnet should not be allocated. [Disruptive]", func() {
exutil.By("1. Create a new machineset, get the new node created\n")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-68690"
machineSet := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSet(oc)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
for i := 0; i < 2; i++ {
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[i])
e2e.Logf("Node with name %v added to cluster", nodeName)
}
exutil.By("2. Check host subnet is not over lapping for the nodes\n")
nodeList, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
similarSubnetNodesFound, _ := findNodesWithSameSubnet(oc, nodeList)
o.Expect(similarSubnetNodesFound).To(o.BeFalse())
})
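// Note (illustrative addition): findNodesWithSameSubnet is implemented elsewhere in the suite.
// Conceptually it compares the per-node OVN subnet allocations; the sketch below reads the
// k8s.ovn.org/node-subnets annotation (the standard OVN-Kubernetes location for this data) and
// reports duplicates. The exact jsonpath and parsing are assumptions, not the repository's code.
func detectDuplicateNodeSubnets(oc *exutil.CLI, nodes []string) []string {
seen := map[string]string{}
var dups []string
for _, node := range nodes {
subnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node,
"-o=jsonpath={.metadata.annotations.k8s\\.ovn\\.org/node-subnets}").Output()
if err != nil || subnet == "" {
continue
}
if prev, ok := seen[subnet]; ok {
dups = append(dups, fmt.Sprintf("%s and %s share subnet allocation %s", prev, node, subnet))
}
seen[subnet] = node
}
return dups
}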
| |||||
test
|
openshift/openshift-tests-private
|
72e26a13-5732-4668-b657-53fc20dc0e18
|
ovn_sd_alerts
|
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
package networking
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN alerts", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-alerts", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-51438-Upgrade NoRunningOvnControlPlane to critical severity and inclue runbook.", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NoRunningOvnControlPlane"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoRunningOvnControlPlane\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("critical"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoRunningOvnControlPlane\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NoRunningOvnControlPlane.md"))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-51439-Upgrade NoOvnClusterManagerLeader to critical severity and inclue runbook.", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NoOvnClusterManagerLeader"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoOvnClusterManagerLeader\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("critical"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoOvnClusterManagerLeader\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NoOvnClusterManagerLeader.md"))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-51722-Create runbook and link SOP for SouthboundStale alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("SouthboundStale"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"SouthboundStale\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"SouthboundStale\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/SouthboundStaleAlert.md"))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-51724-Create runbook and link SOP for V4SubnetAllocationThresholdExceeded alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("V4SubnetAllocationThresholdExceeded"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"V4SubnetAllocationThresholdExceeded\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"V4SubnetAllocationThresholdExceeded\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/V4SubnetAllocationThresholdExceeded.md"))
})
g.It("Author:weliang-Medium-51726-Create runbook and link SOP for NodeWithoutOVNKubeNodePodRunning alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NodeWithoutOVNKubeNodePodRunning"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NodeWithoutOVNKubeNodePodRunning\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NodeWithoutOVNKubeNodePodRunning\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NodeWithoutOVNKubeNodePodRunning.md"))
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-51723-bug 2094068 Create runbook and link SOP for NorthboundStale alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NorthboundStale"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NorthboundStale\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NorthboundStale\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NorthboundStaleAlert.md"))
})
g.It("Author:qiowang-Medium-53999-OVN-K alerts for ovn controller disconnection", func() {
alertSeverity, alertExpr, runBook := getOVNAlertNetworkingRules(oc, "OVNKubernetesControllerDisconnectedSouthboundDatabase")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("max_over_time(ovn_controller_southbound_database_connected[5m]) == 0"))
o.Expect(runBook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/OVNKubernetesControllerDisconnectedSouthboundDatabase.md"))
})
g.It("Author:qiowang-Medium-60705-Verify alert OVNKubernetesNodeOVSOverflowKernel", func() {
alertSeverity, alertExpr, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodeOVSOverflowKernel")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("increase(ovs_vswitchd_dp_flows_lookup_lost[5m]) > 0"))
})
g.It("Author:qiowang-Medium-60706-Verify alert OVNKubernetesNodeOVSOverflowUserspace", func() {
alertSeverity, alertExpr, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodeOVSOverflowUserspace")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("increase(ovs_vswitchd_netlink_overflow[5m]) > 0"))
})
g.It("Author:qiowang-Medium-60709-Verify alert OVNKubernetesResourceRetryFailure", func() {
alertSeverity, alertExpr, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesResourceRetryFailure")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("increase(ovnkube_resource_retry_failures_total[10m]) > 0"))
})
g.It("Author:qiowang-Medium-72328-Verify alert OVNKubernetesNodePodAddError and OVNKubernetesNodePodDeleteError", func() {
alertSeverity1, alertExpr1, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodePodAddError")
o.Expect(alertSeverity1).To(o.ContainSubstring("warning"))
o.Expect(alertExpr1).To(o.ContainSubstring(`sum by(instance, namespace) (rate(ovnkube_node_cni_request_duration_seconds_count{command="ADD",err="true"}[5m]))`))
alertSeverity2, alertExpr2, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodePodDeleteError")
o.Expect(alertSeverity2).To(o.ContainSubstring("warning"))
o.Expect(alertExpr2).To(o.ContainSubstring(`sum by(instance, namespace) (rate(ovnkube_node_cni_request_duration_seconds_count{command="DEL",err="true"}[5m]))`))
})
g.It("NonHyperShiftHOST-Author:qiowang-Medium-72329-Verify alert OVNKubernetesNorthboundDatabaseCPUUsagehigh and OVNKubernetesSouthboundDatabaseCPUUsagehigh", func() {
alertSeverity1, alertExpr1, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNorthboundDatabaseCPUUsageHigh")
o.Expect(alertSeverity1).To(o.ContainSubstring("info"))
o.Expect(alertExpr1).To(o.ContainSubstring(`(sum(rate(container_cpu_usage_seconds_total{container="nbdb"}[5m])) BY`))
o.Expect(alertExpr1).To(o.ContainSubstring(`(instance, name, namespace)) > 0.8`))
alertSeverity2, alertExpr2, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesSouthboundDatabaseCPUUsageHigh")
o.Expect(alertSeverity2).To(o.ContainSubstring("info"))
o.Expect(alertExpr2).To(o.ContainSubstring(`(sum(rate(container_cpu_usage_seconds_total{container="sbdb"}[5m])) BY`))
o.Expect(alertExpr2).To(o.ContainSubstring(`(instance, name, namespace)) > 0.8`))
})
g.It("NonHyperShiftHOST-Author:qiowang-Medium-72330-Verify alert V6SubnetAllocationThresholdExceeded", func() {
alertSeverity, alertExpr, _ := getOVNAlertMasterRules(oc, "V6SubnetAllocationThresholdExceeded")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring(`ovnkube_clustermanager_allocated_v6_host_subnets / ovnkube_clustermanager_num_v6_host_subnets`))
o.Expect(alertExpr).To(o.ContainSubstring(`> 0.8`))
})
g.It("Author:qiowang-NonHyperShiftHOST-Medium-53926-OVN-K alerts for ovn northd inactivity", func() {
alertSeverity, alertExpr, runBook := getOVNAlertNetworkingRules(oc, "OVNKubernetesNorthdInactive")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring(`count(ovn_northd_status != 1) BY (instance, name, namespace) > 0`))
o.Expect(runBook).To(o.ContainSubstring(`https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/OVNKubernetesNorthdInactive.md`))
})
})
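// The specs above repeat the same jsonpath lookups for every alert. Below is a minimal
// consolidation sketch (a hypothetical helper, not referenced by the suite; it is similar
// in spirit to the getOVNAlertNetworkingRules/getOVNAlertMasterRules helpers the later
// specs call, but parameterized by the PrometheusRule name it queries):
func getOVNAlertFields(oc *exutil.CLI, ruleName, alertName string) (severity string, runbook string, err error) {
selector := `{.spec.groups[*].rules[?(@.alert=="` + alertName + `")]`
severity, err = oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", ruleName, "-o=jsonpath="+selector+".labels.severity}").Output()
if err != nil {
return "", "", err
}
runbook, err = oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", ruleName, "-o=jsonpath="+selector+".annotations.runbook_url}").Output()
return severity, runbook, err
}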
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
f4bebe5f-2ba0-4b8b-b70c-6b573a16952b
|
NonHyperShiftHOST-Author:weliang-Medium-51438-Upgrade NoRunningOvnControlPlane to critical severity and inclue runbook.
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-51438-Upgrade NoRunningOvnControlPlane to critical severity and inclue runbook.", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NoRunningOvnControlPlane"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoRunningOvnControlPlane\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("critical"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoRunningOvnControlPlane\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NoRunningOvnControlPlane.md"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
9787632e-35cd-4028-9dd2-7a6689363238
|
NonHyperShiftHOST-Author:weliang-Medium-51439-Upgrade NoOvnClusterManagerLeader to critical severity and inclue runbook.
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-51439-Upgrade NoOvnClusterManagerLeader to critical severity and inclue runbook.", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NoOvnClusterManagerLeader"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoOvnClusterManagerLeader\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("critical"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NoOvnClusterManagerLeader\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NoOvnClusterManagerLeader.md"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
6955c6f9-4d68-4a83-9c74-cc57477c1ad3
|
NonHyperShiftHOST-Author:weliang-Medium-51722-Create runbook and link SOP for SouthboundStale alert
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-51722-Create runbook and link SOP for SouthboundStale alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("SouthboundStale"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"SouthboundStale\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"SouthboundStale\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/SouthboundStaleAlert.md"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
7b9e851b-1c62-497f-8779-dd7b07eadf99
|
NonHyperShiftHOST-Author:weliang-Medium-51724-Create runbook and link SOP for V4SubnetAllocationThresholdExceeded alert
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-51724-Create runbook and link SOP for V4SubnetAllocationThresholdExceeded alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("V4SubnetAllocationThresholdExceeded"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"V4SubnetAllocationThresholdExceeded\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"V4SubnetAllocationThresholdExceeded\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/V4SubnetAllocationThresholdExceeded.md"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
a0a412d7-4574-41a3-aa76-e6788b6d1006
|
Author:weliang-Medium-51726-Create runbook and link SOP for NodeWithoutOVNKubeNodePodRunning alert
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("Author:weliang-Medium-51726-Create runbook and link SOP for NodeWithoutOVNKubeNodePodRunning alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NodeWithoutOVNKubeNodePodRunning"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NodeWithoutOVNKubeNodePodRunning\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NodeWithoutOVNKubeNodePodRunning\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NodeWithoutOVNKubeNodePodRunning.md"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
9175c77e-6618-40ed-90f6-46e4fe5021ae
|
NonHyperShiftHOST-Author:weliang-Medium-51723-bug 2094068 Create runbook and link SOP for NorthboundStale alert
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-51723-bug 2094068 Create runbook and link SOP for NorthboundStale alert", func() {
alertName, NameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output()
o.Expect(NameErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertName is %v", alertName)
o.Expect(alertName).To(o.ContainSubstring("NorthboundStale"))
alertSeverity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NorthboundStale\")].labels.severity}").Output()
o.Expect(severityErr).NotTo(o.HaveOccurred())
e2e.Logf("alertSeverity is %v", alertSeverity)
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
alertRunbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", "openshift-ovn-kubernetes", "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\"NorthboundStale\")].annotations.runbook_url}").Output()
o.Expect(runbookErr).NotTo(o.HaveOccurred())
e2e.Logf("The alertRunbook is %v", alertRunbook)
o.Expect(alertRunbook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/NorthboundStaleAlert.md"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3faca3e0-70ab-4fd8-8520-6f4745fea09f
|
Author:qiowang-Medium-53999-OVN-K alerts for ovn controller disconnection
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("Author:qiowang-Medium-53999-OVN-K alerts for ovn controller disconnection", func() {
alertSeverity, alertExpr, runBook := getOVNAlertNetworkingRules(oc, "OVNKubernetesControllerDisconnectedSouthboundDatabase")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("max_over_time(ovn_controller_southbound_database_connected[5m]) == 0"))
o.Expect(runBook).To(o.ContainSubstring("https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/OVNKubernetesControllerDisconnectedSouthboundDatabase.md"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
872e1dc0-02ff-4dff-8807-334cf9e3861d
|
Author:qiowang-Medium-60705-Verify alert OVNKubernetesNodeOVSOverflowKernel
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("Author:qiowang-Medium-60705-Verify alert OVNKubernetesNodeOVSOverflowKernel", func() {
alertSeverity, alertExpr, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodeOVSOverflowKernel")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("increase(ovs_vswitchd_dp_flows_lookup_lost[5m]) > 0"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
66c3c92d-7d22-4fb1-9845-1c76aaf7a08e
|
Author:qiowang-Medium-60706-Verify alert OVNKubernetesNodeOVSOverflowUserspace
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("Author:qiowang-Medium-60706-Verify alert OVNKubernetesNodeOVSOverflowUserspace", func() {
alertSeverity, alertExpr, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodeOVSOverflowUserspace")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("increase(ovs_vswitchd_netlink_overflow[5m]) > 0"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
aefbe9d6-7e87-4268-91b6-ff4653006789
|
Author:qiowang-Medium-60709-Verify alert OVNKubernetesResourceRetryFailure
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("Author:qiowang-Medium-60709-Verify alert OVNKubernetesResourceRetryFailure", func() {
alertSeverity, alertExpr, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesResourceRetryFailure")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring("increase(ovnkube_resource_retry_failures_total[10m]) > 0"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
badc51fc-2d14-47ad-9b29-bd07519a4acc
|
Author:qiowang-Medium-72328-Verify alert OVNKubernetesNodePodAddError and OVNKubernetesNodePodDeleteError
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("Author:qiowang-Medium-72328-Verify alert OVNKubernetesNodePodAddError and OVNKubernetesNodePodDeleteError", func() {
alertSeverity1, alertExpr1, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodePodAddError")
o.Expect(alertSeverity1).To(o.ContainSubstring("warning"))
o.Expect(alertExpr1).To(o.ContainSubstring(`sum by(instance, namespace) (rate(ovnkube_node_cni_request_duration_seconds_count{command="ADD",err="true"}[5m]))`))
alertSeverity2, alertExpr2, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNodePodDeleteError")
o.Expect(alertSeverity2).To(o.ContainSubstring("warning"))
o.Expect(alertExpr2).To(o.ContainSubstring(`sum by(instance, namespace) (rate(ovnkube_node_cni_request_duration_seconds_count{command="DEL",err="true"}[5m]))`))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0592c551-9859-4193-9fba-1211c4332b0d
|
NonHyperShiftHOST-Author:qiowang-Medium-72329-Verify alert OVNKubernetesNorthboundDatabaseCPUUsagehigh and OVNKubernetesSouthboundDatabaseCPUUsagehigh
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("NonHyperShiftHOST-Author:qiowang-Medium-72329-Verify alert OVNKubernetesNorthboundDatabaseCPUUsagehigh and OVNKubernetesSouthboundDatabaseCPUUsagehigh", func() {
alertSeverity1, alertExpr1, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesNorthboundDatabaseCPUUsageHigh")
o.Expect(alertSeverity1).To(o.ContainSubstring("info"))
o.Expect(alertExpr1).To(o.ContainSubstring(`(sum(rate(container_cpu_usage_seconds_total{container="nbdb"}[5m])) BY`))
o.Expect(alertExpr1).To(o.ContainSubstring(`(instance, name, namespace)) > 0.8`))
alertSeverity2, alertExpr2, _ := getOVNAlertNetworkingRules(oc, "OVNKubernetesSouthboundDatabaseCPUUsageHigh")
o.Expect(alertSeverity2).To(o.ContainSubstring("info"))
o.Expect(alertExpr2).To(o.ContainSubstring(`(sum(rate(container_cpu_usage_seconds_total{container="sbdb"}[5m])) BY`))
o.Expect(alertExpr2).To(o.ContainSubstring(`(instance, name, namespace)) > 0.8`))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
26b8fe86-524d-4b78-a743-f724f6a60dd4
|
NonHyperShiftHOST-Author:qiowang-Medium-72330-Verify alert V6SubnetAllocationThresholdExceeded
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("NonHyperShiftHOST-Author:qiowang-Medium-72330-Verify alert V6SubnetAllocationThresholdExceeded", func() {
alertSeverity, alertExpr, _ := getOVNAlertMasterRules(oc, "V6SubnetAllocationThresholdExceeded")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring(`ovnkube_clustermanager_allocated_v6_host_subnets / ovnkube_clustermanager_num_v6_host_subnets`))
o.Expect(alertExpr).To(o.ContainSubstring(`> 0.8`))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
2aa66a10-3dce-4743-8cf8-cfb3e49b5094
|
Author:qiowang-NonHyperShiftHOST-Medium-53926-OVN-K alerts for ovn northd inactivity
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_sd_alerts.go
|
g.It("Author:qiowang-NonHyperShiftHOST-Medium-53926-OVN-K alerts for ovn northd inactivity", func() {
alertSeverity, alertExpr, runBook := getOVNAlertNetworkingRules(oc, "OVNKubernetesNorthdInactive")
o.Expect(alertSeverity).To(o.ContainSubstring("warning"))
o.Expect(alertExpr).To(o.ContainSubstring(`count(ovn_northd_status != 1) BY (instance, name, namespace) > 0`))
o.Expect(runBook).To(o.ContainSubstring(`https://github.com/openshift/runbooks/blob/master/alerts/cluster-network-operator/OVNKubernetesNorthdInactive.md`))
})
| ||||||
test
|
openshift/openshift-tests-private
|
49c50bec-3947-48de-b626-4b93ef1c851a
|
adminnetworkpolicy
|
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
package networking
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-networking] SDN adminnetworkpolicy", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-adminnetworkpolicy", exutil.KubeConfigPath())
g.BeforeEach(func() {
// Check the cluster type
networkType := exutil.CheckNetworkType(oc)
o.Expect(networkType).NotTo(o.BeEmpty())
if !strings.Contains(networkType, "ovn") {
g.Skip(fmt.Sprintf("Baseline Admin and Admin network policies not supported on cluster type : %s", networkType))
}
})
//https://issues.redhat.com/browse/SDN-2931
g.It("Author:asood-High-67103-[FdpOvnOvs] Egress BANP, NP and ANP policy with allow, deny and pass action. [Serial]", func() {
var (
testID = "67103"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
egressPolicyTypeFile = filepath.Join(testDataDir, "networkpolicy/allow-egress-red.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
targetPods = make(map[string]string)
podColors = []string{"red", "blue"}
nsList = []string{}
)
exutil.By("1. Get the first namespace (subject) and create another (target)")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
oc.SetupProject()
targetNs := oc.Namespace()
nsList = append(nsList, targetNs)
exutil.By("2. Create two pods in each namespace")
rcPingPodResource := replicationControllerPingPodResource{
name: testID + "-test-pod",
replicas: 2,
namespace: "",
template: rcPingPodTemplate,
}
for i := 0; i < 2; i++ {
rcPingPodResource.namespace = nsList[i]
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", subjectNs)
rcPingPodResource.createReplicaController(oc)
err := waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
}
podListSubjectNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[0], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListSubjectNs)).Should(o.Equal(2))
podListTargetNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[1], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListTargetNs)).Should(o.Equal(2))
exutil.By("3. Label pod in target namespace")
for i := 0; i < 2; i++ {
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", podListTargetNs[i], "-n", targetNs, "type="+podColors[i]).Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
targetPods[podColors[i]] = podListTargetNs[i]
}
exutil.By("4. Create a Baseline Admin Network Policy with deny action")
banpCR := singleRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
policyType: "egress",
direction: "to",
ruleName: "default-deny-to-" + targetNs,
ruleAction: "Deny",
ruleKey: matchLabelKey,
ruleVal: targetNs,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("5. Verify BANP blocks all egress traffic from %s to %s", subjectNs, targetNs))
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, podListTargetNs[j])
}
}
exutil.By("6. Create a network policy with egress rule")
createResourceFromFile(oc, subjectNs, egressPolicyTypeFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "allow-egress-to-red")).To(o.BeTrue())
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", targetNs, "team=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("7. Verify network policy overrides BANP and only egress to pods labeled type=red works")
for i := 0; i < 2; i++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["red"])
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["blue"])
}
exutil.By("8. Verify ANP with different actions and priorities")
anpIngressRuleCR := singleRuleANPPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 10,
policyType: "egress",
direction: "to",
ruleName: "allow-to-" + targetNs,
ruleAction: "Allow",
ruleKey: matchLabelKey,
ruleVal: targetNs,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("8.1 Verify ANP priority %v with name %s action %s egress traffic from %s to %s", anpIngressRuleCR.priority, anpIngressRuleCR.name, anpIngressRuleCR.ruleAction, subjectNs, targetNs))
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, podListTargetNs[j])
}
}
anpIngressRuleCR.name = "anp-" + testID + "-2"
anpIngressRuleCR.priority = 5
anpIngressRuleCR.ruleName = "deny-to-" + targetNs
anpIngressRuleCR.ruleAction = "Deny"
exutil.By(fmt.Sprintf(" 8.2 Verify ANP priority %v with name %s action %s egress traffic from %s to %s", anpIngressRuleCR.priority, anpIngressRuleCR.name, anpIngressRuleCR.ruleAction, subjectNs, targetNs))
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, podListTargetNs[j])
}
}
anpIngressRuleCR.name = "anp-" + testID + "-3"
anpIngressRuleCR.priority = 0
anpIngressRuleCR.ruleName = "pass-to-" + targetNs
anpIngressRuleCR.ruleAction = "Pass"
exutil.By(fmt.Sprintf("8.3 Verify ANP priority %v with name %s action %s egress traffic from %s to %s", anpIngressRuleCR.priority, anpIngressRuleCR.name, anpIngressRuleCR.ruleAction, subjectNs, targetNs))
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
for i := 0; i < 2; i++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["red"])
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["blue"])
}
exutil.By("9. Change label on type=blue to red and verify traffic")
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", targetPods["blue"], "-n", targetNs, "type="+podColors[0], "--overwrite").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
for i := 0; i < 2; i++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["blue"])
}
})
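// The expectations above follow the admin network policy precedence model: ANP rules are
// evaluated first in ascending priority order, Allow/Deny verdicts are final, and Pass defers
// to NetworkPolicy (when one selects the pod) and otherwise to BANP. A simplified illustration
// of that decision order is sketched below (for readability only, not used by any spec;
// anpAction stands for the first matching ANP rule, or "" when none matches):
verdictFor := func(anpAction string, npSelectsPod bool, npAllows bool, banpAction string) string {
if anpAction == "Allow" || anpAction == "Deny" {
return anpAction // an ANP Allow/Deny rule is final
}
if npSelectsPod { // ANP Pass (or no ANP match) defers to NetworkPolicy
if npAllows {
return "Allow"
}
return "Deny"
}
if banpAction == "Allow" || banpAction == "Deny" {
return banpAction // BANP only applies when no NetworkPolicy selects the pod
}
return "Allow" // cluster default when nothing matches
}
_ = verdictFor
// For example, in step 8.3 above the Pass rule at priority 0 defers to the allow-egress-to-red
// NetworkPolicy, which is why only egress to the type=red pod succeeds.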
g.It("Author:asood-High-67104-[FdpOvnOvs] Ingress BANP, NP and ANP policy with allow, deny and pass action. [Serial]", func() {
var (
testID = "67104"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template.yaml")
anpMultiRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
ingressNPPolicyTemplate = filepath.Join(testDataDir, "networkpolicy/generic-networkpolicy-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nsList = []string{}
policyType = "ingress"
direction = "from"
nsPod = make(map[string]string)
)
exutil.By("1. Get the first namespace (subject) and create three (source) namespaces")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
for i := 0; i < 3; i++ {
oc.SetupProject()
sourceNs := oc.Namespace()
nsList = append(nsList, sourceNs)
}
e2e.Logf("Project list %v", nsList)
exutil.By("2. Create a pod in all the namespaces")
rcPingPodResource := replicationControllerPingPodResource{
name: "",
replicas: 1,
namespace: "",
template: rcPingPodTemplate,
}
for i := 0; i < 4; i++ {
rcPingPodResource.namespace = nsList[i]
rcPingPodResource.name = testID + "-test-pod-" + strconv.Itoa(i)
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", nsList[i])
rcPingPodResource.createReplicaController(oc)
err := waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[i], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(1))
nsPod[nsList[i]] = podListNs[0]
e2e.Logf(fmt.Sprintf("Project %s has pod %s", nsList[i], nsPod[nsList[i]]))
}
exutil.By("3. Create a Baseline Admin Network Policy with ingress allow action for first two namespaces and deny for third")
banpCR := multiRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
policyType: policyType,
direction: direction,
ruleName1: "default-allow-from-" + nsList[1],
ruleAction1: "Allow",
ruleKey1: matchLabelKey,
ruleVal1: nsList[1],
ruleName2: "default-allow-from-" + nsList[2],
ruleAction2: "Allow",
ruleKey2: matchLabelKey,
ruleVal2: nsList[2],
ruleName3: "default-deny-from-" + nsList[3],
ruleAction3: "Deny",
ruleKey3: matchLabelKey,
ruleVal3: nsList[3],
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("4. Verify the traffic coming into subject namespace %s is allowed from first two namespaces and denied from third", nsList[0]))
for i := 1; i < 3; i++ {
CurlPod2PodPass(oc, nsList[i], nsPod[nsList[i]], nsList[0], nsPod[nsList[0]])
}
CurlPod2PodFail(oc, nsList[3], nsPod[nsList[3]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("5. Create another Admin Network Policy with ingress deny action to %s from %s namespace", nsList[0], nsList[2]))
anpEgressRuleCR := singleRuleANPPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 17,
policyType: "ingress",
direction: "from",
ruleName: "deny-from-" + nsList[2],
ruleAction: "Deny",
ruleKey: matchLabelKey,
ruleVal: nsList[2],
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpEgressRuleCR.name)
anpEgressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("6. Verify traffic from %s to %s is denied", nsList[2], nsList[0]))
CurlPod2PodFail(oc, nsList[2], nsPod[nsList[2]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("7. Create another Admin Network Policy with ingress deny action to %s and pass action to %s and %s from %s namespace with higher priority", nsList[0], nsList[1], nsList[2], nsList[3]))
anpEgressMultiRuleCR := multiRuleANPPolicyResource{
name: "anp-" + testID + "-2",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 16,
policyType: "ingress",
direction: "from",
ruleName1: "deny-from-" + nsList[1],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[1],
ruleName2: "pass-from-" + nsList[2],
ruleAction2: "Pass",
ruleKey2: matchLabelKey,
ruleVal2: nsList[2],
ruleName3: "pass-from-" + nsList[3],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[3],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpEgressMultiRuleCR.name)
anpEgressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("8. Verify traffic from %s to %s is allowed due to action %s", nsList[2], nsList[0], anpEgressMultiRuleCR.ruleAction2))
CurlPod2PodPass(oc, nsList[2], nsPod[nsList[2]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("9. Verify traffic from %s and %s to %s is denied", nsList[1], nsList[3], nsList[0]))
CurlPod2PodFail(oc, nsList[1], nsPod[nsList[1]], nsList[0], nsPod[nsList[0]])
CurlPod2PodFail(oc, nsList[3], nsPod[nsList[3]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("10. Create a networkpolicy in %s for ingress from %s and %s", subjectNs, nsList[1], nsList[3]))
matchStr := "matchLabels"
networkPolicyResource := networkPolicyResource{
name: "ingress-" + testID + "-networkpolicy",
namespace: subjectNs,
policy: "ingress",
policyType: "Ingress",
direction1: "from",
namespaceSel1: matchStr,
namespaceSelKey1: matchLabelKey,
namespaceSelVal1: nsList[1],
direction2: "from",
namespaceSel2: matchStr,
namespaceSelKey2: matchLabelKey,
namespaceSelVal2: nsList[3],
template: ingressNPPolicyTemplate,
}
networkPolicyResource.createNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("12. Verify ingress traffic from %s and %s is denied and alllowed from %s", nsList[1], nsList[2], nsList[3]))
for i := 1; i < 2; i++ {
CurlPod2PodFail(oc, nsList[i], nsPod[nsList[i]], nsList[0], nsPod[nsList[0]])
}
CurlPod2PodPass(oc, nsList[3], nsPod[nsList[3]], nsList[0], nsPod[nsList[0]])
})
g.It("Author:asood-Longduration-NonPreRelease-High-67105-[FdpOvnOvs] Ingress BANP, ANP and NP with allow, deny and pass action with TCP, UDP and SCTP protocols. [Serial]", func() {
var (
testID = "67105"
testDataDir = exutil.FixturePath("testdata", "networking")
sctpTestDataDir = filepath.Join(testDataDir, "sctp")
sctpClientPod = filepath.Join(sctpTestDataDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(sctpTestDataDir, "sctpserver.yaml")
sctpModule = filepath.Join(sctpTestDataDir, "load-sctp-module.yaml")
udpListenerPod = filepath.Join(testDataDir, "udp-listener.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-rule-template.yaml")
anpMultiRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
ingressNPPolicyTemplate = filepath.Join(testDataDir, "networkpolicy/generic-networkpolicy-protocol-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nsList = []string{}
udpPort = "8181"
policyType = "ingress"
direction = "from"
matchStr = "matchLabels"
)
exutil.By("1. Test setup")
exutil.By("Enable SCTP on all workers")
prepareSCTPModule(oc, sctpModule)
exutil.By("Get the first namespace, create three additional namespaces and label all except the subject namespace")
nsList = append(nsList, oc.Namespace())
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nsList[0], "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
for i := 0; i < 2; i++ {
oc.SetupProject()
peerNs := oc.Namespace()
nsLabelErr = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", peerNs, "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
nsList = append(nsList, peerNs)
}
oc.SetupProject()
subjectNs := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, subjectNs)
exutil.SetNamespacePrivileged(oc, subjectNs)
exutil.By("2. Create a Baseline Admin Network Policy with deny action for each peer namespace")
banpCR := multiRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
policyType: policyType,
direction: direction,
ruleName1: "default-deny-from-" + nsList[0],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "default-deny-from-" + nsList[1],
ruleAction2: "Deny",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "default-deny-from-" + nsList[2],
ruleAction3: "Deny",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("3. Create workload in namespaces")
exutil.By(fmt.Sprintf("Create clients in peer namespaces and SCTP/UDP/TCP services in the subject %s namespace", subjectNs))
for i := 0; i < 3; i++ {
exutil.By(fmt.Sprintf("Create SCTP client pod in %s", nsList[0]))
createResourceFromFile(oc, nsList[i], sctpClientPod)
err1 := waitForPodWithLabelReady(oc, nsList[i], "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "SCTP client pod is not running")
}
exutil.By(fmt.Sprintf("Create SCTP server pod in %s", subjectNs))
createResourceFromFile(oc, subjectNs, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, subjectNs, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "SCTP server pod is not running")
exutil.By(fmt.Sprintf("Create a pod in %s for TCP", subjectNs))
rcPingPodResource := replicationControllerPingPodResource{
name: "test-pod-" + testID,
replicas: 1,
namespace: subjectNs,
template: rcPingPodTemplate,
}
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", rcPingPodResource.namespace)
rcPingPodResource.createReplicaController(oc)
err = waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(1))
exutil.By(fmt.Sprintf("Create UDP Listener Pod in %s", subjectNs))
createResourceFromFile(oc, subjectNs, udpListenerPod)
err = waitForPodWithLabelReady(oc, subjectNs, "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "The pod with label name=udp-pod not ready")
udpPodName := getPodName(oc, subjectNs, "name=udp-pod")
exutil.By(fmt.Sprintf("4. All types of ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("5. Create ANP for TCP with ingress allow action from %s, deny from %s and pass action from %s to %s", nsList[0], nsList[1], nsList[2], subjectNs))
anpIngressMultiRuleCR := multiRuleANPPolicyResource{
name: "anp-ingress-tcp-" + testID + "-0",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 15,
policyType: policyType,
direction: direction,
ruleName1: "allow-from-" + nsList[0],
ruleAction1: "Allow",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "deny-from-" + nsList[1],
ruleAction2: "Deny",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "pass-from-" + nsList[2],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressMultiRuleCR.name)
anpIngressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("5.1 Update protocol for each rule"))
for i := 0; i < 2; i++ {
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/%s/ports\", \"value\": [\"portNumber\": {\"protocol\": \"TCP\", \"port\": 8080}]}]", strconv.Itoa(i))
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpIngressMultiRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}
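// The patch value above follows the AdminNetworkPolicy ports shape: a list of
// {"portNumber": {"protocol": ..., "port": ...}} entries. A sketch of an alternative
// that avoids hand-escaped quotes (hypothetical, not executed here) would marshal the
// same structure with the already imported encoding/json package, e.g.:
//   ports := []map[string]interface{}{{"portNumber": map[string]interface{}{"protocol": "TCP", "port": 8080}}}
//   patch, _ := json.Marshal([]map[string]interface{}{{"op": "add", "path": "/spec/ingress/0/ports", "value": ports}})
//   oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpIngressMultiRuleCR.name, "--type=json", "-p", string(patch)).Execute()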
exutil.By(fmt.Sprintf("6. Traffic validation after anp %s is applied to %s", anpIngressMultiRuleCR.name, subjectNs))
exutil.By(fmt.Sprintf("6.0. SCTP and UDP ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("6.1. TCP ingress traffic to %s from the clients %s and %s is denied", nsList[1], nsList[2], subjectNs))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("6.2. TCP ingress traffic to %s from the client %s is allowed", nsList[0], subjectNs))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("7. Create second ANP for SCTP with ingress deny action from %s & %s and pass action from %s to %s", nsList[0], nsList[1], nsList[2], subjectNs))
anpIngressMultiRuleCR = multiRuleANPPolicyResource{
name: "anp-ingress-sctp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 10,
policyType: policyType,
direction: direction,
ruleName1: "deny-from-" + nsList[0],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "deny-from-" + nsList[1],
ruleAction2: "Deny",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "pass-from-" + nsList[2],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressMultiRuleCR.name)
anpIngressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("7.1 Update protocol for each rule"))
for i := 0; i < 2; i++ {
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/%s/ports\", \"value\": [\"portNumber\": {\"protocol\": \"SCTP\", \"port\": 30102}]}]", strconv.Itoa(i))
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpIngressMultiRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("8. Traffic validation after anp %s as applied to %s", anpIngressMultiRuleCR.name, subjectNs))
exutil.By(fmt.Sprintf("8.0. SCTP and UDP ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("8.1. TCP ingress traffic to %s from the clients %s and %s is denied", nsList[1], nsList[2], subjectNs))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("8.2. TCP ingress traffic to %s from the client %s is allowed", nsList[0], subjectNs))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("9. Create a network policy in %s from the client %s to allow SCTP", subjectNs, nsList[2]))
networkPolicyResource := networkPolicyProtocolResource{
name: "allow-ingress-sctp-" + testID,
namespace: subjectNs,
policy: policyType,
policyType: "Ingress",
direction: direction,
namespaceSel: matchStr,
namespaceSelKey: matchLabelKey,
namespaceSelVal: nsList[2],
podSel: matchStr,
podSelKey: "name",
podSelVal: "sctpclient",
port: 30102,
protocol: "SCTP",
template: ingressNPPolicyTemplate,
}
networkPolicyResource.createProtocolNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
patchNP := `[{"op": "add", "path": "/spec/podSelector", "value": {"matchLabels": {"name":"sctpserver"}}}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("networkpolicy", networkPolicyResource.name, "-n", subjectNs, "--type=json", "-p", patchNP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("10. Traffic validation after network policy %s is applied to %s", networkPolicyResource.name, subjectNs))
exutil.By(fmt.Sprintf("10.0. UDP ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("10.1. SCTP ingress traffic to %s from the %s and %s clients is denied", subjectNs, nsList[0], nsList[1]))
for i := 0; i < 2; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
}
exutil.By(fmt.Sprintf("10.2. SCTP ingress traffic to %s from the %s client is allowed", subjectNs, nsList[2]))
checkSCTPTraffic(oc, sctpClientPodname, nsList[2], sctpServerPodName, subjectNs, true)
exutil.By(fmt.Sprintf("10.3. TCP ingress traffic to %s from the clients %s and %s is denied", nsList[1], nsList[2], subjectNs))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("10.4. TCP ingress traffic to %s from the client %s is allowed", nsList[0], subjectNs))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("11. Create third ANP for UDP with ingress pass action from %s, %s and %s to %s", nsList[0], nsList[1], nsList[2], subjectNs))
anpIngressMultiRuleCR = multiRuleANPPolicyResource{
name: "anp-ingress-udp-" + testID + "-2",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 5,
policyType: policyType,
direction: direction,
ruleName1: "pass-from-" + nsList[0],
ruleAction1: "Pass",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "pass-from-" + nsList[1],
ruleAction2: "Pass",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "pass-from-" + nsList[2],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressMultiRuleCR.name)
anpIngressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("11.1 Update protocol for each rule"))
for i := 0; i < 2; i++ {
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/%s/ports\", \"value\": [\"portNumber\": {\"protocol\": \"UDP\", \"port\": %v}]}]", strconv.Itoa(i), udpPort)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpIngressMultiRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("12. Traffic validation after admin network policy %s is applied to %s", anpIngressMultiRuleCR.name, subjectNs))
exutil.By(fmt.Sprintf("12.1 UDP traffic from all the clients to %s is denied", subjectNs))
for i := 0; i < 3; i++ {
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("12.2 SCTP traffic from the clients %s & %s to %s is denied, allowed from %s", nsList[0], nsList[1], subjectNs, nsList[2]))
for i := 0; i < 2; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
}
checkSCTPTraffic(oc, sctpClientPodname, nsList[2], sctpServerPodName, subjectNs, true)
exutil.By(fmt.Sprintf("12.3 TCP traffic from the clients %s & %s to %s is denied, allowed from %s", nsList[1], nsList[2], subjectNs, nsList[0]))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("13. Create a network policy in %s from the client %s to allow SCTP", subjectNs, nsList[2]))
networkPolicyResource = networkPolicyProtocolResource{
name: "allow-all-protocols-" + testID,
namespace: subjectNs,
policy: policyType,
policyType: "Ingress",
direction: direction,
namespaceSel: matchStr,
namespaceSelKey: "team",
namespaceSelVal: "qe",
podSel: matchStr,
podSelKey: "name",
podSelVal: "sctpclient",
port: 30102,
protocol: "SCTP",
template: ingressNPPolicyTemplate,
}
networkPolicyResource.createProtocolNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
patchNP = `[{"op": "add", "path": "/spec/ingress/0/ports", "value": [{"protocol": "TCP", "port": 8080},{"protocol": "UDP", "port": 8181}, {"protocol": "SCTP", "port": 30102}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("networkpolicy", networkPolicyResource.name, "-n", subjectNs, "--type=json", "-p", patchNP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("14. Traffic validation to %s from the clients is allowed", subjectNs))
exutil.By(fmt.Sprintf("14.1 UDP ingress traffic to %s from the clients is allowed", subjectNs))
for i := 0; i < 3; i++ {
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, true)
}
exutil.By(fmt.Sprintf("14.2 TCP traffic from the clients %s & %s to %s is allowed but denied from %s", nsList[0], nsList[2], subjectNs, nsList[1]))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
CurlPod2PodFail(oc, nsList[1], sctpClientPodname, subjectNs, podListNs[0])
CurlPod2PodPass(oc, nsList[2], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("14.3 SCTP traffic from the clients %s & %s to %s is denied but allowed from %s", nsList[0], nsList[1], subjectNs, nsList[2]))
for i := 0; i < 2; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
}
checkSCTPTraffic(oc, sctpClientPodname, nsList[2], sctpServerPodName, subjectNs, true)
})
g.It("Author:asood-High-67614-[FdpOvnOvs] Egress BANP, ANP and NP with allow, deny and pass action with TCP, UDP and SCTP protocols. [Serial]", func() {
var (
testID = "67614"
testDataDir = exutil.FixturePath("testdata", "networking")
sctpTestDataDir = filepath.Join(testDataDir, "sctp")
sctpClientPod = filepath.Join(sctpTestDataDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(sctpTestDataDir, "sctpserver.yaml")
sctpModule = filepath.Join(sctpTestDataDir, "load-sctp-module.yaml")
udpListenerPod = filepath.Join(testDataDir, "udp-listener.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-me-template.yaml")
anpSingleRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-me-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
egressNPPolicyTemplate = filepath.Join(testDataDir, "networkpolicy/generic-networkpolicy-protocol-template.yaml")
matchExpKey = "kubernetes.io/metadata.name"
matchExpOper = "In"
nsList = []string{}
policyType = "egress"
direction = "to"
udpPort = "8181"
matchStr = "matchLabels"
)
exutil.By("1. Test setup")
exutil.By("Enable SCTP on all workers")
prepareSCTPModule(oc, sctpModule)
exutil.By("Get the first namespace, create three additional namespaces and label all except the subject namespace")
nsList = append(nsList, oc.Namespace())
subjectNs := nsList[0]
for i := 0; i < 3; i++ {
oc.SetupProject()
peerNs := oc.Namespace()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", peerNs, "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
nsList = append(nsList, peerNs)
}
// The first peer namespace (nsList[1]) hosts the SCTP server pod, so it must be made privileged
defer exutil.RecoverNamespaceRestricted(oc, nsList[1])
exutil.SetNamespacePrivileged(oc, nsList[1])
exutil.By("2. Create a Baseline Admin Network Policy with deny action for egress to each peer namespaces for all protocols")
banpCR := singleRuleBANPMEPolicyResource{
name: "default",
subjectKey: matchExpKey,
subjectOperator: matchExpOper,
subjectVal: subjectNs,
policyType: policyType,
direction: direction,
ruleName: "default-deny-to-all",
ruleAction: "Deny",
ruleKey: matchExpKey,
ruleOperator: matchExpOper,
ruleVal: nsList[1],
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleBANPMatchExp(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
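// The BANP template only takes a single peer namespace, so patch the matchExpressions values to cover all three peer namespaces at once.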
nsListVal, err := json.Marshal(nsList[1:])
o.Expect(err).NotTo(o.HaveOccurred())
patchBANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/egress/0/to/0/namespaces/matchExpressions/0/values\", \"value\": %s}]", nsListVal)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3. Create workload in namespaces")
exutil.By(fmt.Sprintf("Create client in subject %s namespace and SCTP, UDP & TCP service respectively in other three namespaces", subjectNs))
exutil.By(fmt.Sprintf("Create SCTP client pod in %s", nsList[0]))
createResourceFromFile(oc, nsList[0], sctpClientPod)
err1 := waitForPodWithLabelReady(oc, nsList[0], "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "SCTP client pod is not running")
exutil.By(fmt.Sprintf("Create SCTP server pod in %s", nsList[1]))
createResourceFromFile(oc, nsList[1], sctpServerPod)
err2 := waitForPodWithLabelReady(oc, nsList[1], "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "SCTP server pod is not running")
exutil.By(fmt.Sprintf("Create a pod in %s for TCP", nsList[2]))
rcPingPodResource := replicationControllerPingPodResource{
name: "test-pod-" + testID,
replicas: 1,
namespace: nsList[2],
template: rcPingPodTemplate,
}
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", rcPingPodResource.namespace)
rcPingPodResource.createReplicaController(oc)
err = waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(1))
exutil.By(fmt.Sprintf("Create UDP Listener Pod in %s", nsList[3]))
createResourceFromFile(oc, nsList[3], udpListenerPod)
err = waitForPodWithLabelReady(oc, nsList[3], "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "The pod with label name=udp-pod not ready")
var udpPodName []string
udpPodName = getPodName(oc, nsList[3], "name=udp-pod")
exutil.By(fmt.Sprintf("4. All type of egress traffic from %s to TCP/UDP/SCTP service is denied", subjectNs))
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], false)
CurlPod2PodFail(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, false)
exutil.By("5. Create a Admin Network Policy with allow action for egress to each peer namespaces for all protocols")
anpEgressRuleCR := singleRuleANPMEPolicyResource{
name: "anp-" + policyType + "-" + testID + "-1",
subjectKey: matchExpKey,
subjectOperator: matchExpOper,
subjectVal: subjectNs,
priority: 10,
policyType: "egress",
direction: "to",
ruleName: "allow-to-all",
ruleAction: "Allow",
ruleKey: matchExpKey,
ruleOperator: matchExpOper,
ruleVal: nsList[1],
template: anpSingleRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpEgressRuleCR.name)
anpEgressRuleCR.createSingleRuleANPMatchExp(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressRuleCR.name)).To(o.BeTrue())
exutil.By("5.1 Update ANP to include all the namespaces")
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/egress/0/to/0/namespaces/matchExpressions/0/values\", \"value\": %s}]", nsListVal)
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("6. Egress traffic from %s to TCP/UDP/SCTP service is allowed after ANP %s is applied", subjectNs, anpEgressRuleCR.name))
exutil.By(fmt.Sprintf("6.1 Egress traffic from %s to TCP and service is allowed", subjectNs))
patchANP = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 8080}}, {"portNumber": {"protocol": "UDP", "port": 8181}}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], false)
CurlPod2PodPass(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, true)
exutil.By(fmt.Sprintf("6.2 Egress traffic from %s to SCTP service is also allowed", subjectNs))
patchANP = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 8080}}, {"portNumber": {"protocol": "UDP", "port": 8181}}, {"portNumber": {"protocol": "SCTP", "port": 30102}}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], true)
CurlPod2PodPass(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, true)
exutil.By("7. Create another Admin Network Policy with pass action for egress to each peer namespaces for all protocols")
anpEgressRuleCR.name = "anp-" + policyType + "-" + testID + "-2"
anpEgressRuleCR.priority = 5
anpEgressRuleCR.ruleName = "pass-to-all"
anpEgressRuleCR.ruleAction = "Pass"
defer removeResource(oc, true, true, "anp", anpEgressRuleCR.name)
anpEgressRuleCR.createSingleRuleANPMatchExp(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressRuleCR.name)).To(o.BeTrue())
exutil.By("7.1 Update ANP to include all the namespaces")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/egress/0/to/0/namespaces/matchExpressions/0/values\", \"value\": %s}]", nsListVal)
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("7.2 Egress traffic from %s to TCP/UDP/SCTP service is denied after ANP %s is applied", subjectNs, anpEgressRuleCR.name))
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], false)
CurlPod2PodFail(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, false)
exutil.By(fmt.Sprintf("8. Egress traffic from %s to TCP/SCTP/UDP service is allowed after network policy is applied", subjectNs))
networkPolicyResource := networkPolicyProtocolResource{
name: "allow-all-protocols-" + testID,
namespace: subjectNs,
policy: policyType,
policyType: "Egress",
direction: direction,
namespaceSel: matchStr,
namespaceSelKey: "team",
namespaceSelVal: "qe",
podSel: matchStr,
podSelKey: "name",
podSelVal: "sctpclient",
port: 30102,
protocol: "SCTP",
template: egressNPPolicyTemplate,
}
networkPolicyResource.createProtocolNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("8.1 Update the network policy %s in %s to add ports for protocols and all the pods ", networkPolicyResource.name, subjectNs))
patchNP := `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"protocol": "TCP", "port": 8080},{"protocol": "UDP", "port": 8181}, {"protocol": "SCTP", "port": 30102}]}, {"op": "add", "path": "/spec/egress/0/to", "value": [{"namespaceSelector": {"matchLabels": {"team": "qe"}}, "podSelector": {}}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("networkpolicy", networkPolicyResource.name, "-n", subjectNs, "--type=json", "-p", patchNP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], true)
CurlPod2PodPass(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, true)
})
//https://issues.redhat.com/browse/SDN-4517
g.It("Author:asood-High-73189-[FdpOvnOvs] BANP and ANP ACL audit log works [Serial]", func() {
var (
testID = "73189"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-pod-mixed-rule-template.yaml")
anpMultiRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-pod-mixed-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nsList = []string{}
podKey = "color"
podVal = "red"
coloredPods = make(map[string]string)
unColoredPods = make(map[string]string)
ovnkubeNodeColoredPods = make(map[string]string)
ovnkubeNodeUnColoredPods = make(map[string]string)
)
exutil.By("1. Get the first namespace (subject) and create three peer namespaces")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
for i := 0; i < 3; i++ {
oc.SetupProject()
peerNs := oc.Namespace()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", peerNs, "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
nsList = append(nsList, peerNs)
}
e2e.Logf("Project list %v", nsList)
exutil.By("2. Create pods in all the namespaces, label one of the pod and obtain ovnkube-node pod for the scheduled pods in subject namespace.")
rcPingPodResource := replicationControllerPingPodResource{
name: "",
replicas: 2,
namespace: "",
template: rcPingPodTemplate,
}
for i := 0; i < 4; i++ {
rcPingPodResource.namespace = nsList[i]
rcPingPodResource.name = testID + "-test-pod-" + strconv.Itoa(i)
e2e.Logf("Create replica controller for pods %s", rcPingPodResource.name)
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", nsList[i])
rcPingPodResource.createReplicaController(oc)
err := waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[i], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(2))
e2e.Logf("Label pod %s in project %s", podListNs[0], nsList[i])
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", podListNs[0], "-n", nsList[i], podKey+"="+podVal).Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
coloredPods[nsList[i]] = podListNs[0]
unColoredPods[nsList[i]] = podListNs[1]
if i == 0 {
e2e.Logf("Get ovnkube-node pod scheduled on the same node where first pods %s is scheduled", podListNs[0])
nodeName, nodeNameErr := exutil.GetPodNodeName(oc, nsList[i], podListNs[0])
o.Expect(nodeNameErr).NotTo(o.HaveOccurred())
ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(podErr).NotTo(o.HaveOccurred())
ovnkubeNodeColoredPods[nsList[i]] = ovnKubePod
e2e.Logf("Get equivalent ovnkube-node pod scheduled on the same node where second pod %s is scheduled", podListNs[1])
nodeName, nodeNameErr = exutil.GetPodNodeName(oc, nsList[i], podListNs[1])
o.Expect(nodeNameErr).NotTo(o.HaveOccurred())
ovnKubePod, podErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(podErr).NotTo(o.HaveOccurred())
ovnkubeNodeUnColoredPods[nsList[i]] = ovnKubePod
}
}
exutil.By("3. Create a BANP Policy with egress allow action and ingress deny action for subject namespace")
banpCR := multiPodMixedRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
subjectPodKey: podKey,
subjectPodVal: podVal,
policyType1: "egress",
direction1: "to",
ruleName1: "default-allow-egress-to-colored-pods",
ruleAction1: "Allow",
ruleKey1: "team",
ruleVal1: "qe",
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "ingress",
direction2: "from",
ruleName2: "default-deny-from-colored-pods",
ruleAction2: "Deny",
ruleKey2: "team",
ruleVal2: "qe",
rulePodKey2: podKey,
rulePodVal2: podVal,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiPodMixedRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("3.1 Update BANP subject pod selector.")
patchBANP := `[{"op": "add", "path": "/spec/subject/pods/podSelector", "value": {}}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Update BANP to add another egress rule to BANP")
patchBANP = `[{"op": "add", "path": "/spec/egress/1", "value": { "action": "Deny", "name": "default-deny-unlabelled-pods", "to": [{"pods": { "namespaceSelector": {"matchLabels": {"team": "qe"}}, "podSelector": {"matchExpressions": [{"key": "color", "operator": "DoesNotExist"}]}}}]} }]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.3 Update BANP to add another ingress rule to BANP")
patchBANP = `[{"op": "add", "path": "/spec/ingress/1", "value": { "action": "Allow", "name": "default-allow-unlabelled-pods", "from": [{"pods": { "namespaceSelector": {"matchLabels": {"team": "qe"}}, "podSelector": {"matchExpressions": [{"key": "color", "operator": "DoesNotExist"}]}}}]} }]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("4. BANP ACL audit logging verification for each rule")
aclLogSearchString := fmt.Sprintf("name=\"BANP:default:Egress:0\", verdict=allow, severity=alert")
exutil.By(fmt.Sprintf("4.1 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[1], coloredPods[nsList[1]], "pass", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = fmt.Sprintf("name=\"BANP:default:Egress:1\", verdict=drop, severity=alert")
exutil.By(fmt.Sprintf("4.2 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[1], unColoredPods[nsList[1]], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = fmt.Sprintf("name=\"BANP:default:Ingress:0\", verdict=drop, severity=alert")
exutil.By(fmt.Sprintf("4.3 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, nsList[2], coloredPods[nsList[2]], subjectNs, coloredPods[subjectNs], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = fmt.Sprintf("name=\"BANP:default:Ingress:1\", verdict=allow, severity=alert")
exutil.By(fmt.Sprintf("4.4 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, nsList[3], unColoredPods[nsList[3]], subjectNs, unColoredPods[subjectNs], "pass", aclLogSearchString, ovnkubeNodeUnColoredPods[subjectNs], true)
exutil.By("5. Update BANP to change action on ingress from allow to deny")
patchBANP = `[{"op": "add", "path": "/spec/egress/0/action", "value": "Deny"}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("6. Create Admin Network Policy with ingress deny from %s to %s and egress allow to %s and pass to %s from %s namespace", nsList[1], nsList[0], nsList[2], nsList[3], nsList[0]))
anpMultiMixedRuleCR := multiPodMixedRuleANPPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
subjectPodKey: podKey,
subjectPodVal: podVal,
priority: 20,
policyType1: "ingress",
direction1: "from",
ruleName1: "deny-from-" + nsList[1],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[1],
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "egress",
direction2: "to",
ruleName2: "allow-to-" + nsList[2],
ruleAction2: "Allow",
ruleKey2: matchLabelKey,
ruleVal2: nsList[2],
rulePodKey2: podKey,
rulePodVal2: podVal,
ruleName3: "pass-to-" + nsList[3],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[3],
rulePodKey3: "color",
rulePodVal3: "red",
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpMultiMixedRuleCR.name)
anpMultiMixedRuleCR.createMultiPodMixedRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpMultiMixedRuleCR.name)).To(o.BeTrue())
aclLogSearchString = fmt.Sprintf("name=\"ANP:%s:Ingress:0\", verdict=drop, severity=alert", anpMultiMixedRuleCR.name)
exutil.By(fmt.Sprintf("6.1 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, nsList[1], coloredPods[nsList[1]], subjectNs, coloredPods[subjectNs], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = fmt.Sprintf("name=\"ANP:%s:Egress:0\", verdict=allow, severity=warning", anpMultiMixedRuleCR.name)
exutil.By(fmt.Sprintf("6.2 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[2], coloredPods[nsList[2]], "pass", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = fmt.Sprintf("name=\"ANP:%s:Egress:1\", verdict=pass, severity=info", anpMultiMixedRuleCR.name)
exutil.By(fmt.Sprintf("6.3 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[3], coloredPods[nsList[3]], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
exutil.By("7. Update BANP Policy annotation to see allow ACL is no longer audited")
aclLogSettings := aclSettings{DenySetting: "", AllowSetting: "warning"}
annotationErr := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("--overwrite", "baselineadminnetworkpolicy", "default", aclLogSettings.getJSONString()).Execute()
o.Expect(annotationErr).NotTo(o.HaveOccurred())
exutil.By("8. Update ANP Policy ingress rule from allow to pass to verify BANP ACL logging change")
patchANP := `[{"op": "replace", "path": "/spec/ingress/0/action", "value": "Pass" }]`
patchANPErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("anp", anpMultiMixedRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchANPErr).NotTo(o.HaveOccurred())
aclLogSearchString = "name=\"BANP:default:Ingress:0\", verdict=drop, severity=alert"
exutil.By(fmt.Sprintf("8.1 Verify ACL for rule %s in BANP is not logged", aclLogSearchString))
checkACLLogs(oc, nsList[1], coloredPods[nsList[1]], subjectNs, coloredPods[subjectNs], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], false)
})
g.It("Author:asood-High-73604-BANP and ANP validation. [Serial]", func() {
var (
testID = "73604"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
validCIDR = "10.10.10.1/24"
matchLabelKey = "kubernetes.io/metadata.name"
invalidCIDR = "10.10.10.1-10.10.10.1"
invalidIPv6 = "2001:db8:a0b:12f0::::0:1/128"
expectedMessages = [3]string{"Duplicate value", "Invalid CIDR format provided", "Invalid CIDR format provided"}
resourceType = [2]string{"banp", "anp"}
patchCIDR = []string{}
resourceName = []string{}
patchAction string
)
subjectNs := oc.Namespace()
exutil.By("Create BANP with single rule with CIDR")
banp := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
ruleName: "Egress to CIDR",
ruleAction: "Deny",
cidr: validCIDR,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banp.name)
banp.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banp.name)).To(o.BeTrue())
resourceName = append(resourceName, banp.name)
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-0-" + testID,
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 10,
ruleName: "Egress to CIDR",
ruleAction: "Deny",
cidr: validCIDR,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
resourceName = append(resourceName, anpCR.name)
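// Three invalid patches exercise API validation: a duplicate of the existing CIDR, an IPv4 range that is not CIDR notation, and a malformed IPv6 CIDR.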
patchCIDR = append(patchCIDR, fmt.Sprintf("[{\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\": %s }]", validCIDR))
patchCIDR = append(patchCIDR, fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/egress/0/to/0/networks/0\", \"value\": %s}]", invalidCIDR))
patchCIDR = append(patchCIDR, fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/egress/0/to/0/networks/0\", \"value\": %s}]", invalidIPv6))
exutil.By("BANP and ANP validation with invalid CIDR values")
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("Validating %s with name %s", strings.ToUpper(resourceType[i]), resourceName[i]))
for j := 0; j < len(expectedMessages); j++ {
exutil.By(fmt.Sprintf("Validating %s message", expectedMessages[j]))
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(resourceType[i], resourceName[i], "--type=json", "-p", patchCIDR[j]).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, expectedMessages[j])).To(o.BeTrue())
}
}
exutil.By("BANP and ANP validation with action values in lower case")
policyActions := map[string][]string{"banp": {"allow", "deny"}, "anp": {"allow", "deny", "pass"}}
idx := 0
for _, polType := range resourceType {
exutil.By(fmt.Sprintf("Validating %s with name %s", strings.ToUpper(polType), resourceName[idx]))
for _, actionStr := range policyActions[polType] {
exutil.By(fmt.Sprintf("Validating invalid action %s", actionStr))
patchAction = fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/egress/0/action\", \"value\": %s}]", actionStr)
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(polType, resourceName[idx], "--type=json", "-p", patchAction).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, fmt.Sprintf("Unsupported value: \"%s\"", actionStr))).To(o.BeTrue())
}
idx = idx + 1
}
exutil.By("ANP validation for priority more than 99")
anpCR.name = "anp-1-" + testID
anpCR.priority = 100
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
statusChk, statusChkMsg := checkSpecificPolicyStatus(oc, "anp", anpCR.name, "message", "OVNK only supports priority ranges 0-99")
o.Expect(statusChk).To(o.BeTrue())
o.Expect(statusChkMsg).To(o.BeEmpty())
})
g.It("Author:asood-High-73802-[FdpOvnOvs] BANP and ANP work with named ports. [Serial]", func() {
var (
testID = "73802"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-pod-mixed-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-me-template.yaml")
namedPortPodTemplate = filepath.Join(testDataDir, "named-port-pod-template.yaml")
direction = "from"
policyType = "ingress"
namespaceLabelKey = "team"
namespaceLabelVal = "qe"
podKey = "name"
podVal = "hello-pod"
nsList = []string{}
dummyLabel = "qe1"
)
exutil.By("1. Get the first namespace (subject) and create another (peer)")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
oc.SetupProject()
peerNs := oc.Namespace()
nsList = append(nsList, peerNs)
exutil.By("2. Create two pods in each namespace and label namespaces")
namedPortPod := namedPortPodResource{
name: "",
namespace: "",
podLabelKey: "name",
podLabelVal: "hello-pod",
portname: "",
containerport: 8080,
template: namedPortPodTemplate,
}
podNames := []string{"hello-pod-" + testID + "-1", "hello-pod-" + testID + "-2"}
portNames := []string{"web", "web123"}
for i := 0; i < 2; i++ {
namedPortPod.namespace = nsList[i]
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nsList[i], namespaceLabelKey+"="+namespaceLabelVal).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for j := 0; j < len(podNames); j++ {
namedPortPod.name = podNames[j]
namedPortPod.portname = portNames[j]
namedPortPod.createNamedPortPod(oc)
}
err = waitForPodWithLabelReady(oc, namedPortPod.namespace, namedPortPod.podLabelKey+"="+namedPortPod.podLabelVal)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label %s=%s in %s not ready", namedPortPod.podLabelKey, namedPortPod.podLabelVal, namedPortPod.namespace))
podListInNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[i], namedPortPod.podLabelKey+"="+namedPortPod.podLabelVal)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListInNs)).Should(o.Equal(2))
e2e.Logf("Pods %s in %s namespace", podListInNs, nsList[i])
}
exutil.By("3. Create a ANP with deny and pass action for ingress to projects with label team=qe")
anpCR := singleRuleANPMEPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: namespaceLabelKey,
subjectOperator: "In",
subjectVal: namespaceLabelVal,
priority: 25,
policyType: policyType,
direction: direction,
ruleName: "deny ingress",
ruleAction: "Deny",
ruleKey: namespaceLabelKey,
ruleOperator: "NotIn",
ruleVal: dummyLabel,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleANPMatchExp(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("3.1 Update ANP's first rule with named port")
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/ports\", \"value\": [\"namedPort\": %s]}]", policyType, portNames[0])
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Update ANP to add second ingress rule with named port")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/1\", \"value\": {\"name\":\"pass ingress\", \"action\": \"Pass\", \"from\": [{\"namespaces\": {\"matchLabels\": {%s: %s}}}], \"ports\":[{\"namedPort\": %s}]}}]", policyType, namespaceLabelKey, namespaceLabelVal, portNames[1])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("3.3 Validate traffic is blocked between pods with named port %s but passes through the pods with named ports %s", portNames[0], portNames[1]))
CurlPod2PodPass(oc, nsList[0], podNames[1], nsList[1], podNames[1])
CurlPod2PodPass(oc, nsList[1], podNames[1], nsList[0], podNames[1])
CurlPod2PodFail(oc, nsList[0], podNames[0], nsList[1], podNames[0])
CurlPod2PodFail(oc, nsList[1], podNames[0], nsList[0], podNames[0])
exutil.By("4. Create a BANP with deny and pass action for ingress to projects with label team=qe")
exutil.By("4.0 Update ANP change Deny action to Pass for first rule")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/name\", \"value\": \"pass ingress\"}, {\"op\": \"add\", \"path\":\"/spec/%s/0/action\", \"value\": \"Pass\"}]", policyType, policyType)
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
banpCR := multiPodMixedRuleBANPPolicyResource{
name: "default",
subjectKey: namespaceLabelKey,
subjectVal: namespaceLabelVal,
subjectPodKey: podKey,
subjectPodVal: podVal,
policyType1: policyType,
direction1: direction,
ruleName1: "default-allow-ingress",
ruleAction1: "Allow",
ruleKey1: "team",
ruleVal1: "qe",
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "egress",
direction2: "to",
ruleName2: "default-deny-from-colored-pods",
ruleAction2: "Deny",
ruleKey2: "team",
ruleVal2: "qe",
rulePodKey2: podKey,
rulePodVal2: podVal,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiPodMixedRuleBANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("4.1 Remove egress rule in BANP")
patchBANP := "[{\"op\": \"remove\", \"path\":\"/spec/egress\"}]"
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("4.2 Update first rule with named port")
patchBANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/ports\", \"value\": [\"namedPort\": %s]}]", policyType, portNames[1])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("4.3 Add another rule with first named port")
patchBANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/1\", \"value\": {\"name\":\"deny ingress\", \"action\": \"Deny\", \"from\": [{\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {}}}], \"ports\":[{\"namedPort\": %s}]}}]", policyType, namespaceLabelKey, namespaceLabelVal, portNames[0])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("4.4 Validate traffic passes between pods with named port %s but is blocked between the pods with named ports %s", portNames[1], portNames[0]))
CurlPod2PodPass(oc, nsList[0], podNames[0], nsList[1], podNames[1])
CurlPod2PodPass(oc, nsList[1], podNames[0], nsList[0], podNames[1])
CurlPod2PodFail(oc, nsList[0], podNames[1], nsList[1], podNames[0])
CurlPod2PodFail(oc, nsList[1], podNames[1], nsList[0], podNames[0])
})
g.It("Author:asood-NonHyperShiftHOST-High-73454-[FdpOvnOvs] Egress traffic works with ANP, BANP and NP with node egress peer. [Serial]", func() {
var (
testID = "73454"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template-node.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
egressTypeFile = filepath.Join(testDataDir, "networkpolicy", "default-allow-egress.yaml")
httpServerPodNodeTemplate = filepath.Join(testDataDir, "httpserverPod-specific-node-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataDir, "ping-for-pod-specific-node-template.yaml")
containerport int32 = 30001
hostport int32 = 30003
direction = "to"
policyType = "egress"
nsMatchLabelKey = "kubernetes.io/metadata.name"
nodeLabels = []string{"qe", "ocp"}
labelledNodeMap = make(map[string]string)
nodePodMap = make(map[string]string)
newNodePodMap = make(map[string]string)
numWorkerNodes = 2
)
exutil.By("1.0 Get the worker nodes in the cluster")
workersList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workersList.Items) < numWorkerNodes {
g.Skip("Skipping the test as it requires two worker nodes, found insufficient worker nodes")
}
exutil.By("1.1 Label the worker nodes")
for i := 0; i < numWorkerNodes; i++ {
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workersList.Items[i].Name, "team", nodeLabels[i])
labelledNodeMap[nodeLabels[i]] = workersList.Items[i].Name
}
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, labelledNodeMap["ocp"], "team")
exutil.By("1.2 Create the pods on cluster network and pods that open port on worker nodes")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
httpServerPod := httpserverPodResourceNode{
name: "",
namespace: ns,
containerport: containerport,
hostport: hostport,
nodename: "",
template: httpServerPodNodeTemplate,
}
for i := 0; i < numWorkerNodes; i++ {
httpServerPod.name = "httpserverpod-" + testID + "-" + strconv.Itoa(i)
httpServerPod.nodename = workersList.Items[i].Name
httpServerPod.createHttpservePodNodeByAdmin(oc)
waitPodReady(oc, ns, httpServerPod.name)
}
pod := pingPodResourceNode{
name: "",
namespace: ns,
nodename: "",
template: pingPodNodeTemplate,
}
for i := 0; i < 2; i++ {
pod.name = "test-pod-" + testID + "-" + strconv.Itoa(i)
pod.nodename = workersList.Items[i].Name
pod.createPingPodNode(oc)
waitPodReady(oc, ns, pod.name)
nodePodMap[pod.nodename] = pod.name
}
exutil.By("1.3 Validate from the pods running on all the nodes, egress traffic from each node is allowed.\n")
nodeList := []string{labelledNodeMap["ocp"], labelledNodeMap["qe"]}
for _, egressNode := range nodeList {
// Ping between the nodes does not work on all clusters, therefore check allowed ICMP egress traffic from pod running on the node
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeTrue())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
}
exutil.By("2.0 Create BANP to block egress traffic from all the worker nodes.\n")
banp := singleRuleBANPPolicyResourceNode{
name: "default",
subjectKey: nsMatchLabelKey,
subjectVal: ns,
policyType: policyType,
direction: direction,
ruleName: "default-egress",
ruleAction: "Deny",
ruleKey: "kubernetes.io/hostname",
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banp.name)
banp.createSingleRuleBANPNode(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banp.name)).To(o.BeTrue())
exutil.By("2.1 Validate from the pods running on all the nodes, egress traffic from each node is blocked.\n")
nodeList = []string{labelledNodeMap["ocp"], labelledNodeMap["qe"]}
for _, egressNode := range nodeList {
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodeFail(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
}
exutil.By("3.0 Create ANP with egress traffic allowed from node labeled team=qe but blocked from other nodes.\n")
anp := singleRuleANPPolicyResourceNode{
name: "anp-node-egress-peer-" + testID,
subjectKey: nsMatchLabelKey,
subjectVal: ns,
priority: 40,
policyType: policyType,
direction: direction,
ruleName: "allow egress",
ruleAction: "Allow",
ruleKey: "team",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: nodeLabels[0],
actionname: "pass egress",
actiontype: "Pass",
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anp.name)
anp.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anp.name)).To(o.BeTrue())
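// Append a second egress rule that denies traffic to nodes labelled team=ocp, so only the team=qe node remains reachable from the subject namespace.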
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/1\", \"value\": {\"name\":\"deny egress\", \"action\": \"Deny\", \"to\": [{\"nodes\": {\"matchExpressions\": [{\"key\":\"team\", \"operator\": \"In\", \"values\":[%s]}]}}]}}]", policyType, nodeLabels[1])
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anp.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anp.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules %s after update : ", anpRules)
exutil.By("3.1 Validate from the pods running on all the nodes, egress traffic from node labeled team=qe is allowed.\n")
egressNode := labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeTrue())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("3.2 Validate from the pods running on all the nodes, egress traffic from the node labelled team=ocp is blocked.\n")
egressNode = labelledNodeMap["ocp"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodeFail(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("4.0 Update ANP with only HTTP egress traffic is allowed from node labeled team=qe and all other traffic blocked from other nodes")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/ports\", \"value\": [\"portRange\": {\"protocol\": \"TCP\", \"start\": %s, \"end\": %s}]}]", policyType, strconv.Itoa(int(containerport)), strconv.Itoa(int(hostport)))
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anp.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anp.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules %s after update : ", anpRules)
exutil.By("4.1 Validate from the pods running on all the nodes, only HTTP egress traffic is allowed from node labeled team=qe.\n")
egressNode = labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("5.0 Create new set of pods to validate ACLs are created as per (B)ANP already created.\n")
for i := 0; i < 2; i++ {
pod.name = "new-test-pod-" + testID + "-" + strconv.Itoa(i)
pod.nodename = workersList.Items[i].Name
pod.createPingPodNode(oc)
waitPodReady(oc, ns, pod.name)
newNodePodMap[pod.nodename] = pod.name
}
exutil.By("5.1 Validate from newly created pods on all the nodes, egress traffic from node with label team=ocp is blocked.\n")
egressNode = labelledNodeMap["ocp"]
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodeFail(oc, ns, newNodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("5.2 Validate from newly created pods on all the nodes, only HTTP egress traffic is allowed from node labeled team=qe.\n")
egressNode = labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, newNodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, newNodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("6.0 Create a NP to override BANP to allow egress traffic from node with no label\n")
createResourceFromFile(oc, ns, egressTypeFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "default-allow-egress")).To(o.BeTrue())
exutil.By("6.1 Remove the label team=qe from the node.\n")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, labelledNodeMap["qe"], "team")
exutil.By("6.2 Validate from pods on all the nodes, all egress traffic from node that had label team=qe is now allowed.\n")
egressNode = labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeTrue())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
})
g.It("Author:asood-High-73331-BANP and ANP metrics are available. [Serial]", func() {
var (
testID = "73331"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-pod-mixed-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-me-template.yaml")
anpNodeCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
namespaceLabelKey = "team"
namespaceLabelVal = "qe"
podKey = "name"
podVal = "hello-pod"
expectedBANPMetricsValue = make(map[string]string)
expectedANPMetricsValue = make(map[string]string)
banpEgress = make(map[string]string)
banpIngress = make(map[string]string)
anpEgress = make(map[string]string)
anpIngress = make(map[string]string)
)
// Initialize variables
banpMetricsList := []string{"ovnkube_controller_baseline_admin_network_policies", "ovnkube_controller_baseline_admin_network_policies_db_objects", "ovnkube_controller_baseline_admin_network_policies_rules"}
anpMetricsList := []string{"ovnkube_controller_admin_network_policies", "ovnkube_controller_admin_network_policies_db_objects", "ovnkube_controller_admin_network_policies_rules"}
actionList := []string{"Allow", "Deny", "Pass"}
dbObjects := []string{"ACL", "Address_Set"}
expectedBANPMetricsValue[banpMetricsList[0]] = "1"
expectedBANPMetricsValue[dbObjects[0]] = "2"
expectedANPMetricsValue[anpMetricsList[0]] = "1"
expectedANPMetricsValue[dbObjects[0]] = "1"
ipStackType := checkIPStackType(oc)
exutil.By("1. Create a BANP with two rules with Allow action for Ingress and Deny action for Egress")
banpCR := multiPodMixedRuleBANPPolicyResource{
name: "default",
subjectKey: namespaceLabelKey,
subjectVal: namespaceLabelVal,
subjectPodKey: podKey,
subjectPodVal: podVal,
policyType1: "ingress",
direction1: "from",
ruleName1: "default-allow-ingress",
ruleAction1: "Allow",
ruleKey1: namespaceLabelKey,
ruleVal1: namespaceLabelVal,
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "egress",
direction2: "to",
ruleName2: "default-deny-egress",
ruleAction2: "Deny",
ruleKey2: namespaceLabelKey,
ruleVal2: namespaceLabelVal,
rulePodKey2: podKey,
rulePodVal2: podVal,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiPodMixedRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("2.1 Validate %s metrics for BANP", banpMetricsList[0]))
getPolicyMetrics(oc, banpMetricsList[0], expectedBANPMetricsValue[banpMetricsList[0]])
// Address set
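// Each (B)ANP rule peer appears to be backed by one address set per IP family, so dualstack clusters expect double the Address_Set count.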
if ipStackType == "dualstack" {
expectedBANPMetricsValue[dbObjects[1]] = "4"
} else {
expectedBANPMetricsValue[dbObjects[1]] = "2"
}
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("2.2.%d Validate %s - %s metrics for BANP", i, banpMetricsList[1], dbObjects[i]))
getPolicyMetrics(oc, banpMetricsList[1], expectedBANPMetricsValue[dbObjects[i]], dbObjects[i])
}
banpEgress[actionList[1]] = "1"
banpIngress[actionList[0]] = "1"
ruleDirection := "Egress"
exutil.By(fmt.Sprintf("3. Validate metrics %s for BANP, %s rule and %s action", banpMetricsList[2], ruleDirection, actionList[1]))
getPolicyMetrics(oc, banpMetricsList[2], banpEgress[actionList[1]], ruleDirection, actionList[1])
ruleDirection = "Ingress"
exutil.By(fmt.Sprintf("4. Validate metrics %s for BANP, %s rule and %s action", banpMetricsList[2], ruleDirection, actionList[0]))
getPolicyMetrics(oc, banpMetricsList[2], banpIngress[actionList[0]], ruleDirection, actionList[0])
banpIngress[actionList[1]] = "1"
exutil.By(fmt.Sprintf("5. Update BANP to add another ingress rule and validate metrics %s", banpMetricsList[2]))
patchBANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/1\", \"value\": {\"name\":\"deny ingress\", \"action\": \"Deny\", \"from\": [{\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {}}}]}}]", namespaceLabelKey, namespaceLabelVal)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
getPolicyMetrics(oc, banpMetricsList[2], banpIngress[actionList[1]], ruleDirection, actionList[1])
exutil.By("6. Create a ANP with one ingress rule with deny action.")
anpCR := singleRuleANPMEPolicyResource{
name: "anp-" + testID + "-0",
subjectKey: namespaceLabelKey,
subjectOperator: "In",
subjectVal: namespaceLabelVal,
priority: 25,
policyType: "ingress",
direction: "from",
ruleName: "deny ingress",
ruleAction: "Deny",
ruleKey: namespaceLabelKey,
ruleOperator: "NotIn",
ruleVal: "ns" + testID,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleANPMatchExp(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
// Address set
if ipStackType == "dualstack" {
expectedANPMetricsValue[dbObjects[1]] = "2"
} else {
expectedANPMetricsValue[dbObjects[1]] = "1"
}
exutil.By(fmt.Sprintf("7.1 Validate %s metrics for ANP %s", anpMetricsList[0], anpCR.name))
getPolicyMetrics(oc, anpMetricsList[0], expectedANPMetricsValue[anpMetricsList[0]])
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("7.2.%d Validate %s - %s metrics for ANP %s", i, anpMetricsList[1], dbObjects[i], anpCR.name))
getPolicyMetrics(oc, anpMetricsList[1], expectedANPMetricsValue[dbObjects[i]], dbObjects[i])
}
ruleDirection = "Ingress"
anpIngress[actionList[1]] = "1"
exutil.By(fmt.Sprintf("8. Validate metrics %s for ANP, %s rule and %s action", anpMetricsList[2], ruleDirection, actionList[1]))
getPolicyMetrics(oc, anpMetricsList[2], anpIngress[actionList[1]], ruleDirection, actionList[1])
exutil.By("9. Create another ANP with egress pass and allow rule.")
anpNodeCR := singleRuleANPPolicyResourceNode{
name: "anp-" + testID + "-1",
subjectKey: namespaceLabelKey,
subjectVal: namespaceLabelVal,
priority: 40,
policyType: "egress",
direction: "to",
ruleName: "allow egress",
ruleAction: "Allow",
ruleKey: "team",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: "worker-1",
actionname: "pass egress",
actiontype: "Pass",
template: anpNodeCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpNodeCR.name)
anpNodeCR.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpNodeCR.name)).To(o.BeTrue())
ruleDirection = "Egress"
anpEgress[actionList[0]] = "1"
anpEgress[actionList[2]] = "1"
exutil.By(fmt.Sprintf("10. Validate metrics %s for ANP, %s rule and %s action", anpMetricsList[2], ruleDirection, actionList[0]))
getPolicyMetrics(oc, anpMetricsList[2], anpEgress[actionList[0]], ruleDirection, actionList[0])
exutil.By(fmt.Sprintf("11. Validate metrics %s for ANP, %s rule and %s action", anpMetricsList[2], ruleDirection, actionList[2]))
getPolicyMetrics(oc, anpMetricsList[2], anpEgress[actionList[2]], ruleDirection, actionList[2])
expectedANPMetricsValue[anpMetricsList[0]] = "2"
expectedANPMetricsValue[dbObjects[0]] = "3"
// Address set
if ipStackType == "dualstack" {
expectedANPMetricsValue[dbObjects[1]] = "6"
} else {
expectedANPMetricsValue[dbObjects[1]] = "3"
}
exutil.By(fmt.Sprintf("12.1 Validate %s metrics for both ANP policies", anpMetricsList[0]))
getPolicyMetrics(oc, anpMetricsList[0], expectedANPMetricsValue[anpMetricsList[0]])
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("12.2.%d Validate %s - %s metrics for both ANP policies", i, anpMetricsList[1], dbObjects[i]))
getPolicyMetrics(oc, anpMetricsList[1], expectedANPMetricsValue[dbObjects[i]], dbObjects[i])
}
})
g.It("Author:asood-Longduration-NonPreRelease-High-73453-[FdpOvnOvs] Egress traffic works with ANP, BANP and NP with network egress peer. [Serial]", func() {
var (
testID = "73453"
testDataDir = exutil.FixturePath("testdata", "networking")
banpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
anpMultiNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-rule-cidr-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataDir, "ping-for-pod-specific-node-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-egress-single-CIDR-template.yaml")
matchLabelKey = "team"
matchLabelVal = "ocp"
matchLabelKey1 = "kubernetes.io/metadata.name"
nsPodMap = make(map[string][]string)
urlToLookup = "www.facebook.com"
)
if checkProxy(oc) {
g.Skip("This cluster has proxy configured, egress access cannot be tested on the cluster, skip the test.")
}
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
if !checkIPv6PublicAccess(oc) {
g.Skip("This cluster is dualstack/IPv6 with no access to public websites, egress access cannot be tested on the cluster, skip the test.")
}
}
var allCIDRs, googleIP1, googleIP2, googleDNSServerIP1, googleDNSServerIP2, patchANPCIDR, patchNP string
var allNS, checkIPAccessList []string
exutil.By("0. Get the workers list ")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1.1 Create another namespace")
allNS = append(allNS, oc.Namespace())
oc.SetupProject()
allNS = append(allNS, oc.Namespace())
exutil.By("1.2 Label namespaces.")
for i := 0; i < len(allNS); i++ {
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[i], matchLabelKey+"="+matchLabelVal).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("1.3 Create 2 pods in each namespace")
pod := pingPodResourceNode{
name: "",
namespace: "",
nodename: "",
template: pingPodNodeTemplate,
}
for i := 0; i < len(allNS); i++ {
pod.nodename = workerList.Items[0].Name
pod.namespace = allNS[i]
for j := 0; j < 2; j++ {
pod.name = "test-pod-" + testID + "-" + strconv.Itoa(j)
pod.createPingPodNode(oc)
waitPodReady(oc, allNS[i], pod.name)
nsPodMap[allNS[i]] = append(nsPodMap[allNS[i]], pod.name)
}
}
exutil.By("2. Get one IP address for domain name www.google.com")
ipv4, ipv6 := getIPFromDnsName("www.google.com")
o.Expect(len(ipv4) == 0).NotTo(o.BeTrue())
checkIPAccessList = append(checkIPAccessList, ipv4)
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
o.Expect(len(ipv6) == 0).NotTo(o.BeTrue())
checkIPAccessList = append(checkIPAccessList, ipv6)
}
// Set up networks to be used in (B)ANP
switch ipStackType {
case "ipv4single":
allCIDRs = "0.0.0.0/0"
googleIP1 = ipv4 + "/32"
googleDNSServerIP1 = "8.8.8.8/32"
case "ipv6single":
allCIDRs = "::/0"
googleIP1 = ipv6 + "/128"
googleDNSServerIP1 = "2001:4860:4860::8888/128"
case "dualstack":
allCIDRs = "0.0.0.0/0"
googleIP1 = ipv4 + "/32"
googleIP2 = ipv6 + "/128"
googleDNSServerIP1 = "8.8.8.8/32"
googleDNSServerIP2 = "2001:4860:4860::8888/128"
default:
// Do nothing
}
exutil.By("3.1 Egress traffic works before BANP is created")
for i := 0; i < 2; i++ {
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, true)
}
}
exutil.By("3.2 Create a BANP to deny egress to all networks from all namespaces")
banpCIDR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: matchLabelVal,
ruleName: "deny egress to all networks",
ruleAction: "Deny",
cidr: allCIDRs,
template: banpNetworkTemplate,
}
defer removeResource(oc, true, true, "banp", banpCIDR.name)
banpCIDR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCIDR.name)).To(o.BeTrue())
if ipStackType == "dualstack" {
patchBANPCIDR := `[{"op": "add", "path": "/spec/egress/0/to/0/networks/1", "value":"::/0"}]`
patchReplaceResourceAsAdmin(oc, "banp/"+banpCIDR.name, patchBANPCIDR)
banpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp", banpCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n BANP Rules after update: %s", banpRules)
}
exutil.By("3.3 Egress traffic does not works after BANP is created")
for i := 0; i < 2; i++ {
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, false)
}
}
exutil.By("4.0 Create a ANP to allow egress traffic to TCP port 80 and verify egress traffic works from first namespace")
anpCIDR := singleRuleCIDRANPPolicyResource{
name: "anp-network-egress-peer-" + testID,
subjectKey: matchLabelKey1,
subjectVal: allNS[0],
priority: 45,
ruleName: "allow egress network from first namespace",
ruleAction: "Allow",
cidr: googleIP1,
template: anpNetworkTemplate,
}
defer removeResource(oc, true, true, "anp", anpCIDR.name)
anpCIDR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCIDR.name)).To(o.BeTrue())
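// For dualstack clusters the patch also appends the IPv6 address of www.google.com as a second network peer; in all cases the Allow rule is narrowed to TCP port 80.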
if ipStackType == "dualstack" {
patchANPCIDR = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 80}}]}, {"op": "add", "path": "/spec/egress/0/to/0/networks/1", "value":"` + googleIP2 + `"} ]`
} else {
patchANPCIDR = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 80}}]}]`
}
patchReplaceResourceAsAdmin(oc, "anp/"+anpCIDR.name, patchANPCIDR)
anpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
exutil.By("4.1 Egress traffic allowed from first but blocked from second namespace after ANP is created.")
resultList := []bool{true, false}
for i, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, resultList[i])
}
exutil.By("4.2 Egress traffic allowed from newly created pod in first namespace but blocked from second namespace.")
for i := 0; i < len(allNS); i++ {
pod.nodename = workerList.Items[0].Name
pod.namespace = allNS[i]
pod.name = "test-pod-" + testID + "-" + "3"
pod.createPingPodNode(oc)
waitPodReady(oc, allNS[i], pod.name)
nsPodMap[allNS[i]] = append(nsPodMap[allNS[i]], pod.name)
}
for i, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][2], allNS[i], ip, resultList[i])
}
exutil.By("5.0 Create a ANP to allow egress traffic to TCP port 80 from pod labeled color=red in second namespace")
anpMultiCIDR := MultiRuleCIDRANPPolicyResource{
name: "anp-network-egress-peer-" + testID + "-0",
subjectKey: matchLabelKey1,
subjectVal: allNS[1],
priority: 30,
ruleName1: "egress to TCP server",
ruleAction1: "Allow",
cidr1: googleIP1,
ruleName2: "egress to UDP server",
ruleAction2: "Allow",
cidr2: googleDNSServerIP1,
template: anpMultiNetworkTemplate,
}
defer removeResource(oc, true, true, "anp", anpMultiCIDR.name)
anpMultiCIDR.createMultiRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpMultiCIDR.name)).To(o.BeTrue())
exutil.By("5.1 Update the rules to add port & protocol and subject to apply rules to specific pod")
if ipStackType == "dualstack" {
patchANPCIDR = fmt.Sprintf("[ {\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {\"matchLabels\": {\"color\": \"red\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\":%s}, {\"op\": \"add\", \"path\": \"/spec/egress/1/to/0/networks/1\", \"value\":%s}]", matchLabelKey1, allNS[1], googleIP2, googleDNSServerIP2)
} else {
patchANPCIDR = fmt.Sprintf("[ {\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {\"matchLabels\": {\"color\": \"red\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}]", matchLabelKey1, allNS[1])
}
patchReplaceResourceAsAdmin(oc, "anp/"+anpMultiCIDR.name, patchANPCIDR)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
anpSubject, subErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.subject}").Output()
o.Expect(subErr).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Subject after update: %s", anpSubject)
exutil.By("5.2 Label the pod")
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", nsPodMap[allNS[1]][2], "-n", allNS[1], "color=red").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("7.1 Validate TCP and UDP egress traffic from labelled pod in second namespace")
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[1]][2], allNS[1], ip, true)
}
verifyNslookup(oc, nsPodMap[allNS[1]][2], allNS[1], urlToLookup, true)
exutil.By("7.2 Validate TCP egress traffic from unlabelled pod in second namespace and from pod in first namespace works is not impacted")
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[1]][0], allNS[1], ip, false)
}
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[0]][2], allNS[0], ip, true)
}
verifyNslookup(oc, nsPodMap[allNS[1]][0], allNS[1], urlToLookup, false)
verifyNslookup(oc, nsPodMap[allNS[0]][0], allNS[0], urlToLookup, false)
exutil.By("8.0 Create third ANP to allow egress traffic from pod labeled color=blue in both namespaces")
anpMultiCIDR.name = "anp-network-egress-peer-" + testID + "-1"
anpMultiCIDR.priority = 25
// Rule 1
anpMultiCIDR.ruleName1 = "egress to udp server"
anpMultiCIDR.ruleAction1 = "Pass"
anpMultiCIDR.cidr1 = googleDNSServerIP1
// Rule 2
anpMultiCIDR.ruleName2 = "egress to tcp server"
anpMultiCIDR.ruleAction2 = "Allow"
anpMultiCIDR.cidr2 = googleIP1
defer removeResource(oc, true, true, "anp", anpMultiCIDR.name)
anpMultiCIDR.createMultiRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpMultiCIDR.name)).To(o.BeTrue())
exutil.By("8.1 Update the rules to add port & protocol and subject to apply rules to pods labelled blue")
if ipStackType == "dualstack" {
patchANPCIDR = fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchExpressions\": [{\"key\": %s, \"operator\": \"In\", \"values\": [%s, %s]}]}, \"podSelector\": {\"matchLabels\": {\"color\": \"blue\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\":%s}, {\"op\": \"add\", \"path\": \"/spec/egress/1/to/0/networks/1\", \"value\":%s}]", matchLabelKey1, allNS[0], allNS[1], googleDNSServerIP2, googleIP2)
} else {
patchANPCIDR = fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchExpressions\": [{\"key\": %s, \"operator\": \"In\", \"values\": [%s, %s]}]}, \"podSelector\": {\"matchLabels\": {\"color\": \"blue\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}]", matchLabelKey1, allNS[0], allNS[1])
}
patchReplaceResourceAsAdmin(oc, "anp/"+anpMultiCIDR.name, patchANPCIDR)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.subject}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Subject after update: %s", anpRules)
exutil.By("8.2 Label first pod in both namespace color=blue")
for i := 0; i < 2; i++ {
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", nsPodMap[allNS[i]][0], "-n", allNS[i], "color=blue").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
}
exutil.By("8.3 Validate only egress to TCP 80 works")
for i := 0; i < 2; i++ {
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, true)
}
verifyNslookup(oc, nsPodMap[allNS[i]][0], allNS[i], urlToLookup, false)
}
exutil.By("8.4 Create a network policy in first namespace")
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: googleDNSServerIP1,
namespace: allNS[0],
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
if ipStackType == "dualstack" {
patchNP = `[{"op": "replace", "path": "/spec/podSelector", "value": {"matchLabels": {"color": "blue"}}}, {"op": "add", "path": "/spec/egress/0/to/1", "value": {"ipBlock":{"cidr":"` + googleDNSServerIP2 + `"}}} ]`
} else {
patchNP = `[{"op": "replace", "path": "/spec/podSelector", "value": {"matchLabels": {"color": "blue"}}}]`
}
patchReplaceResourceAsAdmin(oc, "networkpolicy/"+npIPBlockNS1.name, patchNP, allNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", npIPBlockNS1.name, "-n", allNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
exutil.By("8.5 Validate egress to DNS server at port 53 only works from pod in first namespace")
verifyNslookup(oc, nsPodMap[allNS[0]][0], allNS[0], urlToLookup, true)
verifyNslookup(oc, nsPodMap[allNS[1]][0], allNS[1], urlToLookup, false)
exutil.By("8.6 Update rule with pass action to allow to see egress UDP traffic works from with pod label color=blue in second namespace")
patchANPCIDR = `[{"op": "replace", "path": "/spec/egress/0/action", "value": "Allow"}]`
patchReplaceResourceAsAdmin(oc, "anp/"+anpMultiCIDR.name, patchANPCIDR)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
verifyNslookup(oc, nsPodMap[allNS[1]][0], allNS[1], urlToLookup, true)
})
})
// RDU Test cases
var _ = g.Describe("[sig-networking] SDN adminnetworkpolicy rdu", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-"+getRandomString(), exutil.KubeConfigPath())
testDataDir = exutil.FixturePath("testdata", "networking")
)
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !(isPlatformSuitable(oc)) || !strings.Contains(networkType, "ovn") {
g.Skip("These cases can only be run on clusters on networking team's private BM RDU and with OVNK network plugin, skip for other platforms.")
}
})
g.It("Author:asood-High-73963-[rducluster] BANP and ANP with AdminpolicybasedExternalRoutes (APBR). [Serial]", func() {
var (
testID = "73963"
anpNodeTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
banpNodeTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template-node.yaml")
banpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataDir, "ping-for-pod-specific-node-template.yaml")
gwPodNodeTemplate = filepath.Join(testDataDir, "gw-pod-hostnetwork-template.yaml")
httpServerPodNodeTemplate = filepath.Join(testDataDir, "httpserverPod-specific-node-template.yaml")
apbrDynamicTemplate = filepath.Join(testDataDir, "apbexternalroute-dynamic-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nodePodMap = make(map[string]string)
containerport int32 = 30001
hostport int32 = 30003
)
exutil.By("0. Get the non sriov and sriov workers list")
workers := excludeSriovNodes(oc)
if len(workers) < 3 {
g.Skip("This test can only be run for cluster that has atleast 3 non sriov worker nodes.")
}
sriovWorkers := getSriovNodes(oc)
if len(workers) < 1 {
g.Skip("This test can only be run for cluster that has atleast 1 sriov worker node.")
}
exutil.By("1. Create the served pods in the first namespace on sriov node and non sriov node")
servedNs := oc.Namespace()
exutil.SetNamespacePrivileged(oc, servedNs)
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", servedNs, "multiple_gws=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pod := pingPodResourceNode{
name: "test-pod-" + testID + "-0",
namespace: servedNs,
nodename: sriovWorkers[0],
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, servedNs, pod.name)
nodePodMap[pod.nodename] = pod.name
pod.name = "test-pod-" + testID + "-1"
pod.nodename = workers[2]
pod.createPingPodNode(oc)
waitPodReady(oc, servedNs, pod.name)
nodePodMap[pod.nodename] = pod.name
exutil.By("2. Create second namespace for the serving pod.")
oc.SetupProject()
servingNs := oc.Namespace()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", servingNs, "gws=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+servingNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("2.1. Create the serving pod in serving namespace %s", servingNs))
pod.name = "ext-gw-" + testID
pod.namespace = servingNs
pod.nodename = workers[0]
pod.template = gwPodNodeTemplate
pod.createPingPodNode(oc)
waitPodReady(oc, servingNs, pod.name)
nodePodMap[pod.nodename] = pod.name
gwPodNodeIP := getNodeIPv4(oc, servingNs, workers[0])
exutil.By("3. Create third namespace for the host port pod.")
oc.SetupProject()
hostPortPodNs := oc.Namespace()
exutil.SetNamespacePrivileged(oc, hostPortPodNs)
exutil.By(fmt.Sprintf("3.1 Create a host port pod in %s", hostPortPodNs))
httpServerPod := httpserverPodResourceNode{
name: "hostportpod-" + testID,
namespace: hostPortPodNs,
containerport: containerport,
hostport: hostport,
nodename: workers[1],
template: httpServerPodNodeTemplate,
}
httpServerPod.createHttpservePodNodeByAdmin(oc)
waitPodReady(oc, hostPortPodNs, httpServerPod.name)
nodePodMap[httpServerPod.nodename] = httpServerPod.name
exutil.By("4. Create admin policy based dynamic external routes")
apbr := apbDynamicExternalRoute{
name: "apbr-" + testID,
labelKey: "multiple_gws",
labelValue: "true",
podLabelKey: "gw",
podLabelValue: "true",
namespaceLabelKey: "gws",
namespaceLabelValue: "true",
bfd: true,
template: apbrDynamicTemplate,
}
defer removeResource(oc, true, true, "apbexternalroute", apbr.name)
apbr.createAPBDynamicExternalRoute(oc)
apbExtRouteCheckErr := checkAPBExternalRouteStatus(oc, apbr.name, "Success")
o.Expect(apbExtRouteCheckErr).NotTo(o.HaveOccurred())
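// The admin policy based external route selects served namespaces through the
// multiple_gws=true label, and picks gateway pods matching gw=true from
// namespaces labelled gws=true, with BFD enabled (see the struct fields above).
// A quick manual inspection, illustrative only and not part of the test flow:
//   oc get apbexternalroute apbr-73963 -o yaml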
exutil.By("5. Get one IP address for domain name www.google.com")
ipv4, _ := getIPFromDnsName("www.google.com")
o.Expect(len(ipv4) == 0).NotTo(o.BeTrue())
exutil.By("6.1 Egress traffic works before BANP is created")
verifyDstIPAccess(oc, nodePodMap[sriovWorkers[0]], servedNs, ipv4, true)
exutil.By("6.2 Create a BANP to deny egress to all networks")
banpCIDR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: servedNs,
ruleName: "deny egress to all networks",
ruleAction: "Deny",
cidr: "0.0.0.0/0",
template: banpNetworkTemplate,
}
defer removeResource(oc, true, true, "banp", banpCIDR.name)
banpCIDR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCIDR.name)).To(o.BeTrue())
exutil.By("6.3 Egress traffic does not works after BANP is created")
verifyDstIPAccess(oc, nodePodMap[sriovWorkers[0]], servedNs, ipv4, false)
exutil.By("7. Create a ANP to allow traffic to host running http server and verify egress traffic works")
anpCIDR := singleRuleCIDRANPPolicyResource{
name: "anp-network-egress-peer-" + testID,
subjectKey: matchLabelKey,
subjectVal: servedNs,
priority: 10,
ruleName: "allow egress to gateway pod",
ruleAction: "Allow",
cidr: gwPodNodeIP + "/32",
template: anpNetworkTemplate,
}
defer removeResource(oc, true, true, "anp", anpCIDR.name)
anpCIDR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCIDR.name)).To(o.BeTrue())
patchANPCIDR := fmt.Sprintf("[{\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\": %s/24}]", ipv4)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCIDR.name, "--type=json", "-p", patchANPCIDR).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
exutil.By("7.1 Egress traffic works after ANP is created")
verifyDstIPAccess(oc, nodePodMap[sriovWorkers[0]], servedNs, ipv4, true)
exutil.By("8.0 Delete BANP and ANP")
removeResource(oc, true, true, "anp", anpCIDR.name)
removeResource(oc, true, true, "banp", banpCIDR.name)
exutil.By("9.1 Validate egress traffic before BANP is created.")
CurlPod2NodePass(oc, servedNs, nodePodMap[sriovWorkers[0]], workers[1], strconv.Itoa(int(hostport)))
exutil.By("9.2 Create BANP to block egress traffic from all the worker nodes.")
banpNode := singleRuleBANPPolicyResourceNode{
name: "default",
subjectKey: matchLabelKey,
subjectVal: servedNs,
policyType: "egress",
direction: "to",
ruleName: "default egress from all nodes",
ruleAction: "Deny",
ruleKey: "kubernetes.io/hostname",
template: banpNodeTemplate,
}
defer removeResource(oc, true, true, "banp", banpNode.name)
banpNode.createSingleRuleBANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpNode.name)).To(o.BeTrue())
exutil.By("9.3 Validate egress traffic after BANP is created.")
CurlPod2NodeFail(oc, servedNs, nodePodMap[sriovWorkers[0]], workers[1], strconv.Itoa(int(hostport)))
CurlPod2NodeFail(oc, servedNs, nodePodMap[workers[2]], workers[1], strconv.Itoa(int(hostport)))
exutil.By("10.0 Create ANP with egress traffic allowed from nodes that have a served pod and serving pod scheduled")
anpNode := singleRuleANPPolicyResourceNode{
name: "anp-node-egress-peer-" + testID,
subjectKey: matchLabelKey,
subjectVal: servedNs,
priority: 10,
policyType: "egress",
direction: "to",
ruleName: "allow egress",
ruleAction: "Allow",
ruleKey: "kubernetes.io/hostname",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: workers[0],
actionname: "pass egress",
actiontype: "Pass",
template: anpNodeTemplate,
}
defer removeResource(oc, true, true, "anp", anpNode.name)
anpNode.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpNode.name)).To(o.BeTrue())
patchANP := fmt.Sprintf("[{\"op\": \"remove\", \"path\":\"/spec/egress/1\"}, {\"op\": \"replace\", \"path\":\"/spec/egress/0/to/0/nodes/matchExpressions/0/values\", \"value\":[%s, %s, %s] }]", workers[0], workers[1], sriovWorkers[0])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpNode.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpNode.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
CurlPod2NodePass(oc, servedNs, nodePodMap[sriovWorkers[0]], workers[1], strconv.Itoa(int(hostport)))
CurlPod2NodeFail(oc, servedNs, nodePodMap[workers[2]], workers[1], strconv.Itoa(int(hostport)))
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
7c44a2b4-c4d6-4fdd-81d6-629d684fd99b
|
Author:asood-High-67103-[FdpOvnOvs] Egress BANP, NP and ANP policy with allow, deny and pass action. [Serial]
|
['"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-67103-[FdpOvnOvs] Egress BANP, NP and ANP policy with allow, deny and pass action. [Serial]", func() {
var (
testID = "67103"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
egressPolicyTypeFile = filepath.Join(testDataDir, "networkpolicy/allow-egress-red.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
targetPods = make(map[string]string)
podColors = []string{"red", "blue"}
nsList = []string{}
)
exutil.By("1. Get the first namespace (subject) and create another (target)")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
oc.SetupProject()
targetNs := oc.Namespace()
nsList = append(nsList, targetNs)
exutil.By("2. Create two pods in each namespace")
rcPingPodResource := replicationControllerPingPodResource{
name: testID + "-test-pod",
replicas: 2,
namespace: "",
template: rcPingPodTemplate,
}
for i := 0; i < 2; i++ {
rcPingPodResource.namespace = nsList[i]
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", nsList[i])
rcPingPodResource.createReplicaController(oc)
err := waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
}
podListSubjectNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[0], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListSubjectNs)).Should(o.Equal(2))
podListTargetNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[1], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListTargetNs)).Should(o.Equal(2))
exutil.By("3. Label pod in target namespace")
for i := 0; i < 2; i++ {
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", podListTargetNs[i], "-n", targetNs, "type="+podColors[i]).Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
targetPods[podColors[i]] = podListTargetNs[i]
}
exutil.By("4. Create a Baseline Admin Network Policy with deny action")
banpCR := singleRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
policyType: "egress",
direction: "to",
ruleName: "default-deny-to-" + targetNs,
ruleAction: "Deny",
ruleKey: matchLabelKey,
ruleVal: targetNs,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("5. Verify BANP blocks all egress traffic from %s to %s", subjectNs, targetNs))
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, podListTargetNs[j])
}
}
exutil.By("6. Create a network policy with egress rule")
createResourceFromFile(oc, subjectNs, egressPolicyTypeFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "allow-egress-to-red")).To(o.BeTrue())
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", targetNs, "team=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
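// allow-egress-red.yaml is applied verbatim from testdata; based on the
// verification in step 7 it presumably permits egress only to pods labelled
// type=red in namespaces labelled team=qe, which is why the target namespace
// is labelled team=qe here. This is an inference from the test flow, not a
// dump of the policy file.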
exutil.By("7. Verify network policy overrides BANP and only egress to pods labeled type=red works")
for i := 0; i < 2; i++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["red"])
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["blue"])
}
exutil.By("8. Verify ANP with different actions and priorities")
anpIngressRuleCR := singleRuleANPPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 10,
policyType: "egress",
direction: "to",
ruleName: "allow-to-" + targetNs,
ruleAction: "Allow",
ruleKey: matchLabelKey,
ruleVal: targetNs,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("8.1 Verify ANP priority %v with name %s action %s egress traffic from %s to %s", anpIngressRuleCR.priority, anpIngressRuleCR.name, anpIngressRuleCR.ruleAction, subjectNs, targetNs))
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, podListTargetNs[j])
}
}
anpIngressRuleCR.name = "anp-" + testID + "-2"
anpIngressRuleCR.priority = 5
anpIngressRuleCR.ruleName = "deny-to-" + targetNs
anpIngressRuleCR.ruleAction = "Deny"
exutil.By(fmt.Sprintf(" 8.2 Verify ANP priority %v with name %s action %s egress traffic from %s to %s", anpIngressRuleCR.priority, anpIngressRuleCR.name, anpIngressRuleCR.ruleAction, subjectNs, targetNs))
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, podListTargetNs[j])
}
}
anpIngressRuleCR.name = "anp-" + testID + "-3"
anpIngressRuleCR.priority = 0
anpIngressRuleCR.ruleName = "pass-to-" + targetNs
anpIngressRuleCR.ruleAction = "Pass"
exutil.By(fmt.Sprintf("8.3 Verify ANP priority %v with name %s action %s egress traffic from %s to %s", anpIngressRuleCR.priority, anpIngressRuleCR.name, anpIngressRuleCR.ruleAction, subjectNs, targetNs))
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
for i := 0; i < 2; i++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["red"])
CurlPod2PodFail(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["blue"])
}
exutil.By("9. Change label on type=blue to red and verify traffic")
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", targetPods["blue"], "-n", targetNs, "type="+podColors[0], "--overwrite").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
for i := 0; i < 2; i++ {
CurlPod2PodPass(oc, subjectNs, podListSubjectNs[i], targetNs, targetPods["blue"])
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
dfa43752-26e1-438b-a4cb-9dfae141874d
|
Author:asood-High-67104-[FdpOvnOvs] Ingress BANP, NP and ANP policy with allow, deny and pass action. [Serial]
|
['"fmt"', '"path/filepath"', '"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-67104-[FdpOvnOvs] Ingress BANP, NP and ANP policy with allow, deny and pass action. [Serial]", func() {
var (
testID = "67104"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template.yaml")
anpMultiRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
ingressNPPolicyTemplate = filepath.Join(testDataDir, "networkpolicy/generic-networkpolicy-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nsList = []string{}
policyType = "ingress"
direction = "from"
nsPod = make(map[string]string)
)
exutil.By("1. Get the first namespace (subject) and create three (source) namespaces")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
for i := 0; i < 3; i++ {
oc.SetupProject()
sourceNs := oc.Namespace()
nsList = append(nsList, sourceNs)
}
e2e.Logf("Project list %v", nsList)
exutil.By("2. Create a pod in all the namespaces")
rcPingPodResource := replicationControllerPingPodResource{
name: "",
replicas: 1,
namespace: "",
template: rcPingPodTemplate,
}
for i := 0; i < 4; i++ {
rcPingPodResource.namespace = nsList[i]
rcPingPodResource.name = testID + "-test-pod-" + strconv.Itoa(i)
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", nsList[i])
rcPingPodResource.createReplicaController(oc)
err := waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[i], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(1))
nsPod[nsList[i]] = podListNs[0]
e2e.Logf(fmt.Sprintf("Project %s has pod %s", nsList[i], nsPod[nsList[i]]))
}
exutil.By("3. Create a Baseline Admin Network Policy with ingress allow action for first two namespaces and deny for third")
banpCR := multiRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
policyType: policyType,
direction: direction,
ruleName1: "default-allow-from-" + nsList[1],
ruleAction1: "Allow",
ruleKey1: matchLabelKey,
ruleVal1: nsList[1],
ruleName2: "default-allow-from-" + nsList[2],
ruleAction2: "Allow",
ruleKey2: matchLabelKey,
ruleVal2: nsList[2],
ruleName3: "default-deny-from-" + nsList[3],
ruleAction3: "Deny",
ruleKey3: matchLabelKey,
ruleVal3: nsList[3],
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("4. Verify the traffic coming into subject namespace %s is allowed from first two namespaces and denied from third", nsList[0]))
for i := 1; i < 3; i++ {
CurlPod2PodPass(oc, nsList[i], nsPod[nsList[i]], nsList[0], nsPod[nsList[0]])
}
CurlPod2PodFail(oc, nsList[3], nsPod[nsList[3]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("5. Create another Admin Network Policy with ingress deny action to %s from %s namespace", nsList[0], nsList[2]))
anpEgressRuleCR := singleRuleANPPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 17,
policyType: "ingress",
direction: "from",
ruleName: "deny-from-" + nsList[2],
ruleAction: "Deny",
ruleKey: matchLabelKey,
ruleVal: nsList[2],
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpEgressRuleCR.name)
anpEgressRuleCR.createSingleRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("6. Verify traffic from %s to %s is denied", nsList[2], nsList[0]))
CurlPod2PodFail(oc, nsList[2], nsPod[nsList[2]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("7. Create another Admin Network Policy with ingress deny action to %s and pass action to %s and %s from %s namespace with higher priority", nsList[0], nsList[1], nsList[2], nsList[3]))
anpEgressMultiRuleCR := multiRuleANPPolicyResource{
name: "anp-" + testID + "-2",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 16,
policyType: "ingress",
direction: "from",
ruleName1: "deny-from-" + nsList[1],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[1],
ruleName2: "pass-from-" + nsList[2],
ruleAction2: "Pass",
ruleKey2: matchLabelKey,
ruleVal2: nsList[2],
ruleName3: "pass-from-" + nsList[3],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[3],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpEgressMultiRuleCR.name)
anpEgressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("8. Verify traffic from %s to %s is allowed due to action %s", nsList[2], nsList[0], anpEgressMultiRuleCR.ruleAction2))
CurlPod2PodPass(oc, nsList[2], nsPod[nsList[2]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("9. Verify traffic from %s and %s to %s is denied", nsList[1], nsList[3], nsList[0]))
CurlPod2PodFail(oc, nsList[1], nsPod[nsList[1]], nsList[0], nsPod[nsList[0]])
CurlPod2PodFail(oc, nsList[3], nsPod[nsList[3]], nsList[0], nsPod[nsList[0]])
exutil.By(fmt.Sprintf("10. Create a networkpolicy in %s for ingress from %s and %s", subjectNs, nsList[1], nsList[3]))
matchStr := "matchLabels"
networkPolicyResource := networkPolicyResource{
name: "ingress-" + testID + "-networkpolicy",
namespace: subjectNs,
policy: "ingress",
policyType: "Ingress",
direction1: "from",
namespaceSel1: matchStr,
namespaceSelKey1: matchLabelKey,
namespaceSelVal1: nsList[1],
direction2: "from",
namespaceSel2: matchStr,
namespaceSelKey2: matchLabelKey,
namespaceSelVal2: nsList[3],
template: ingressNPPolicyTemplate,
}
networkPolicyResource.createNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("12. Verify ingress traffic from %s and %s is denied and alllowed from %s", nsList[1], nsList[2], nsList[3]))
for i := 1; i < 2; i++ {
CurlPod2PodFail(oc, nsList[i], nsPod[nsList[i]], nsList[0], nsPod[nsList[0]])
}
CurlPod2PodPass(oc, nsList[3], nsPod[nsList[3]], nsList[0], nsPod[nsList[0]])
})
| |||||
test case
|
openshift/openshift-tests-private
|
45d64f00-d2f0-4486-9be7-6f0163b7ec92
|
Author:asood-Longduration-NonPreRelease-High-67105-[FdpOvnOvs] Ingress BANP, ANP and NP with allow, deny and pass action with TCP, UDP and SCTP protocols. [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-Longduration-NonPreRelease-High-67105-[FdpOvnOvs] Ingress BANP, ANP and NP with allow, deny and pass action with TCP, UDP and SCTP protocols. [Serial]", func() {
var (
testID = "67105"
testDataDir = exutil.FixturePath("testdata", "networking")
sctpTestDataDir = filepath.Join(testDataDir, "sctp")
sctpClientPod = filepath.Join(sctpTestDataDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(sctpTestDataDir, "sctpserver.yaml")
sctpModule = filepath.Join(sctpTestDataDir, "load-sctp-module.yaml")
udpListenerPod = filepath.Join(testDataDir, "udp-listener.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-rule-template.yaml")
anpMultiRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
ingressNPPolicyTemplate = filepath.Join(testDataDir, "networkpolicy/generic-networkpolicy-protocol-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nsList = []string{}
udpPort = "8181"
policyType = "ingress"
direction = "from"
matchStr = "matchLabels"
)
exutil.By("1. Test setup")
exutil.By("Enable SCTP on all workers")
prepareSCTPModule(oc, sctpModule)
exutil.By("Get the first namespace, create three additional namespaces and label all except the subject namespace")
nsList = append(nsList, oc.Namespace())
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nsList[0], "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
for i := 0; i < 2; i++ {
oc.SetupProject()
peerNs := oc.Namespace()
nsLabelErr = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", peerNs, "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
nsList = append(nsList, peerNs)
}
oc.SetupProject()
subjectNs := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, subjectNs)
exutil.SetNamespacePrivileged(oc, subjectNs)
exutil.By("2. Create a Baseline Admin Network Policy with deny action for each peer namespace")
banpCR := multiRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
policyType: policyType,
direction: direction,
ruleName1: "default-deny-from-" + nsList[0],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "default-deny-from-" + nsList[1],
ruleAction2: "Deny",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "default-deny-from-" + nsList[2],
ruleAction3: "Deny",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("3. Create workload in namespaces")
exutil.By(fmt.Sprintf("Create clients in peer namespaces and SCTP/UDP/TCP services in the subject %s namespace", subjectNs))
for i := 0; i < 3; i++ {
exutil.By(fmt.Sprintf("Create SCTP client pod in %s", nsList[0]))
createResourceFromFile(oc, nsList[i], sctpClientPod)
err1 := waitForPodWithLabelReady(oc, nsList[i], "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "SCTP client pod is not running")
}
exutil.By(fmt.Sprintf("Create SCTP server pod in %s", subjectNs))
createResourceFromFile(oc, subjectNs, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, subjectNs, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "SCTP server pod is not running")
exutil.By(fmt.Sprintf("Create a pod in %s for TCP", subjectNs))
rcPingPodResource := replicationControllerPingPodResource{
name: "test-pod-" + testID,
replicas: 1,
namespace: subjectNs,
template: rcPingPodTemplate,
}
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", rcPingPodResource.namespace)
rcPingPodResource.createReplicaController(oc)
err = waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(1))
exutil.By(fmt.Sprintf("Create UDP Listener Pod in %s", subjectNs))
createResourceFromFile(oc, subjectNs, udpListenerPod)
err = waitForPodWithLabelReady(oc, subjectNs, "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "The pod with label name=udp-pod not ready")
var udpPodName []string
udpPodName = getPodName(oc, subjectNs, "name=udp-pod")
exutil.By(fmt.Sprintf("4. All type of ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("5. Create ANP for TCP with ingress allow action from %s, deny from %s and pass action from %s to %s", nsList[0], nsList[1], nsList[2], subjectNs))
anpIngressMultiRuleCR := multiRuleANPPolicyResource{
name: "anp-ingress-tcp-" + testID + "-0",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 15,
policyType: policyType,
direction: direction,
ruleName1: "allow-from-" + nsList[0],
ruleAction1: "Allow",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "deny-from-" + nsList[1],
ruleAction2: "Deny",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "pass-from-" + nsList[2],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressMultiRuleCR.name)
anpIngressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("5.1 Update protocol for each rule"))
for i := 0; i < 2; i++ {
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/%s/ports\", \"value\": [\"portNumber\": {\"protocol\": \"TCP\", \"port\": 8080}]}]", strconv.Itoa(i))
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpIngressMultiRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}
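// Note: the patch body uses a compact flow form for the ports list
// (["portNumber": {...}] rather than the fully braced [{"portNumber": {...}}]
// form used elsewhere in these tests), which the test expects oc patch to
// accept. A strictly JSON form of the same patch would look like this
// (illustrative only):
//   patchANP := fmt.Sprintf(`[{"op": "add", "path": "/spec/ingress/%d/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 8080}}]}]`, i)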
exutil.By(fmt.Sprintf("6. Traffic validation after anp %s is applied to %s", anpIngressMultiRuleCR.name, subjectNs))
exutil.By(fmt.Sprintf("6.0. SCTP and UDP ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("6.1. TCP ingress traffic to %s from the clients %s and %s is denied", nsList[1], nsList[2], subjectNs))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("6.2. TCP ingress traffic to %s from the client %s is allowed", nsList[0], subjectNs))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("7. Create second ANP for SCTP with ingress deny action from %s & %s and pass action from %s to %s", nsList[0], nsList[1], nsList[2], subjectNs))
anpIngressMultiRuleCR = multiRuleANPPolicyResource{
name: "anp-ingress-sctp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 10,
policyType: policyType,
direction: direction,
ruleName1: "deny-from-" + nsList[0],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "deny-from-" + nsList[1],
ruleAction2: "Deny",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "pass-from-" + nsList[2],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressMultiRuleCR.name)
anpIngressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("7.1 Update protocol for each rule"))
for i := 0; i < 2; i++ {
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/%s/ports\", \"value\": [\"portNumber\": {\"protocol\": \"SCTP\", \"port\": 30102}]}]", strconv.Itoa(i))
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpIngressMultiRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("8. Traffic validation after anp %s as applied to %s", anpIngressMultiRuleCR.name, subjectNs))
exutil.By(fmt.Sprintf("8.0. SCTP and UDP ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("8.1. TCP ingress traffic to %s from the clients %s and %s is denied", nsList[1], nsList[2], subjectNs))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("8.2. TCP ingress traffic to %s from the client %s is allowed", nsList[0], subjectNs))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("9. Create a network policy in %s from the client %s to allow SCTP", subjectNs, nsList[2]))
networkPolicyResource := networkPolicyProtocolResource{
name: "allow-ingress-sctp-" + testID,
namespace: subjectNs,
policy: policyType,
policyType: "Ingress",
direction: direction,
namespaceSel: matchStr,
namespaceSelKey: matchLabelKey,
namespaceSelVal: nsList[2],
podSel: matchStr,
podSelKey: "name",
podSelVal: "sctpclient",
port: 30102,
protocol: "SCTP",
template: ingressNPPolicyTemplate,
}
networkPolicyResource.createProtocolNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
patchNP := `[{"op": "add", "path": "/spec/podSelector", "value": {"matchLabels": {"name":"sctpserver"}}}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("networkpolicy", networkPolicyResource.name, "-n", subjectNs, "--type=json", "-p", patchNP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("10. Traffic validation after network policy %s is applied to %s", networkPolicyResource.name, subjectNs))
exutil.By(fmt.Sprintf("10.0. UDP ingress traffic to %s from the clients is denied", subjectNs))
for i := 0; i < 3; i++ {
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("10.1. SCTP ingress traffic to %s from the %s and %s clients is denied", subjectNs, nsList[0], nsList[1]))
for i := 0; i < 2; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
}
exutil.By(fmt.Sprintf("10.2. SCTP ingress traffic to %s from the %s client is allowed", subjectNs, nsList[2]))
checkSCTPTraffic(oc, sctpClientPodname, nsList[2], sctpServerPodName, subjectNs, true)
exutil.By(fmt.Sprintf("10.3. TCP ingress traffic to %s from the clients %s and %s is denied", nsList[1], nsList[2], subjectNs))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
exutil.By(fmt.Sprintf("10.4. TCP ingress traffic to %s from the client %s is allowed", nsList[0], subjectNs))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("11. Create third ANP for UDP with ingress pass action from %s, %s and %s to %s", nsList[0], nsList[1], nsList[2], subjectNs))
anpIngressMultiRuleCR = multiRuleANPPolicyResource{
name: "anp-ingress-udp-" + testID + "-2",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 5,
policyType: policyType,
direction: direction,
ruleName1: "pass-from-" + nsList[0],
ruleAction1: "Pass",
ruleKey1: matchLabelKey,
ruleVal1: nsList[0],
ruleName2: "pass-from-" + nsList[1],
ruleAction2: "Pass",
ruleKey2: matchLabelKey,
ruleVal2: nsList[1],
ruleName3: "pass-from-" + nsList[2],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[2],
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressMultiRuleCR.name)
anpIngressMultiRuleCR.createMultiRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressMultiRuleCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("11.1 Update protocol for each rule"))
for i := 0; i < 2; i++ {
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/%s/ports\", \"value\": [\"portNumber\": {\"protocol\": \"UDP\", \"port\": %v}]}]", strconv.Itoa(i), udpPort)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpIngressMultiRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("12. Traffic validation after admin network policy %s is applied to %s", anpIngressMultiRuleCR.name, subjectNs))
exutil.By(fmt.Sprintf("12.1 UDP traffic from all the clients to %s is denied", subjectNs))
for i := 0; i < 3; i++ {
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, false)
}
exutil.By(fmt.Sprintf("12.2 SCTP traffic from the clients %s & %s to %s is denied, allowed from %s", nsList[0], nsList[1], subjectNs, nsList[2]))
for i := 0; i < 2; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
}
checkSCTPTraffic(oc, sctpClientPodname, nsList[2], sctpServerPodName, subjectNs, true)
exutil.By(fmt.Sprintf("12.3 TCP traffic from the clients %s & %s to %s is denied, allowed from %s", nsList[1], nsList[2], subjectNs, nsList[0]))
for i := 1; i < 3; i++ {
CurlPod2PodFail(oc, nsList[i], sctpClientPodname, subjectNs, podListNs[0])
}
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("13. Create a network policy in %s from the client %s to allow SCTP", subjectNs, nsList[2]))
networkPolicyResource = networkPolicyProtocolResource{
name: "allow-all-protocols-" + testID,
namespace: subjectNs,
policy: policyType,
policyType: "Ingress",
direction: direction,
namespaceSel: matchStr,
namespaceSelKey: "team",
namespaceSelVal: "qe",
podSel: matchStr,
podSelKey: "name",
podSelVal: "sctpclient",
port: 30102,
protocol: "SCTP",
template: ingressNPPolicyTemplate,
}
networkPolicyResource.createProtocolNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
patchNP = `[{"op": "add", "path": "/spec/ingress/0/ports", "value": [{"protocol": "TCP", "port": 8080},{"protocol": "UDP", "port": 8181}, {"protocol": "SCTP", "port": 30102}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("networkpolicy", networkPolicyResource.name, "-n", subjectNs, "--type=json", "-p", patchNP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("14. Traffic validation to %s from the clients is allowed", subjectNs))
exutil.By(fmt.Sprintf("14.1 UDP ingress traffic to %s from the clients is allowed", subjectNs))
for i := 0; i < 3; i++ {
checkUDPTraffic(oc, sctpClientPodname, nsList[i], udpPodName[0], subjectNs, udpPort, true)
}
exutil.By(fmt.Sprintf("14.2 TCP traffic from the clients %s & %s to %s is allowed but denied from %s", nsList[0], nsList[2], subjectNs, nsList[1]))
CurlPod2PodPass(oc, nsList[0], sctpClientPodname, subjectNs, podListNs[0])
CurlPod2PodFail(oc, nsList[1], sctpClientPodname, subjectNs, podListNs[0])
CurlPod2PodPass(oc, nsList[2], sctpClientPodname, subjectNs, podListNs[0])
exutil.By(fmt.Sprintf("14.3 SCTP traffic from the clients %s & %s to %s is denied but allowed from %s", nsList[0], nsList[1], subjectNs, nsList[2]))
for i := 0; i < 2; i++ {
checkSCTPTraffic(oc, sctpClientPodname, nsList[i], sctpServerPodName, subjectNs, false)
}
checkSCTPTraffic(oc, sctpClientPodname, nsList[2], sctpServerPodName, subjectNs, true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
e11890eb-1226-42af-9975-1581ab0df816
|
Author:asood-High-67614-[FdpOvnOvs] Egress BANP, ANP and NP with allow, deny and pass action with TCP, UDP and SCTP protocols. [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-67614-[FdpOvnOvs] Egress BANP, ANP and NP with allow, deny and pass action with TCP, UDP and SCTP protocols. [Serial]", func() {
var (
testID = "67614"
testDataDir = exutil.FixturePath("testdata", "networking")
sctpTestDataDir = filepath.Join(testDataDir, "sctp")
sctpClientPod = filepath.Join(sctpTestDataDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(sctpTestDataDir, "sctpserver.yaml")
sctpModule = filepath.Join(sctpTestDataDir, "load-sctp-module.yaml")
udpListenerPod = filepath.Join(testDataDir, "udp-listener.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-me-template.yaml")
anpSingleRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-me-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
egressNPPolicyTemplate = filepath.Join(testDataDir, "networkpolicy/generic-networkpolicy-protocol-template.yaml")
matchExpKey = "kubernetes.io/metadata.name"
matchExpOper = "In"
nsList = []string{}
policyType = "egress"
direction = "to"
udpPort = "8181"
matchStr = "matchLabels"
)
exutil.By("1. Test setup")
exutil.By("Enable SCTP on all workers")
prepareSCTPModule(oc, sctpModule)
exutil.By("Get the first namespace, create three additional namespaces and label all except the subject namespace")
nsList = append(nsList, oc.Namespace())
subjectNs := nsList[0]
for i := 0; i < 3; i++ {
oc.SetupProject()
peerNs := oc.Namespace()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", peerNs, "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
nsList = append(nsList, peerNs)
}
// First created namespace for SCTP
defer exutil.RecoverNamespaceRestricted(oc, nsList[1])
exutil.SetNamespacePrivileged(oc, nsList[1])
exutil.By("2. Create a Baseline Admin Network Policy with deny action for egress to each peer namespaces for all protocols")
banpCR := singleRuleBANPMEPolicyResource{
name: "default",
subjectKey: matchExpKey,
subjectOperator: matchExpOper,
subjectVal: subjectNs,
policyType: policyType,
direction: direction,
ruleName: "default-deny-to-all",
ruleAction: "Deny",
ruleKey: matchExpKey,
ruleOperator: matchExpOper,
ruleVal: nsList[1],
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleBANPMatchExp(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
nsListVal, err := json.Marshal(nsList[1:])
o.Expect(err).NotTo(o.HaveOccurred())
patchBANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/egress/0/to/0/namespaces/matchExpressions/0/values\", \"value\": %s}]", nsListVal)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
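// json.Marshal on the namespace slice produces a quoted JSON array (for
// example ["ns-a","ns-b","ns-c"]), so the values list dropped into the patch
// above is already valid JSON. A minimal sketch of the same idea for an
// arbitrary slice (the names are illustrative only):
//   extra, _ := json.Marshal([]string{"ns-a", "ns-b"})
//   patch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/egress/0/to/0/namespaces/matchExpressions/0/values", "value": %s}]`, extra)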
exutil.By("3. Create workload in namespaces")
exutil.By(fmt.Sprintf("Create client in subject %s namespace and SCTP, UDP & TCP service respectively in other three namespaces", subjectNs))
exutil.By(fmt.Sprintf("Create SCTP client pod in %s", nsList[0]))
createResourceFromFile(oc, nsList[0], sctpClientPod)
err1 := waitForPodWithLabelReady(oc, nsList[0], "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "SCTP client pod is not running")
exutil.By(fmt.Sprintf("Create SCTP server pod in %s", nsList[1]))
createResourceFromFile(oc, nsList[1], sctpServerPod)
err2 := waitForPodWithLabelReady(oc, nsList[1], "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "SCTP server pod is not running")
exutil.By(fmt.Sprintf("Create a pod in %s for TCP", nsList[2]))
rcPingPodResource := replicationControllerPingPodResource{
name: "test-pod-" + testID,
replicas: 1,
namespace: nsList[2],
template: rcPingPodTemplate,
}
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", rcPingPodResource.namespace)
rcPingPodResource.createReplicaController(oc)
err = waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(1))
exutil.By(fmt.Sprintf("Create UDP Listener Pod in %s", nsList[3]))
createResourceFromFile(oc, nsList[3], udpListenerPod)
err = waitForPodWithLabelReady(oc, nsList[3], "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "The pod with label name=udp-pod not ready")
var udpPodName []string
udpPodName = getPodName(oc, nsList[3], "name=udp-pod")
exutil.By(fmt.Sprintf("4. All type of egress traffic from %s to TCP/UDP/SCTP service is denied", subjectNs))
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], false)
CurlPod2PodFail(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, false)
exutil.By("5. Create a Admin Network Policy with allow action for egress to each peer namespaces for all protocols")
anpEgressRuleCR := singleRuleANPMEPolicyResource{
name: "anp-" + policyType + "-" + testID + "-1",
subjectKey: matchExpKey,
subjectOperator: matchExpOper,
subjectVal: subjectNs,
priority: 10,
policyType: "egress",
direction: "to",
ruleName: "allow-to-all",
ruleAction: "Allow",
ruleKey: matchExpKey,
ruleOperator: matchExpOper,
ruleVal: nsList[1],
template: anpSingleRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpEgressRuleCR.name)
anpEgressRuleCR.createSingleRuleANPMatchExp(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressRuleCR.name)).To(o.BeTrue())
exutil.By("5.1 Update ANP to include all the namespaces")
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/egress/0/to/0/namespaces/matchExpressions/0/values\", \"value\": %s}]", nsListVal)
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("6. Egress traffic from %s to TCP/UDP/SCTP service is allowed after ANP %s is applied", subjectNs, anpEgressRuleCR.name))
exutil.By(fmt.Sprintf("6.1 Egress traffic from %s to TCP and service is allowed", subjectNs))
patchANP = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 8080}}, {"portNumber": {"protocol": "UDP", "port": 8181}}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], false)
CurlPod2PodPass(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, true)
exutil.By(fmt.Sprintf("6.2 Egress traffic from %s to SCTP service is also allowed", subjectNs))
patchANP = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 8080}}, {"portNumber": {"protocol": "UDP", "port": 8181}}, {"portNumber": {"protocol": "SCTP", "port": 30102}}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], true)
CurlPod2PodPass(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, true)
exutil.By("7. Create another Admin Network Policy with pass action for egress to each peer namespaces for all protocols")
anpEgressRuleCR.name = "anp-" + policyType + "-" + testID + "-2"
anpEgressRuleCR.priority = 5
anpEgressRuleCR.ruleName = "pass-to-all"
anpEgressRuleCR.ruleAction = "Pass"
defer removeResource(oc, true, true, "anp", anpEgressRuleCR.name)
anpEgressRuleCR.createSingleRuleANPMatchExp(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpEgressRuleCR.name)).To(o.BeTrue())
exutil.By("7.1 Update ANP to include all the namespaces")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/egress/0/to/0/namespaces/matchExpressions/0/values\", \"value\": %s}]", nsListVal)
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpEgressRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("7.2 Egress traffic from %s to TCP/UDP/SCTP service is denied after ANP %s is applied", subjectNs, anpEgressRuleCR.name))
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], false)
CurlPod2PodFail(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, false)
exutil.By(fmt.Sprintf("8. Egress traffic from %s to TCP/SCTP/UDP service is allowed after network policy is applied", subjectNs))
networkPolicyResource := networkPolicyProtocolResource{
name: "allow-all-protocols-" + testID,
namespace: subjectNs,
policy: policyType,
policyType: "Egress",
direction: direction,
namespaceSel: matchStr,
namespaceSelKey: "team",
namespaceSelVal: "qe",
podSel: matchStr,
podSelKey: "name",
podSelVal: "sctpclient",
port: 30102,
protocol: "SCTP",
template: egressNPPolicyTemplate,
}
networkPolicyResource.createProtocolNetworkPolicy(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", subjectNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("8.1 Update the network policy %s in %s to add ports for protocols and all the pods ", networkPolicyResource.name, subjectNs))
patchNP := `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"protocol": "TCP", "port": 8080},{"protocol": "UDP", "port": 8181}, {"protocol": "SCTP", "port": 30102}]}, {"op": "add", "path": "/spec/egress/0/to", "value": [{"namespaceSelector": {"matchLabels": {"team": "qe"}}, "podSelector": {}}]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("networkpolicy", networkPolicyResource.name, "-n", subjectNs, "--type=json", "-p", patchNP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
checkSCTPTraffic(oc, sctpClientPodname, subjectNs, sctpServerPodName, nsList[1], true)
CurlPod2PodPass(oc, subjectNs, sctpClientPodname, nsList[2], podListNs[0])
checkUDPTraffic(oc, sctpClientPodname, subjectNs, udpPodName[0], nsList[3], udpPort, true)
})
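For illustration, the hand-escaped JSON patch strings used in the steps above (for example the one adding portNumber entries for TCP 8080, UDP 8181 and SCTP 30102) could also be generated with encoding/json. Below is a minimal standalone sketch assuming only the Go standard library; the type names portNumber, portEntry and patchOp are illustrative and are not helpers from this test suite.

package main

import (
	"encoding/json"
	"fmt"
)

// portNumber mirrors the {"protocol": ..., "port": ...} shape used by the ANP
// egress port rules above; the type names here are illustrative only.
type portNumber struct {
	Protocol string `json:"protocol"`
	Port     int    `json:"port"`
}

type portEntry struct {
	PortNumber portNumber `json:"portNumber"`
}

type patchOp struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value"`
}

func main() {
	// Same ports as the ANP patch in the test: TCP 8080, UDP 8181, SCTP 30102.
	patch := []patchOp{{
		Op:   "add",
		Path: "/spec/egress/0/ports",
		Value: []portEntry{
			{portNumber{"TCP", 8080}},
			{portNumber{"UDP", 8181}},
			{portNumber{"SCTP", 30102}},
		},
	}}
	b, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	// The resulting string could then be passed to `oc patch ... --type=json -p`.
	fmt.Println(string(b))
}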
| |||||
test case
|
openshift/openshift-tests-private
|
e3ffbfd9-29d6-4843-ac82-2464634234aa
|
Author:asood-High-73189-[FdpOvnOvs] BANP and ANP ACL audit log works [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-73189-[FdpOvnOvs] BANP and ANP ACL audit log works [Serial]", func() {
var (
testID = "73189"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-pod-mixed-rule-template.yaml")
anpMultiRuleCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-pod-mixed-rule-template.yaml")
rcPingPodTemplate = filepath.Join(testDataDir, "rc-ping-for-pod-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nsList = []string{}
podKey = "color"
podVal = "red"
coloredPods = make(map[string]string)
unColoredPods = make(map[string]string)
ovnkubeNodeColoredPods = make(map[string]string)
ovnkubeNodeUnColoredPods = make(map[string]string)
)
exutil.By("1. Get the first namespace (subject) and create three peer namespaces")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
for i := 0; i < 3; i++ {
oc.SetupProject()
peerNs := oc.Namespace()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", peerNs, "team=qe").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
nsList = append(nsList, peerNs)
}
e2e.Logf("Project list %v", nsList)
exutil.By("2. Create pods in all the namespaces, label one of the pod and obtain ovnkube-node pod for the scheduled pods in subject namespace.")
rcPingPodResource := replicationControllerPingPodResource{
name: "",
replicas: 2,
namespace: "",
template: rcPingPodTemplate,
}
for i := 0; i < 4; i++ {
rcPingPodResource.namespace = nsList[i]
rcPingPodResource.name = testID + "-test-pod-" + strconv.Itoa(i)
e2e.Logf("Create replica controller for pods %s", rcPingPodResource.name)
defer removeResource(oc, true, true, "replicationcontroller", rcPingPodResource.name, "-n", nsList[i])
rcPingPodResource.createReplicaController(oc)
err := waitForPodWithLabelReady(oc, rcPingPodResource.namespace, "name="+rcPingPodResource.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", rcPingPodResource.name))
podListNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[i], "name="+rcPingPodResource.name)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListNs)).Should(o.Equal(2))
e2e.Logf("Label pod %s in project %s", podListNs[0], nsList[i])
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", podListNs[0], "-n", nsList[i], podKey+"="+podVal).Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
coloredPods[nsList[i]] = podListNs[0]
unColoredPods[nsList[i]] = podListNs[1]
if i == 0 {
e2e.Logf("Get ovnkube-node pod scheduled on the same node where first pods %s is scheduled", podListNs[0])
nodeName, nodeNameErr := exutil.GetPodNodeName(oc, nsList[i], podListNs[0])
o.Expect(nodeNameErr).NotTo(o.HaveOccurred())
ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(podErr).NotTo(o.HaveOccurred())
ovnkubeNodeColoredPods[nsList[i]] = ovnKubePod
e2e.Logf("Get equivalent ovnkube-node pod scheduled on the same node where second pod %s is scheduled", podListNs[1])
nodeName, nodeNameErr = exutil.GetPodNodeName(oc, nsList[i], podListNs[1])
o.Expect(nodeNameErr).NotTo(o.HaveOccurred())
ovnKubePod, podErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(podErr).NotTo(o.HaveOccurred())
ovnkubeNodeUnColoredPods[nsList[i]] = ovnKubePod
}
}
exutil.By("3. Create a BANP Policy with egress allow action and ingress deny action for subject namespace")
banpCR := multiPodMixedRuleBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
subjectPodKey: podKey,
subjectPodVal: podVal,
policyType1: "egress",
direction1: "to",
ruleName1: "default-allow-egress-to-colored-pods",
ruleAction1: "Allow",
ruleKey1: "team",
ruleVal1: "qe",
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "ingress",
direction2: "from",
ruleName2: "default-deny-from-colored-pods",
ruleAction2: "Deny",
ruleKey2: "team",
ruleVal2: "qe",
rulePodKey2: podKey,
rulePodVal2: podVal,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiPodMixedRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("3.1 Update BANP subject pod selector.")
patchBANP := `[{"op": "add", "path": "/spec/subject/pods/podSelector", "value": {}}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Update BANP to add another egress rule to BANP")
patchBANP = `[{"op": "add", "path": "/spec/egress/1", "value": { "action": "Deny", "name": "default-deny-unlabelled-pods", "to": [{"pods": { "namespaceSelector": {"matchLabels": {"team": "qe"}}, "podSelector": {"matchExpressions": [{"key": "color", "operator": "DoesNotExist"}]}}}]} }]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.3 Update BANP to add another ingress rule to BANP")
patchBANP = `[{"op": "add", "path": "/spec/ingress/1", "value": { "action": "Allow", "name": "default-allow-unlabelled-pods", "from": [{"pods": { "namespaceSelector": {"matchLabels": {"team": "qe"}}, "podSelector": {"matchExpressions": [{"key": "color", "operator": "DoesNotExist"}]}}}]} }]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("4. BANP ACL audit logging verification for each rule")
aclLogSearchString := "name=\"BANP:default:Egress:0\", verdict=allow, severity=alert"
exutil.By(fmt.Sprintf("4.1 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[1], coloredPods[nsList[1]], "pass", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = "name=\"BANP:default:Egress:1\", verdict=drop, severity=alert"
exutil.By(fmt.Sprintf("4.2 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[1], unColoredPods[nsList[1]], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = "name=\"BANP:default:Ingress:0\", verdict=drop, severity=alert"
exutil.By(fmt.Sprintf("4.3 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, nsList[2], coloredPods[nsList[2]], subjectNs, coloredPods[subjectNs], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = "name=\"BANP:default:Ingress:1\", verdict=allow, severity=alert"
exutil.By(fmt.Sprintf("4.4 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, nsList[3], unColoredPods[nsList[3]], subjectNs, unColoredPods[subjectNs], "pass", aclLogSearchString, ovnkubeNodeUnColoredPods[subjectNs], true)
exutil.By("5. Update BANP to change action on ingress from allow to deny")
patchBANP = `[{"op": "add", "path": "/spec/egress/0/action", "value": "Deny"}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy/default", "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("6. Create Admin Network Policy with ingress deny from %s to %s and egress allow to %s and pass to %s from %s namespace", nsList[1], nsList[0], nsList[2], nsList[3], nsList[0]))
anpMultiMixedRuleCR := multiPodMixedRuleANPPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
subjectPodKey: podKey,
subjectPodVal: podVal,
priority: 20,
policyType1: "ingress",
direction1: "from",
ruleName1: "deny-from-" + nsList[1],
ruleAction1: "Deny",
ruleKey1: matchLabelKey,
ruleVal1: nsList[1],
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "egress",
direction2: "to",
ruleName2: "allow-to-" + nsList[2],
ruleAction2: "Allow",
ruleKey2: matchLabelKey,
ruleVal2: nsList[2],
rulePodKey2: podKey,
rulePodVal2: podVal,
ruleName3: "pass-to-" + nsList[3],
ruleAction3: "Pass",
ruleKey3: matchLabelKey,
ruleVal3: nsList[3],
rulePodKey3: "color",
rulePodVal3: "red",
template: anpMultiRuleCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpMultiMixedRuleCR.name)
anpMultiMixedRuleCR.createMultiPodMixedRuleANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpMultiMixedRuleCR.name)).To(o.BeTrue())
aclLogSearchString = fmt.Sprintf("name=\"ANP:%s:Ingress:0\", verdict=drop, severity=alert", anpMultiMixedRuleCR.name)
exutil.By(fmt.Sprintf("6.1 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, nsList[1], coloredPods[nsList[1]], subjectNs, coloredPods[subjectNs], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = fmt.Sprintf("name=\"ANP:%s:Egress:0\", verdict=allow, severity=warning", anpMultiMixedRuleCR.name)
exutil.By(fmt.Sprintf("6.2 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[2], coloredPods[nsList[2]], "pass", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
aclLogSearchString = fmt.Sprintf("name=\"ANP:%s:Egress:1\", verdict=pass, severity=info", anpMultiMixedRuleCR.name)
exutil.By(fmt.Sprintf("6.3 Verify ACL Logging for rule %s", aclLogSearchString))
checkACLLogs(oc, subjectNs, coloredPods[subjectNs], nsList[3], coloredPods[nsList[3]], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], true)
exutil.By("7. Update BANP Policy annotation to see allow ACL is no longer audited")
aclSettings := aclSettings{DenySetting: "", AllowSetting: "warning"}
annotationErr := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("--overwrite", "baselineadminnetworkpolicy", "default", aclSettings.getJSONString()).Execute()
o.Expect(annotationErr).NotTo(o.HaveOccurred())
exutil.By("8. Update ANP Policy ingress rule from allow to pass to verify BANP ACL logging change")
patchANP := `[{"op": "replace", "path": "/spec/ingress/0/action", "value": "Pass" }]`
patchANPErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("anp", anpMultiMixedRuleCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchANPErr).NotTo(o.HaveOccurred())
aclLogSearchString = "name=\"BANP:default:Ingress:0\", verdict=drop, severity=alert"
exutil.By(fmt.Sprintf("8.1 Verify ACL for rule %s in BANP is not logged", aclLogSearchString))
checkACLLogs(oc, nsList[1], coloredPods[nsList[1]], subjectNs, coloredPods[subjectNs], "fail", aclLogSearchString, ovnkubeNodeColoredPods[subjectNs], false)
})
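As a side note on step 7 above, the ACL-logging annotation value applied through aclSettings.getJSONString() is a small JSON object pairing deny/allow severities. A minimal sketch of building an equivalent value with encoding/json follows; the aclLogging type and the k8s.ovn.org/acl-logging annotation key named in the comment are assumptions for illustration, not this suite's actual helper.

package main

import (
	"encoding/json"
	"fmt"
)

// aclLogging mirrors the deny/allow severity pair set in step 7 of the test;
// the field and type names are illustrative.
type aclLogging struct {
	Deny  string `json:"deny,omitempty"`
	Allow string `json:"allow,omitempty"`
}

func main() {
	// Same values as the test: deny logging disabled, allow logged at "warning".
	val, err := json.Marshal(aclLogging{Deny: "", Allow: "warning"})
	if err != nil {
		panic(err)
	}
	// The value would be attached to the policy as an annotation, for example
	// k8s.ovn.org/acl-logging={"allow":"warning"} (annotation key assumed here).
	fmt.Printf("k8s.ovn.org/acl-logging=%s\n", string(val))
}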
| |||||
test case
|
openshift/openshift-tests-private
|
8bddd6ec-7b3b-47fe-a716-f807c8156261
|
Author:asood-High-73604-BANP and ANP validation. [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-73604-BANP and ANP validation. [Serial]", func() {
var (
testID = "73604"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
validCIDR = "10.10.10.1/24"
matchLabelKey = "kubernetes.io/metadata.name"
invalidCIDR = "10.10.10.1-10.10.10.1"
invalidIPv6 = "2001:db8:a0b:12f0::::0:1/128"
expectedMessages = [3]string{"Duplicate value", "Invalid CIDR format provided", "Invalid CIDR format provided"}
resourceType = [2]string{"banp", "anp"}
patchCIDR = []string{}
resourceName = []string{}
patchAction string
)
subjectNs := oc.Namespace()
exutil.By("Create BANP with single rule with CIDR")
banp := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: subjectNs,
ruleName: "Egress to CIDR",
ruleAction: "Deny",
cidr: validCIDR,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banp.name)
banp.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banp.name)).To(o.BeTrue())
resourceName = append(resourceName, banp.name)
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-0-" + testID,
subjectKey: matchLabelKey,
subjectVal: subjectNs,
priority: 10,
ruleName: "Egress to CIDR",
ruleAction: "Deny",
cidr: validCIDR,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
resourceName = append(resourceName, anpCR.name)
patchCIDR = append(patchCIDR, fmt.Sprintf("[{\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\": %s }]", validCIDR))
patchCIDR = append(patchCIDR, fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/egress/0/to/0/networks/0\", \"value\": %s}]", invalidCIDR))
patchCIDR = append(patchCIDR, fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/egress/0/to/0/networks/0\", \"value\": %s}]", invalidIPv6))
exutil.By("BANP and ANP validation with invalid CIDR values")
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("Validating %s with name %s", strings.ToUpper(resourceType[i]), resourceName[i]))
for j := 0; j < len(expectedMessages); j++ {
exutil.By(fmt.Sprintf("Validating %s message", expectedMessages[j]))
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(resourceType[i], resourceName[i], "--type=json", "-p", patchCIDR[j]).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, expectedMessages[j])).To(o.BeTrue())
}
}
exutil.By("BANP and ANP validation with action values in lower case")
policyActions := map[string][]string{"banp": {"allow", "deny"}, "anp": {"allow", "deny", "pass"}}
idx := 0
for _, polType := range resourceType {
exutil.By(fmt.Sprintf("Validating %s with name %s", strings.ToUpper(polType), resourceName[idx]))
for _, actionStr := range policyActions[polType] {
exutil.By(fmt.Sprintf("Validating invalid action %s", actionStr))
patchAction = fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/egress/0/action\", \"value\": %s}]", actionStr)
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(polType, resourceName[idx], "--type=json", "-p", patchAction).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, fmt.Sprintf("Unsupported value: \"%s\"", actionStr))).To(o.BeTrue())
}
idx++
}
exutil.By("ANP validation for priority more than 99")
anpCR.name = "anp-1-" + testID
anpCR.priority = 100
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
statusChk, statusChkMsg := checkSpecificPolicyStatus(oc, "anp", anpCR.name, "message", "OVNK only supports priority ranges 0-99")
o.Expect(statusChk).To(o.BeTrue())
o.Expect(statusChkMsg).To(o.BeEmpty())
})
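The invalid CIDR values rejected by the API server above ("Invalid CIDR format provided") can also be screened locally before patching. A minimal standalone sketch using net.ParseCIDR from the Go standard library with the same candidate values as the test; this is only an illustration and does not replace the server-side validation being exercised.

package main

import (
	"fmt"
	"net"
)

func main() {
	// The same candidate values the test patches into the BANP/ANP networks list.
	candidates := []string{
		"10.10.10.1/24",                // valid CIDR
		"10.10.10.1-10.10.10.1",        // invalid: an IP range, not CIDR notation
		"2001:db8:a0b:12f0::::0:1/128", // invalid IPv6 CIDR
	}
	for _, c := range candidates {
		if _, _, err := net.ParseCIDR(c); err != nil {
			fmt.Printf("%q rejected: %v\n", c, err)
			continue
		}
		fmt.Printf("%q accepted\n", c)
	}
}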
| |||||
test case
|
openshift/openshift-tests-private
|
b7a17487-e0d2-42ae-862d-45033ebd89f9
|
Author:asood-High-73802-[FdpOvnOvs] BANP and ANP work with named ports. [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-73802-[FdpOvnOvs] BANP and ANP work with named ports. [Serial]", func() {
var (
testID = "73802"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-pod-mixed-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-me-template.yaml")
namedPortPodTemplate = filepath.Join(testDataDir, "named-port-pod-template.yaml")
direction = "from"
policyType = "ingress"
namespaceLabelKey = "team"
namespaceLabelVal = "qe"
podKey = "name"
podVal = "hello-pod"
nsList = []string{}
dummyLabel = "qe1"
)
exutil.By("1. Get the first namespace (subject) and create another (peer)")
subjectNs := oc.Namespace()
nsList = append(nsList, subjectNs)
oc.SetupProject()
peerNs := oc.Namespace()
nsList = append(nsList, peerNs)
exutil.By("2. Create two pods in each namespace and label namespaces")
namedPortPod := namedPortPodResource{
name: "",
namespace: "",
podLabelKey: "name",
podLabelVal: "hello-pod",
portname: "",
containerport: 8080,
template: namedPortPodTemplate,
}
podNames := []string{"hello-pod-" + testID + "-1", "hello-pod-" + testID + "-2"}
portNames := []string{"web", "web123"}
for i := 0; i < 2; i++ {
namedPortPod.namespace = nsList[i]
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nsList[i], namespaceLabelKey+"="+namespaceLabelVal).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for j := 0; j < len(podNames); j++ {
namedPortPod.name = podNames[j]
namedPortPod.portname = portNames[j]
namedPortPod.createNamedPortPod(oc)
}
err = waitForPodWithLabelReady(oc, namedPortPod.namespace, namedPortPod.podLabelKey+"="+namedPortPod.podLabelVal)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label %s=%s in %s not ready", namedPortPod.podLabelKey, namedPortPod.podLabelVal, namedPortPod.namespace))
podListInNs, podListErr := exutil.GetAllPodsWithLabel(oc, nsList[i], namedPortPod.podLabelKey+"="+namedPortPod.podLabelVal)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podListInNs)).Should(o.Equal(2))
e2e.Logf("Pods %s in %s namespace", podListInNs, nsList[i])
}
exutil.By("3. Create a ANP with deny and pass action for ingress to projects with label team=qe")
anpCR := singleRuleANPMEPolicyResource{
name: "anp-" + testID + "-1",
subjectKey: namespaceLabelKey,
subjectOperator: "In",
subjectVal: namespaceLabelVal,
priority: 25,
policyType: policyType,
direction: direction,
ruleName: "deny ingress",
ruleAction: "Deny",
ruleKey: namespaceLabelKey,
ruleOperator: "NotIn",
ruleVal: dummyLabel,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleANPMatchExp(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("3.1 Update ANP's first rule with named port")
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/ports\", \"value\": [\"namedPort\": %s]}]", policyType, portNames[0])
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Update ANP to add second ingress rule with named port")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/1\", \"value\": {\"name\":\"pass ingress\", \"action\": \"Pass\", \"from\": [{\"namespaces\": {\"matchLabels\": {%s: %s}}}], \"ports\":[{\"namedPort\": %s}]}}]", policyType, namespaceLabelKey, namespaceLabelVal, portNames[1])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("3.3 Validate traffic is blocked between pods with named port %s but passes through the pods with named ports %s", portNames[0], portNames[1]))
CurlPod2PodPass(oc, nsList[0], podNames[1], nsList[1], podNames[1])
CurlPod2PodPass(oc, nsList[1], podNames[1], nsList[0], podNames[1])
CurlPod2PodFail(oc, nsList[0], podNames[0], nsList[1], podNames[0])
CurlPod2PodFail(oc, nsList[1], podNames[0], nsList[0], podNames[0])
exutil.By("4. Create a BANP with deny and pass action for ingress to projects with label team=qe")
exutil.By("4.0 Update ANP change Deny action to Pass for first rule")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/name\", \"value\": \"pass ingress\"}, {\"op\": \"add\", \"path\":\"/spec/%s/0/action\", \"value\": \"Pass\"}]", policyType, policyType)
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
banpCR := multiPodMixedRuleBANPPolicyResource{
name: "default",
subjectKey: namespaceLabelKey,
subjectVal: namespaceLabelVal,
subjectPodKey: podKey,
subjectPodVal: podVal,
policyType1: policyType,
direction1: direction,
ruleName1: "default-allow-ingress",
ruleAction1: "Allow",
ruleKey1: "team",
ruleVal1: "qe",
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "egress",
direction2: "to",
ruleName2: "default-deny-from-colored-pods",
ruleAction2: "Deny",
ruleKey2: "team",
ruleVal2: "qe",
rulePodKey2: podKey,
rulePodVal2: podVal,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiPodMixedRuleBANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("4.1 Remove egress rule in BANP")
patchBANP := `[{"op": "remove", "path":"/spec/egress"}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("4.2 Update first rule with named port")
patchBANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/ports\", \"value\": [\"namedPort\": %s]}]", policyType, portNames[1])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("4.3 Add another rule with first named port")
patchBANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/1\", \"value\": {\"name\":\"deny ingress\", \"action\": \"Deny\", \"from\": [{\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {}}}], \"ports\":[{\"namedPort\": %s}]}}]", policyType, namespaceLabelKey, namespaceLabelVal, portNames[0])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("4.4 Validate traffic passes between pods with named port %s but is blocked between the pods with named ports %s", portNames[1], portNames[0]))
CurlPod2PodPass(oc, nsList[0], podNames[0], nsList[1], podNames[1])
CurlPod2PodPass(oc, nsList[1], podNames[0], nsList[0], podNames[1])
CurlPod2PodFail(oc, nsList[0], podNames[1], nsList[1], podNames[0])
CurlPod2PodFail(oc, nsList[1], podNames[1], nsList[0], podNames[0])
})
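The namedPort rules exercised above resolve against the port names declared on the serving containers. A minimal sketch of the pod shape the named-port template is expected to render, built with the upstream k8s.io/api types; the pod name, labels and image are illustrative assumptions, and only the named containerPort 8080 mirrors the test.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "hello-pod-named-port", // illustrative name
			Labels: map[string]string{"name": "hello-pod"},
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "hello-pod",
				Image: "example.com/hello:latest", // placeholder image
				Ports: []corev1.ContainerPort{{
					Name:          "web", // the namedPort referenced by the ANP/BANP rules
					ContainerPort: 8080,
					Protocol:      corev1.ProtocolTCP,
				}},
			}},
		},
	}
	b, _ := json.MarshalIndent(pod, "", "  ")
	fmt.Println(string(b))
}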
| |||||
test case
|
openshift/openshift-tests-private
|
a61e93e9-f2e7-4239-8abf-25a4e29d2c13
|
Author:asood-NonHyperShiftHOST-High-73454-[FdpOvnOvs] Egress traffic works with ANP, BANP and NP with node egress peer. [Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-NonHyperShiftHOST-High-73454-[FdpOvnOvs] Egress traffic works with ANP, BANP and NP with node egress peer. [Serial]", func() {
var (
testID = "73454"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template-node.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
egressTypeFile = filepath.Join(testDataDir, "networkpolicy", "default-allow-egress.yaml")
httpServerPodNodeTemplate = filepath.Join(testDataDir, "httpserverPod-specific-node-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataDir, "ping-for-pod-specific-node-template.yaml")
containerport int32 = 30001
hostport int32 = 30003
direction = "to"
policyType = "egress"
nsMatchLabelKey = "kubernetes.io/metadata.name"
nodeLabels = []string{"qe", "ocp"}
labelledNodeMap = make(map[string]string)
nodePodMap = make(map[string]string)
newNodePodMap = make(map[string]string)
numWorkerNodes = 2
)
exutil.By("1.0 Get the worker nodes in the cluster")
workersList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workersList.Items) < numWorkerNodes {
g.Skip("Skipping the test as it requires two worker nodes, found insufficient worker nodes")
}
exutil.By("1.1 Label the worker nodes")
for i := 0; i < numWorkerNodes; i++ {
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workersList.Items[i].Name, "team", nodeLabels[i])
labelledNodeMap[nodeLabels[i]] = workersList.Items[i].Name
}
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, labelledNodeMap["ocp"], "team")
exutil.By("1.2 Create the pods on cluster network and pods that open port on worker nodes")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
httpServerPod := httpserverPodResourceNode{
name: "",
namespace: ns,
containerport: containerport,
hostport: hostport,
nodename: "",
template: httpServerPodNodeTemplate,
}
for i := 0; i < numWorkerNodes; i++ {
httpServerPod.name = "httpserverpod-" + testID + "-" + strconv.Itoa(i)
httpServerPod.nodename = workersList.Items[i].Name
httpServerPod.createHttpservePodNodeByAdmin(oc)
waitPodReady(oc, ns, httpServerPod.name)
}
pod := pingPodResourceNode{
name: "",
namespace: ns,
nodename: "",
template: pingPodNodeTemplate,
}
for i := 0; i < 2; i++ {
pod.name = "test-pod-" + testID + "-" + strconv.Itoa(i)
pod.nodename = workersList.Items[i].Name
pod.createPingPodNode(oc)
waitPodReady(oc, ns, pod.name)
nodePodMap[pod.nodename] = pod.name
}
exutil.By("1.3 Validate from the pods running on all the nodes, egress traffic from each node is allowed.\n")
nodeList := []string{labelledNodeMap["ocp"], labelledNodeMap["qe"]}
for _, egressNode := range nodeList {
// Ping between the nodes does not work on all clusters, therefore check allowed ICMP egress traffic from pod running on the node
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeTrue())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
}
exutil.By("2.0 Create BANP to block egress traffic from all the worker nodes.\n")
banp := singleRuleBANPPolicyResourceNode{
name: "default",
subjectKey: nsMatchLabelKey,
subjectVal: ns,
policyType: policyType,
direction: direction,
ruleName: "default-egress",
ruleAction: "Deny",
ruleKey: "kubernetes.io/hostname",
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banp.name)
banp.createSingleRuleBANPNode(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banp.name)).To(o.BeTrue())
exutil.By("2.1 Validate from the pods running on all the nodes, egress traffic from each node is blocked.\n")
nodeList = []string{labelledNodeMap["ocp"], labelledNodeMap["qe"]}
for _, egressNode := range nodeList {
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodeFail(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
}
exutil.By("3.0 Create ANP with egress traffic allowed from node labeled team=qe but blocked from other nodes.\n")
anp := singleRuleANPPolicyResourceNode{
name: "anp-node-egress-peer-" + testID,
subjectKey: nsMatchLabelKey,
subjectVal: ns,
priority: 40,
policyType: policyType,
direction: direction,
ruleName: "allow egress",
ruleAction: "Allow",
ruleKey: "team",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: nodeLabels[0],
actionname: "pass egress",
actiontype: "Pass",
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anp.name)
anp.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anp.name)).To(o.BeTrue())
patchANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/1\", \"value\": {\"name\":\"deny egress\", \"action\": \"Deny\", \"to\": [{\"nodes\": {\"matchExpressions\": [{\"key\":\"team\", \"operator\": \"In\", \"values\":[%s]}]}}]}}]", policyType, nodeLabels[1])
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anp.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anp.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules %s after update : ", anpRules)
exutil.By("3.1 Validate from the pods running on all the nodes, egress traffic from node labeled team=qe is allowed.\n")
egressNode := labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeTrue())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("3.2 Validate from the pods running on all the nodes, egress traffic from the node labelled team=ocp is blocked.\n")
egressNode = labelledNodeMap["ocp"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodeFail(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("4.0 Update ANP with only HTTP egress traffic is allowed from node labeled team=qe and all other traffic blocked from other nodes")
patchANP = fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/%s/0/ports\", \"value\": [\"portRange\": {\"protocol\": \"TCP\", \"start\": %s, \"end\": %s}]}]", policyType, strconv.Itoa(int(containerport)), strconv.Itoa(int(hostport)))
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anp.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anp.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules %s after update : ", anpRules)
exutil.By("4.1 Validate from the pods running on all the nodes, only HTTP egress traffic is allowed from node labeled team=qe.\n")
egressNode = labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("5.0 Create new set of pods to validate ACLs are created as per (B)ANP already created.\n")
for i := 0; i < 2; i++ {
pod.name = "new-test-pod-" + testID + "-" + strconv.Itoa(i)
pod.nodename = workersList.Items[i].Name
pod.createPingPodNode(oc)
waitPodReady(oc, ns, pod.name)
newNodePodMap[pod.nodename] = pod.name
}
exutil.By("5.1 Validate from newly created pods on all the nodes, egress traffic from node with label team=ocp is blocked.\n")
egressNode = labelledNodeMap["ocp"]
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodeFail(oc, ns, newNodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("5.2 Validate from newly created pods on all the nodes, only HTTP egress traffic is allowed from node labeled team=qe.\n")
egressNode = labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, newNodePodMap[egressNode])).To(o.BeFalse())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, newNodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
exutil.By("6.0 Create a NP to override BANP to allow egress traffic from node with no label\n")
createResourceFromFile(oc, ns, egressTypeFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "default-allow-egress")).To(o.BeTrue())
exutil.By("6.1 Remove the label team=qe from the node.\n")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, labelledNodeMap["qe"], "team")
exutil.By("6.2 Validate from pods on all the nodes, all egress traffic from node that had label team=qe is now allowed.\n")
egressNode = labelledNodeMap["qe"]
o.Expect(checkNodeAccessibilityFromAPod(oc, egressNode, ns, nodePodMap[egressNode])).To(o.BeTrue())
for i := 0; i < numWorkerNodes; i++ {
CurlPod2NodePass(oc, ns, nodePodMap[workersList.Items[i].Name], egressNode, strconv.Itoa(int(hostport)))
}
})
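For reference, the two JSON patch values applied to the ANP above (the Deny rule targeting nodes labelled team=ocp and the TCP portRange 30001-30003) can be reconstructed as plain Go maps and marshalled, which avoids escaping by hand. A minimal standalone sketch follows; no ANP client types are assumed.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The Deny rule whose peer selects nodes carrying the team=ocp label.
	denyToNodes := map[string]interface{}{
		"name":   "deny egress",
		"action": "Deny",
		"to": []interface{}{
			map[string]interface{}{
				"nodes": map[string]interface{}{
					"matchExpressions": []interface{}{
						map[string]interface{}{
							"key":      "team",
							"operator": "In",
							"values":   []string{"ocp"},
						},
					},
				},
			},
		},
	}
	// The portRange limiting the Allow rule to TCP 30001-30003.
	portRange := []interface{}{
		map[string]interface{}{
			"portRange": map[string]interface{}{
				"protocol": "TCP",
				"start":    30001,
				"end":      30003,
			},
		},
	}
	for _, v := range []interface{}{denyToNodes, portRange} {
		b, err := json.MarshalIndent(v, "", "  ")
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
	}
}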
| |||||
test case
|
openshift/openshift-tests-private
|
992d6615-102d-44fe-86f0-732c23865514
|
Author:asood-High-73331-BANP and ANP metrics are available. [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-73331-BANP and ANP metrics are available. [Serial]", func() {
var (
testID = "73331"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-multi-pod-mixed-rule-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-me-template.yaml")
anpNodeCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
namespaceLabelKey = "team"
namespaceLabelVal = "qe"
podKey = "name"
podVal = "hello-pod"
expectedBANPMetricsValue = make(map[string]string)
expectedANPMetricsValue = make(map[string]string)
banpEgress = make(map[string]string)
banpIngress = make(map[string]string)
anpEgress = make(map[string]string)
anpIngress = make(map[string]string)
)
// Initialize variables
banpMetricsList := []string{"ovnkube_controller_baseline_admin_network_policies", "ovnkube_controller_baseline_admin_network_policies_db_objects", "ovnkube_controller_baseline_admin_network_policies_rules"}
anpMetricsList := []string{"ovnkube_controller_admin_network_policies", "ovnkube_controller_admin_network_policies_db_objects", "ovnkube_controller_admin_network_policies_rules"}
actionList := []string{"Allow", "Deny", "Pass"}
dbObjects := []string{"ACL", "Address_Set"}
expectedBANPMetricsValue[banpMetricsList[0]] = "1"
expectedBANPMetricsValue[dbObjects[0]] = "2"
expectedANPMetricsValue[anpMetricsList[0]] = "1"
expectedANPMetricsValue[dbObjects[0]] = "1"
ipStackType := checkIPStackType(oc)
exutil.By("1. Create a BANP with two rules with Allow action for Ingress and Deny action for Egress")
banpCR := multiPodMixedRuleBANPPolicyResource{
name: "default",
subjectKey: namespaceLabelKey,
subjectVal: namespaceLabelVal,
subjectPodKey: podKey,
subjectPodVal: podVal,
policyType1: "ingress",
direction1: "from",
ruleName1: "default-allow-ingress",
ruleAction1: "Allow",
ruleKey1: namespaceLabelKey,
ruleVal1: namespaceLabelVal,
rulePodKey1: podKey,
rulePodVal1: podVal,
policyType2: "egress",
direction2: "to",
ruleName2: "default-deny-egress",
ruleAction2: "Deny",
ruleKey2: namespaceLabelVal,
ruleVal2: namespaceLabelVal,
rulePodKey2: podKey,
rulePodVal2: podVal,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createMultiPodMixedRuleBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By(fmt.Sprintf("2.1 Validate %s metrics for BANP", banpMetricsList[0]))
getPolicyMetrics(oc, banpMetricsList[0], expectedBANPMetricsValue[banpMetricsList[0]])
// Address set
if ipStackType == "dualstack" {
expectedBANPMetricsValue[dbObjects[1]] = "4"
} else {
expectedBANPMetricsValue[dbObjects[1]] = "2"
}
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("2.2.%d Validate %s - %s metrics for BANP", i, banpMetricsList[1], dbObjects[i]))
getPolicyMetrics(oc, banpMetricsList[1], expectedBANPMetricsValue[dbObjects[i]], dbObjects[i])
}
banpEgress[actionList[1]] = "1"
banpIngress[actionList[0]] = "1"
ruleDirection := "Egress"
exutil.By(fmt.Sprintf("3. Validate metrics %s for BANP, %s rule and %s action", banpMetricsList[2], ruleDirection, actionList[1]))
getPolicyMetrics(oc, banpMetricsList[2], banpEgress[actionList[1]], ruleDirection, actionList[1])
ruleDirection = "Ingress"
exutil.By(fmt.Sprintf("4. Validate metrics %s for BANP, %s rule and %s action", banpMetricsList[2], ruleDirection, actionList[0]))
getPolicyMetrics(oc, banpMetricsList[2], banpIngress[actionList[0]], ruleDirection, actionList[0])
banpIngress[actionList[1]] = "1"
exutil.By(fmt.Sprintf("5. Update BANP to add another ingress rule and validate metrics %s", banpMetricsList[2]))
patchBANP := fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/spec/ingress/1\", \"value\": {\"name\":\"deny ingress\", \"action\": \"Deny\", \"from\": [{\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {}}}]}}]", namespaceLabelKey, namespaceLabelVal)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchBANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
getPolicyMetrics(oc, banpMetricsList[2], banpIngress[actionList[1]], ruleDirection, actionList[1])
exutil.By("6. Create a ANP with one ingress rule with deny action.")
anpCR := singleRuleANPMEPolicyResource{
name: "anp-" + testID + "-0",
subjectKey: namespaceLabelKey,
subjectOperator: "In",
subjectVal: namespaceLabelVal,
priority: 25,
policyType: "ingress",
direction: "from",
ruleName: "deny ingress",
ruleAction: "Deny",
ruleKey: namespaceLabelKey,
ruleOperator: "NotIn",
ruleVal: "ns" + testID,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleANPMatchExp(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
// Address set
if ipStackType == "dualstack" {
expectedANPMetricsValue[dbObjects[1]] = "2"
} else {
expectedANPMetricsValue[dbObjects[1]] = "1"
}
exutil.By(fmt.Sprintf("7.1 Validate %s metrics for ANP %s", anpMetricsList[0], anpCR.name))
getPolicyMetrics(oc, anpMetricsList[0], expectedANPMetricsValue[anpMetricsList[0]])
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("7.2.%d Validate %s - %s metrics for ANP %s", i, anpMetricsList[1], dbObjects[i], anpCR.name))
getPolicyMetrics(oc, anpMetricsList[1], expectedANPMetricsValue[dbObjects[i]], dbObjects[i])
}
ruleDirection = "Ingress"
anpIngress[actionList[1]] = "1"
exutil.By(fmt.Sprintf("8. Validate metrics %s for ANP, %s rule and %s action", anpMetricsList[2], ruleDirection, actionList[1]))
getPolicyMetrics(oc, anpMetricsList[2], anpIngress[actionList[1]], ruleDirection, actionList[1])
exutil.By("9. Create another ANP with egress pass and allow rule.")
anpNodeCR := singleRuleANPPolicyResourceNode{
name: "anp-" + testID + "-1",
subjectKey: namespaceLabelKey,
subjectVal: namespaceLabelVal,
priority: 40,
policyType: "egress",
direction: "to",
ruleName: "allow egress",
ruleAction: "Allow",
ruleKey: "team",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: "worker-1",
actionname: "pass egress",
actiontype: "Pass",
template: anpNodeCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpNodeCR.name)
anpNodeCR.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpNodeCR.name)).To(o.BeTrue())
ruleDirection = "Egress"
anpEgress[actionList[0]] = "1"
anpEgress[actionList[2]] = "1"
exutil.By(fmt.Sprintf("10. Validate metrics %s for ANP, %s rule and %s action", anpMetricsList[2], ruleDirection, actionList[0]))
getPolicyMetrics(oc, anpMetricsList[2], anpEgress[actionList[0]], ruleDirection, actionList[0])
exutil.By(fmt.Sprintf("11. Validate metrics %s for ANP, %s rule and %s action", anpMetricsList[2], ruleDirection, actionList[2]))
getPolicyMetrics(oc, anpMetricsList[2], anpEgress[actionList[2]], ruleDirection, actionList[2])
expectedANPMetricsValue[anpMetricsList[0]] = "2"
expectedANPMetricsValue[dbObjects[0]] = "3"
// Address set
if ipStackType == "dualstack" {
expectedANPMetricsValue[dbObjects[1]] = "6"
} else {
expectedANPMetricsValue[dbObjects[1]] = "3"
}
exutil.By(fmt.Sprintf("12.1 Validate %s metrics for both ANP policies", anpMetricsList[0]))
getPolicyMetrics(oc, anpMetricsList[0], expectedANPMetricsValue[anpMetricsList[0]])
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("12.2.%d Validate %s - %s metrics for both ANP policies", i, anpMetricsList[1], dbObjects[i]))
getPolicyMetrics(oc, anpMetricsList[1], expectedANPMetricsValue[dbObjects[i]], dbObjects[i])
}
})
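The metric names verified above are exposed by ovnkube-controller in Prometheus text format. A minimal standalone sketch of extracting one value from exposition lines; the label names (table_name, direction, action) and sample values are assumptions for illustration, and the suite's getPolicyMetrics helper is assumed to query the live metrics endpoint instead.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// metricValue scans Prometheus text-format lines for a metric with the given
// name and label substring and returns its value field.
func metricValue(exposition, name, labelSubstr string) (string, bool) {
	scanner := bufio.NewScanner(strings.NewReader(exposition))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if !strings.HasPrefix(line, name) || !strings.Contains(line, labelSubstr) {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) >= 2 {
			return fields[len(fields)-1], true
		}
	}
	return "", false
}

func main() {
	// Sample lines shaped like the BANP/ANP metrics checked above (values illustrative).
	sample := `
ovnkube_controller_baseline_admin_network_policies 1
ovnkube_controller_baseline_admin_network_policies_db_objects{table_name="ACL"} 2
ovnkube_controller_admin_network_policies_rules{direction="Ingress",action="Deny"} 1
`
	if v, ok := metricValue(sample, "ovnkube_controller_admin_network_policies_rules", `action="Deny"`); ok {
		fmt.Println("Ingress Deny rules:", v)
	}
}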
| |||||
test case
|
openshift/openshift-tests-private
|
208cb246-c664-4d0e-9603-a672e01298dc
|
Author:asood-Longduration-NonPreRelease-High-73453-[FdpOvnOvs] Egress traffic works with ANP, BANP and NP with network egress peer. [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-Longduration-NonPreRelease-High-73453-[FdpOvnOvs] Egress traffic works with ANP, BANP and NP with network egress peer. [Serial]", func() {
var (
testID = "73453"
testDataDir = exutil.FixturePath("testdata", "networking")
banpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
anpMultiNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-multi-rule-cidr-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataDir, "ping-for-pod-specific-node-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-egress-single-CIDR-template.yaml")
matchLabelKey = "team"
matchLabelVal = "ocp"
matchLabelKey1 = "kubernetes.io/metadata.name"
nsPodMap = make(map[string][]string)
urlToLookup = "www.facebook.com"
)
if checkProxy(oc) {
g.Skip("This cluster has proxy configured, egress access cannot be tested on the cluster, skip the test.")
}
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
if !checkIPv6PublicAccess(oc) {
g.Skip("This cluster is dualstack/IPv6 with no access to public websites, egress access cannot be tested on the cluster, skip the test.")
}
}
var allCIDRs, googleIP1, googleIP2, googleDNSServerIP1, googleDNSServerIP2, patchANPCIDR, patchNP string
var allNS, checkIPAccessList []string
exutil.By("0. Get the workers list ")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1.1 Create another namespace")
allNS = append(allNS, oc.Namespace())
oc.SetupProject()
allNS = append(allNS, oc.Namespace())
exutil.By("1.2 Label namespaces.")
for i := 0; i < len(allNS); i++ {
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[i], matchLabelKey+"="+matchLabelVal).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("1.3 Create 2 pods in each namespace")
pod := pingPodResourceNode{
name: "",
namespace: "",
nodename: "",
template: pingPodNodeTemplate,
}
for i := 0; i < len(allNS); i++ {
pod.nodename = workerList.Items[0].Name
pod.namespace = allNS[i]
for j := 0; j < 2; j++ {
pod.name = "test-pod-" + testID + "-" + strconv.Itoa(j)
pod.createPingPodNode(oc)
waitPodReady(oc, allNS[i], pod.name)
nsPodMap[allNS[i]] = append(nsPodMap[allNS[i]], pod.name)
}
}
exutil.By("2. Get one IP address for domain name www.google.com")
ipv4, ipv6 := getIPFromDnsName("www.google.com")
o.Expect(ipv4).NotTo(o.BeEmpty())
checkIPAccessList = append(checkIPAccessList, ipv4)
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
o.Expect(ipv6).NotTo(o.BeEmpty())
checkIPAccessList = append(checkIPAccessList, ipv6)
}
// Set up networks to be used in (B)ANP
switch ipStackType {
case "ipv4single":
allCIDRs = "0.0.0.0/0"
googleIP1 = ipv4 + "/32"
googleDNSServerIP1 = "8.8.8.8/32"
case "ipv6single":
allCIDRs = "::/0"
googleIP1 = ipv6 + "/128"
googleDNSServerIP1 = "2001:4860:4860::8888/128"
case "dualstack":
allCIDRs = "0.0.0.0/0"
googleIP1 = ipv4 + "/32"
googleIP2 = ipv6 + "/128"
googleDNSServerIP1 = "8.8.8.8/32"
googleDNSServerIP2 = "2001:4860:4860::8888/128"
default:
// Do nothing
}
exutil.By("3.1 Egress traffic works before BANP is created")
for i := 0; i < 2; i++ {
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, true)
}
}
exutil.By("3.2 Create a BANP to deny egress to all networks from all namespaces")
banpCIDR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: matchLabelVal,
ruleName: "deny egress to all networks",
ruleAction: "Deny",
cidr: allCIDRs,
template: banpNetworkTemplate,
}
defer removeResource(oc, true, true, "banp", banpCIDR.name)
banpCIDR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCIDR.name)).To(o.BeTrue())
if ipStackType == "dualstack" {
patchBANPCIDR := `[{"op": "add", "path": "/spec/egress/0/to/0/networks/1", "value":"::/0"}]`
patchReplaceResourceAsAdmin(oc, "banp/"+banpCIDR.name, patchBANPCIDR)
banpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp", banpCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n BANP Rules after update: %s", banpRules)
}
exutil.By("3.3 Egress traffic does not works after BANP is created")
for i := 0; i < 2; i++ {
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, false)
}
}
exutil.By("4.0 Create a ANP to allow egress traffic to TCP port 80 and verify egress traffic works from first namespace")
anpCIDR := singleRuleCIDRANPPolicyResource{
name: "anp-network-egress-peer-" + testID,
subjectKey: matchLabelKey1,
subjectVal: allNS[0],
priority: 45,
ruleName: "allow egress network from first namespace",
ruleAction: "Allow",
cidr: googleIP1,
template: anpNetworkTemplate,
}
defer removeResource(oc, true, true, "anp", anpCIDR.name)
anpCIDR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCIDR.name)).To(o.BeTrue())
if ipStackType == "dualstack" {
patchANPCIDR = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 80}}]}, {"op": "add", "path": "/spec/egress/0/to/0/networks/1", "value":"` + googleIP2 + `"} ]`
} else {
patchANPCIDR = `[{"op": "add", "path": "/spec/egress/0/ports", "value": [{"portNumber": {"protocol": "TCP", "port": 80}}]}]`
}
patchReplaceResourceAsAdmin(oc, "anp/"+anpCIDR.name, patchANPCIDR)
anpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
exutil.By("4.1 Egress traffic allowed from first but blocked from second namespace after ANP is created.")
resultList := []bool{true, false}
for i, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, resultList[i])
}
exutil.By("4.2 Egress traffic allowed from newly created pod in first namespace but blocked from second namespace.")
for i := 0; i < len(allNS); i++ {
pod.nodename = workerList.Items[0].Name
pod.namespace = allNS[i]
pod.name = "test-pod-" + testID + "-" + "3"
pod.createPingPodNode(oc)
waitPodReady(oc, allNS[i], pod.name)
nsPodMap[allNS[i]] = append(nsPodMap[allNS[i]], pod.name)
}
for i, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][2], allNS[i], ip, resultList[i])
}
exutil.By("5.0 Create a ANP to allow egress traffic to TCP port 80 from pod labeled color=red in second namespace")
anpMultiCIDR := MultiRuleCIDRANPPolicyResource{
name: "anp-network-egress-peer-" + testID + "-0",
subjectKey: matchLabelKey1,
subjectVal: allNS[1],
priority: 30,
ruleName1: "egress to TCP server",
ruleAction1: "Allow",
cidr1: googleIP1,
ruleName2: "egress to UDP server",
ruleAction2: "Allow",
cidr2: googleDNSServerIP1,
template: anpMultiNetworkTemplate,
}
defer removeResource(oc, true, true, "anp", anpMultiCIDR.name)
anpMultiCIDR.createMultiRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpMultiCIDR.name)).To(o.BeTrue())
exutil.By("5.1 Update the rules to add port & protocol and subject to apply rules to specific pod")
if ipStackType == "dualstack" {
patchANPCIDR = fmt.Sprintf("[ {\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {\"matchLabels\": {\"color\": \"red\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\":%s}, {\"op\": \"add\", \"path\": \"/spec/egress/1/to/0/networks/1\", \"value\":%s}]", matchLabelKey1, allNS[1], googleIP2, googleDNSServerIP2)
} else {
patchANPCIDR = fmt.Sprintf("[ {\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchLabels\": {%s: %s}}, \"podSelector\": {\"matchLabels\": {\"color\": \"red\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}]", matchLabelKey1, allNS[1])
}
patchReplaceResourceAsAdmin(oc, "anp/"+anpMultiCIDR.name, patchANPCIDR)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
anpSubject, subErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.subject}").Output()
o.Expect(subErr).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Subject after update: %s", anpSubject)
exutil.By("5.2 Label the pod")
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", nsPodMap[allNS[1]][2], "-n", allNS[1], "color=red").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("7.1 Validate TCP and UDP egress traffic from labelled pod in second namespace")
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[1]][2], allNS[1], ip, true)
}
verifyNslookup(oc, nsPodMap[allNS[1]][2], allNS[1], urlToLookup, true)
exutil.By("7.2 Validate TCP egress traffic from unlabelled pod in second namespace and from pod in first namespace works is not impacted")
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[1]][0], allNS[1], ip, false)
}
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[0]][2], allNS[0], ip, true)
}
verifyNslookup(oc, nsPodMap[allNS[1]][0], allNS[1], urlToLookup, false)
verifyNslookup(oc, nsPodMap[allNS[0]][0], allNS[0], urlToLookup, false)
exutil.By("8.0 Create third ANP to allow egress traffic from pod labeled color=blue in both namespaces")
anpMultiCIDR.name = "anp-network-egress-peer-" + testID + "-1"
anpMultiCIDR.priority = 25
// Rule 1
anpMultiCIDR.ruleName1 = "egress to udp server"
anpMultiCIDR.ruleAction1 = "Pass"
anpMultiCIDR.cidr1 = googleDNSServerIP1
// Rule 2
anpMultiCIDR.ruleName2 = "egress to tcp server"
anpMultiCIDR.ruleAction2 = "Allow"
anpMultiCIDR.cidr2 = googleIP1
defer removeResource(oc, true, true, "anp", anpMultiCIDR.name)
anpMultiCIDR.createMultiRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpMultiCIDR.name)).To(o.BeTrue())
exutil.By("8.1 Update the rules to add port & protocol and subject to apply rules to pods labelled blue")
if ipStackType == "dualstack" {
patchANPCIDR = fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchExpressions\": [{\"key\": %s, \"operator\": \"In\", \"values\": [%s, %s]}]}, \"podSelector\": {\"matchLabels\": {\"color\": \"blue\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\":%s}, {\"op\": \"add\", \"path\": \"/spec/egress/1/to/0/networks/1\", \"value\":%s}]", matchLabelKey1, allNS[0], allNS[1], googleDNSServerIP2, googleIP2)
} else {
patchANPCIDR = fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/subject\", \"value\": {\"pods\": {\"namespaceSelector\": {\"matchExpressions\": [{\"key\": %s, \"operator\": \"In\", \"values\": [%s, %s]}]}, \"podSelector\": {\"matchLabels\": {\"color\": \"blue\"}}}}}, {\"op\": \"add\", \"path\": \"/spec/egress/0/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"UDP\", \"port\": 53}}]}, {\"op\": \"add\", \"path\": \"/spec/egress/1/ports\", \"value\": [{\"portNumber\": {\"protocol\": \"TCP\", \"port\": 80}}]}]", matchLabelKey1, allNS[0], allNS[1])
}
patchReplaceResourceAsAdmin(oc, "anp/"+anpMultiCIDR.name, patchANPCIDR)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.subject}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Subject after update: %s", anpRules)
exutil.By("8.2 Label first pod in both namespace color=blue")
for i := 0; i < 2; i++ {
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", nsPodMap[allNS[i]][0], "-n", allNS[i], "color=blue").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
}
exutil.By("8.3 Validate only egress to TCP 80 works")
for i := 0; i < 2; i++ {
for _, ip := range checkIPAccessList {
verifyDstIPAccess(oc, nsPodMap[allNS[i]][0], allNS[i], ip, true)
}
verifyNslookup(oc, nsPodMap[allNS[i]][0], allNS[i], urlToLookup, false)
}
exutil.By("8.4 Create a network policy in first namespace")
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: googleDNSServerIP1,
namespace: allNS[0],
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
if ipStackType == "dualstack" {
patchNP = `[{"op": "replace", "path": "/spec/podSelector", "value": {"matchLabels": {"color": "blue"}}}, {"op": "add", "path": "/spec/egress/0/to/1", "value": {"ipBlock":{"cidr":"` + googleDNSServerIP2 + `"}}} ]`
} else {
patchNP = `[{"op": "replace", "path": "/spec/podSelector", "value": {"matchLabels": {"color": "blue"}}}]`
}
patchReplaceResourceAsAdmin(oc, "networkpolicy/"+npIPBlockNS1.name, patchNP, allNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", npIPBlockNS1.name, "-n", allNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
exutil.By("8.5 Validate egress to DNS server at port 53 only works from pod in first namespace")
verifyNslookup(oc, nsPodMap[allNS[0]][0], allNS[0], urlToLookup, true)
verifyNslookup(oc, nsPodMap[allNS[1]][0], allNS[1], urlToLookup, false)
exutil.By("8.6 Update rule with pass action to allow to see egress UDP traffic works from with pod label color=blue in second namespace")
patchANPCIDR = `[{"op": "replace", "path": "/spec/egress/0/action", "value": "Allow"}]`
patchReplaceResourceAsAdmin(oc, "anp/"+anpMultiCIDR.name, patchANPCIDR)
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpMultiCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
verifyNslookup(oc, nsPodMap[allNS[1]][0], allNS[1], urlToLookup, true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
18e0dd9b-226e-4fe6-aedb-e989aa9834aa
|
Author:asood-High-73963-[rducluster] BANP and ANP with AdminpolicybasedExternalRoutes (APBR). [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy.go
|
g.It("Author:asood-High-73963-[rducluster] BANP and ANP with AdminpolicybasedExternalRoutes (APBR). [Serial]", func() {
var (
testID = "73963"
anpNodeTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
banpNodeTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template-node.yaml")
banpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpNetworkTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataDir, "ping-for-pod-specific-node-template.yaml")
gwPodNodeTemplate = filepath.Join(testDataDir, "gw-pod-hostnetwork-template.yaml")
httpServerPodNodeTemplate = filepath.Join(testDataDir, "httpserverPod-specific-node-template.yaml")
apbrDynamicTemplate = filepath.Join(testDataDir, "apbexternalroute-dynamic-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
nodePodMap = make(map[string]string)
containerport int32 = 30001
hostport int32 = 30003
)
exutil.By("0. Get the non sriov and sriov workers list")
workers := excludeSriovNodes(oc)
if len(workers) < 3 {
g.Skip("This test can only be run for cluster that has atleast 3 non sriov worker nodes.")
}
sriovWorkers := getSriovNodes(oc)
if len(sriovWorkers) < 1 {
g.Skip("This test can only be run on a cluster that has at least 1 sriov worker node.")
}
exutil.By("1. Create the served pods in the first namespace on sriov node and non sriov node")
servedNs := oc.Namespace()
exutil.SetNamespacePrivileged(oc, servedNs)
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", servedNs, "multiple_gws=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pod := pingPodResourceNode{
name: "test-pod-" + testID + "-0",
namespace: servedNs,
nodename: sriovWorkers[0],
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, servedNs, pod.name)
nodePodMap[pod.nodename] = pod.name
pod.name = "test-pod-" + testID + "-1"
pod.nodename = workers[2]
pod.createPingPodNode(oc)
waitPodReady(oc, servedNs, pod.name)
nodePodMap[pod.nodename] = pod.name
exutil.By("2. Create second namespace for the serving pod.")
oc.SetupProject()
servingNs := oc.Namespace()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", servingNs, "gws=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+servingNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("2.1. Create the serving pod in serving namespace %s", servingNs))
pod.name = "ext-gw-" + testID
pod.namespace = servingNs
pod.nodename = workers[0]
pod.template = gwPodNodeTemplate
pod.createPingPodNode(oc)
waitPodReady(oc, servingNs, pod.name)
nodePodMap[pod.nodename] = pod.name
gwPodNodeIP := getNodeIPv4(oc, servingNs, workers[0])
exutil.By("3. Create third namespace for the host port pod.")
oc.SetupProject()
hostPortPodNs := oc.Namespace()
exutil.SetNamespacePrivileged(oc, hostPortPodNs)
exutil.By(fmt.Sprintf("3.1 Create a host port pod in %s", hostPortPodNs))
httpServerPod := httpserverPodResourceNode{
name: "hostportpod-" + testID,
namespace: hostPortPodNs,
containerport: containerport,
hostport: hostport,
nodename: workers[1],
template: httpServerPodNodeTemplate,
}
httpServerPod.createHttpservePodNodeByAdmin(oc)
waitPodReady(oc, hostPortPodNs, httpServerPod.name)
nodePodMap[httpServerPod.nodename] = httpServerPod.name
exutil.By("4. Create admin policy based dynamic external routes")
apbr := apbDynamicExternalRoute{
name: "apbr-" + testID,
labelKey: "multiple_gws",
labelValue: "true",
podLabelKey: "gw",
podLabelValue: "true",
namespaceLabelKey: "gws",
namespaceLabelValue: "true",
bfd: true,
template: apbrDynamicTemplate,
}
defer removeResource(oc, true, true, "apbexternalroute", apbr.name)
apbr.createAPBDynamicExternalRoute(oc)
apbExtRouteCheckErr := checkAPBExternalRouteStatus(oc, apbr.name, "Success")
o.Expect(apbExtRouteCheckErr).NotTo(o.HaveOccurred())
exutil.By("5. Get one IP address for domain name www.google.com")
ipv4, _ := getIPFromDnsName("www.google.com")
o.Expect(len(ipv4) == 0).NotTo(o.BeTrue())
exutil.By("6.1 Egress traffic works before BANP is created")
verifyDstIPAccess(oc, nodePodMap[sriovWorkers[0]], servedNs, ipv4, true)
exutil.By("6.2 Create a BANP to deny egress to all networks")
banpCIDR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: servedNs,
ruleName: "deny egress to all networks",
ruleAction: "Deny",
cidr: "0.0.0.0/0",
template: banpNetworkTemplate,
}
defer removeResource(oc, true, true, "banp", banpCIDR.name)
banpCIDR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCIDR.name)).To(o.BeTrue())
exutil.By("6.3 Egress traffic does not works after BANP is created")
verifyDstIPAccess(oc, nodePodMap[sriovWorkers[0]], servedNs, ipv4, false)
exutil.By("7. Create a ANP to allow traffic to host running http server and verify egress traffic works")
anpCIDR := singleRuleCIDRANPPolicyResource{
name: "anp-network-egress-peer-" + testID,
subjectKey: matchLabelKey,
subjectVal: servedNs,
priority: 10,
ruleName: "allow egress to gateway pod",
ruleAction: "Allow",
cidr: gwPodNodeIP + "/32",
template: anpNetworkTemplate,
}
defer removeResource(oc, true, true, "anp", anpCIDR.name)
anpCIDR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCIDR.name)).To(o.BeTrue())
patchANPCIDR := fmt.Sprintf("[{\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\": \"%s/24\"}]", ipv4)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCIDR.name, "--type=json", "-p", patchANPCIDR).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpCIDR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
exutil.By("7.1 Egress traffic works after ANP is created")
verifyDstIPAccess(oc, nodePodMap[sriovWorkers[0]], servedNs, ipv4, true)
exutil.By("8.0 Delete BANP and ANP")
removeResource(oc, true, true, "anp", anpCIDR.name)
removeResource(oc, true, true, "banp", banpCIDR.name)
exutil.By("9.1 Validate egress traffic before BANP is created.")
CurlPod2NodePass(oc, servedNs, nodePodMap[sriovWorkers[0]], workers[1], strconv.Itoa(int(hostport)))
exutil.By("9.2 Create BANP to block egress traffic from all the worker nodes.")
banpNode := singleRuleBANPPolicyResourceNode{
name: "default",
subjectKey: matchLabelKey,
subjectVal: servedNs,
policyType: "egress",
direction: "to",
ruleName: "default egress from all nodes",
ruleAction: "Deny",
ruleKey: "kubernetes.io/hostname",
template: banpNodeTemplate,
}
defer removeResource(oc, true, true, "banp", banpNode.name)
banpNode.createSingleRuleBANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpNode.name)).To(o.BeTrue())
exutil.By("9.3 Validate egress traffic after BANP is created.")
CurlPod2NodeFail(oc, servedNs, nodePodMap[sriovWorkers[0]], workers[1], strconv.Itoa(int(hostport)))
CurlPod2NodeFail(oc, servedNs, nodePodMap[workers[2]], workers[1], strconv.Itoa(int(hostport)))
exutil.By("10.0 Create ANP with egress traffic allowed from nodes that have a served pod and serving pod scheduled")
anpNode := singleRuleANPPolicyResourceNode{
name: "anp-node-egress-peer-" + testID,
subjectKey: matchLabelKey,
subjectVal: servedNs,
priority: 10,
policyType: "egress",
direction: "to",
ruleName: "allow egress",
ruleAction: "Allow",
ruleKey: "kubernetes.io/hostname",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: workers[0],
actionname: "pass egress",
actiontype: "Pass",
template: anpNodeTemplate,
}
defer removeResource(oc, true, true, "anp", anpNode.name)
anpNode.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpNode.name)).To(o.BeTrue())
patchANP := fmt.Sprintf("[{\"op\": \"remove\", \"path\":\"/spec/egress/1\"}, {\"op\": \"replace\", \"path\":\"/spec/egress/0/to/0/nodes/matchExpressions/0/values\", \"value\":[\"%s\", \"%s\", \"%s\"] }]", workers[0], workers[1], sriovWorkers[0])
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpNode.name, "--type=json", "-p", patchANP).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
anpRules, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpNode.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update: %s", anpRules)
CurlPod2NodePass(oc, servedNs, nodePodMap[sriovWorkers[0]], workers[1], strconv.Itoa(int(hostport)))
CurlPod2NodeFail(oc, servedNs, nodePodMap[workers[2]], workers[1], strconv.Itoa(int(hostport)))
})
| |||||
file
|
openshift/openshift-tests-private
|
75fb82b8-c5d4-4db3-8618-03d5220969ff
|
adminnetworkpolicy_utils
|
import (
"encoding/json"
"fmt"
"github.com/tidwall/gjson"
"strconv"
"strings"
"time"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
o "github.com/onsi/gomega"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy_utils.go
|
package networking
import (
"encoding/json"
"fmt"
"github.com/tidwall/gjson"
"strconv"
"strings"
"time"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
o "github.com/onsi/gomega"
)
// Struct to create BANP with either ingress or egress single rule
// Match Label selector
type singleRuleBANPPolicyResource struct {
name string
subjectKey string
subjectVal string
policyType string
direction string
ruleName string
ruleAction string
ruleKey string
ruleVal string
template string
}
// Struct to create BANP with either ingress or egress multiple rules
// Match Label selector
// egress to
// ingress from
type multiRuleBANPPolicyResource struct {
name string
subjectKey string
subjectVal string
policyType string
direction string
ruleName1 string
ruleAction1 string
ruleKey1 string
ruleVal1 string
ruleName2 string
ruleAction2 string
ruleKey2 string
ruleVal2 string
ruleName3 string
ruleAction3 string
ruleKey3 string
ruleVal3 string
template string
}
type singleRuleBANPPolicyResourceNode struct {
name string
subjectKey string
subjectVal string
policyType string
direction string
ruleName string
ruleAction string
ruleKey string
template string
}
// Rule using MatchExpressions
type singleRuleBANPMEPolicyResource struct {
name string
subjectKey string
subjectOperator string
subjectVal string
policyType string
direction string
ruleName string
ruleAction string
ruleKey string
ruleOperator string
ruleVal string
template string
}
// Struct to create BANP multiple rules with mixed ingress or egress direction
// Match Label selector
// pod peer
type multiPodMixedRuleBANPPolicyResource struct {
name string
subjectKey string
subjectVal string
subjectPodKey string
subjectPodVal string
policyType1 string
direction1 string
ruleName1 string
ruleAction1 string
ruleKey1 string
ruleVal1 string
rulePodKey1 string
rulePodVal1 string
policyType2 string
direction2 string
ruleName2 string
ruleAction2 string
ruleKey2 string
ruleVal2 string
rulePodKey2 string
rulePodVal2 string
template string
}
// Struct to create ANP with either ingress or egress single rule
// Match Label selector
// egress to
// ingress from
type singleRuleANPPolicyResource struct {
name string
subjectKey string
subjectVal string
priority int32
policyType string
direction string
ruleName string
ruleAction string
ruleKey string
ruleVal string
template string
}
type singleRuleANPPolicyResourceNode struct {
name string
subjectKey string
subjectVal string
priority int32
policyType string
direction string
ruleName string
ruleAction string
ruleKey string
nodeKey string
ruleVal string
actionname string
actiontype string
template string
}
// Struct to create ANP with either ingress or egress multiple rules
// Match Label selector
// egress to
// ingress from
type multiRuleANPPolicyResource struct {
name string
subjectKey string
subjectVal string
priority int32
policyType string
direction string
ruleName1 string
ruleAction1 string
ruleKey1 string
ruleVal1 string
ruleName2 string
ruleAction2 string
ruleKey2 string
ruleVal2 string
ruleName3 string
ruleAction3 string
ruleKey3 string
ruleVal3 string
template string
}
// Struct to create ANP with multiple rules ingress or egress direction
// Match Label selector
// pods peer
// Two rules of the three will have same direction but action may vary
type multiPodMixedRuleANPPolicyResource struct {
name string
subjectKey string
subjectVal string
subjectPodKey string
subjectPodVal string
priority int32
policyType1 string
direction1 string
ruleName1 string
ruleAction1 string
ruleKey1 string
ruleVal1 string
rulePodKey1 string
rulePodVal1 string
policyType2 string
direction2 string
ruleName2 string
ruleAction2 string
ruleKey2 string
ruleVal2 string
rulePodKey2 string
rulePodVal2 string
ruleName3 string
ruleAction3 string
ruleKey3 string
ruleVal3 string
rulePodKey3 string
rulePodVal3 string
template string
}
type networkPolicyResource struct {
name string
namespace string
policy string
direction1 string
namespaceSel1 string
namespaceSelKey1 string
namespaceSelVal1 string
direction2 string `json:omitempty`
namespaceSel2 string `json:omitempty`
namespaceSelKey2 string `json:omitempty`
namespaceSelVal2 string `json:omitempty`
policyType string
template string
}
// Resource to create a network policy with protocol
// Namespace and Pod Selector
// policy - egress or ingress
// policyType - Egress or Ingress
type networkPolicyProtocolResource struct {
name string
namespace string
policy string
policyType string
direction string
namespaceSel string
namespaceSelKey string
namespaceSelVal string
podSel string
podSelKey string
podSelVal string
port int
protocol string
template string
}
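// Usage sketch for networkPolicyProtocolResource; all values below, including the template
// variable name, are illustrative assumptions rather than parameters from a specific test:
//
//	netpol := networkPolicyProtocolResource{
//		name:            "allow-egress-udp",
//		namespace:       "target-ns",
//		policy:          "egress",
//		policyType:      "Egress",
//		direction:       "to",
//		namespaceSel:    "matchLabels",
//		namespaceSelKey: "team",
//		namespaceSelVal: "qe",
//		podSel:          "matchLabels",
//		podSelKey:       "name",
//		podSelVal:       "test-pods",
//		port:            8080,
//		protocol:        "UDP",
//		template:        networkPolicyProtocolTemplate,
//	}
//	netpol.createProtocolNetworkPolicy(oc)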
type replicationControllerPingPodResource struct {
name string
replicas int
namespace string
template string
}
// Struct to create BANP with either ingress and egress rule
// Match cidr
type singleRuleCIDRBANPPolicyResource struct {
name string
subjectKey string
subjectVal string
ruleName string
ruleAction string
cidr string
template string
}
// Struct to create ANP with either ingress or egress rule
// Match cidr
type singleRuleCIDRANPPolicyResource struct {
name string
subjectKey string
subjectVal string
priority int32
ruleName string
ruleAction string
cidr string
template string
}
// Struct to create ANP with multiple rules either ingress or egress direction
// Match cidr
type MultiRuleCIDRANPPolicyResource struct {
name string
subjectKey string
subjectVal string
priority int32
ruleName1 string
ruleAction1 string
cidr1 string
ruleName2 string
ruleAction2 string
cidr2 string
template string
}
// Struct to create ANP with either ingress or egress single rule
// Match Expression selector
// egress to
// ingress from
type singleRuleANPMEPolicyResource struct {
name string
subjectKey string
subjectOperator string
subjectVal string
priority int32
policyType string
direction string
ruleName string
ruleAction string
ruleKey string
ruleOperator string
ruleVal string
template string
}
func (banp *singleRuleBANPPolicyResource) createSingleRuleBANP(oc *exutil.CLI) {
exutil.By("Creating single rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME="+banp.ruleName, "RULEACTION="+banp.ruleAction, "RULEKEY="+banp.ruleKey, "RULEVAL="+banp.ruleVal)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Baseline Admin Network Policy CR %v", banp.name))
}
func (banp *singleRuleBANPPolicyResourceNode) createSingleRuleBANPNode(oc *exutil.CLI) {
exutil.By("Creating single rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME="+banp.ruleName, "RULEACTION="+banp.ruleAction, "RULEKEY="+banp.ruleKey)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Baseline Admin Network Policy CR %v", banp.name))
}
func (banp *multiRuleBANPPolicyResource) createMultiRuleBANP(oc *exutil.CLI) {
exutil.By("Creating Multi rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME1="+banp.ruleName1, "RULEACTION1="+banp.ruleAction1, "RULEKEY1="+banp.ruleKey1, "RULEVAL1="+banp.ruleVal1,
"RULENAME2="+banp.ruleName2, "RULEACTION2="+banp.ruleAction2, "RULEKEY2="+banp.ruleKey2, "RULEVAL2="+banp.ruleVal2,
"RULENAME3="+banp.ruleName3, "RULEACTION3="+banp.ruleAction3, "RULEKEY3="+banp.ruleKey3, "RULEVAL3="+banp.ruleVal3)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", banp.name))
}
func (banp *singleRuleBANPMEPolicyResource) createSingleRuleBANPMatchExp(oc *exutil.CLI) {
exutil.By("Creating single rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTOPERATOR="+banp.subjectOperator, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME="+banp.ruleName, "RULEACTION="+banp.ruleAction,
"RULEKEY="+banp.ruleKey, "RULEOPERATOR="+banp.ruleOperator, "RULEVAL="+banp.ruleVal)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Baseline Admin Network Policy CR %v", banp.name))
}
func (banp *multiPodMixedRuleBANPPolicyResource) createMultiPodMixedRuleBANP(oc *exutil.CLI) {
exutil.By("Creating Multi rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal, "SUBJECTPODKEY="+banp.subjectPodKey, "SUBJECTPODVAL="+banp.subjectPodVal,
"POLICYTYPE1="+banp.policyType1, "DIRECTION1="+banp.direction1, "RULENAME1="+banp.ruleName1, "RULEACTION1="+banp.ruleAction1,
"RULEKEY1="+banp.ruleKey1, "RULEVAL1="+banp.ruleVal1, "RULEPODKEY1="+banp.rulePodKey1, "RULEPODVAL1="+banp.rulePodVal1,
"POLICYTYPE2="+banp.policyType2, "DIRECTION2="+banp.direction2, "RULENAME2="+banp.ruleName2, "RULEACTION2="+banp.ruleAction2,
"RULEKEY2="+banp.ruleKey2, "RULEVAL2="+banp.ruleVal2, "RULEPODKEY2="+banp.rulePodKey2, "RULEPODVAL2="+banp.rulePodVal2)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", banp.name))
}
func (anp *singleRuleANPPolicyResource) createSingleRuleANP(oc *exutil.CLI) {
exutil.By("Creating Single rule Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name,
"POLICYTYPE="+anp.policyType, "DIRECTION="+anp.direction,
"SUBJECTKEY="+anp.subjectKey, "SUBJECTVAL="+anp.subjectVal,
"PRIORITY="+strconv.Itoa(int(anp.priority)), "RULENAME="+anp.ruleName, "RULEACTION="+anp.ruleAction, "RULEKEY="+anp.ruleKey, "RULEVAL="+anp.ruleVal)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
func (anp *singleRuleANPPolicyResourceNode) createSingleRuleANPNode(oc *exutil.CLI) {
exutil.By("Creating Single rule Admin Network Policy from template for Node")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name,
"POLICYTYPE="+anp.policyType, "DIRECTION="+anp.direction,
"SUBJECTKEY="+anp.subjectKey, "SUBJECTVAL="+anp.subjectVal,
"PRIORITY="+strconv.Itoa(int(anp.priority)), "RULENAME="+anp.ruleName, "RULEACTION="+anp.ruleAction, "RULEKEY="+anp.ruleKey, "NODEKEY="+anp.nodeKey, "RULEVAL="+anp.ruleVal, "ACTIONNAME="+anp.actionname, "ACTIONTYPE="+anp.actiontype)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
func (anp *multiRuleANPPolicyResource) createMultiRuleANP(oc *exutil.CLI) {
exutil.By("Creating Multi rule Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name, "PRIORITY="+strconv.Itoa(int(anp.priority)),
"SUBJECTKEY="+anp.subjectKey, "SUBJECTVAL="+anp.subjectVal,
"POLICYTYPE="+anp.policyType, "DIRECTION="+anp.direction,
"RULENAME1="+anp.ruleName1, "RULEACTION1="+anp.ruleAction1, "RULEKEY1="+anp.ruleKey1, "RULEVAL1="+anp.ruleVal1,
"RULENAME2="+anp.ruleName2, "RULEACTION2="+anp.ruleAction2, "RULEKEY2="+anp.ruleKey2, "RULEVAL2="+anp.ruleVal2,
"RULENAME3="+anp.ruleName3, "RULEACTION3="+anp.ruleAction3, "RULEKEY3="+anp.ruleKey2, "RULEVAL3="+anp.ruleVal3)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
func (anp *singleRuleANPMEPolicyResource) createSingleRuleANPMatchExp(oc *exutil.CLI) {
exutil.By("Creating Single rule Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name,
"POLICYTYPE="+anp.policyType, "DIRECTION="+anp.direction,
"SUBJECTKEY="+anp.subjectKey, "SUBJECTOPERATOR="+anp.subjectOperator, "SUBJECTVAL="+anp.subjectVal,
"PRIORITY="+strconv.Itoa(int(anp.priority)), "RULENAME="+anp.ruleName, "RULEACTION="+anp.ruleAction,
"RULEKEY="+anp.ruleKey, "RULEOPERATOR="+anp.ruleOperator, "RULEVAL="+anp.ruleVal)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
func (anp *multiPodMixedRuleANPPolicyResource) createMultiPodMixedRuleANP(oc *exutil.CLI) {
exutil.By("Creating Multi rule Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name, "PRIORITY="+strconv.Itoa(int(anp.priority)),
"SUBJECTKEY="+anp.subjectKey, "SUBJECTVAL="+anp.subjectVal, "SUBJECTPODKEY="+anp.subjectPodKey, "SUBJECTPODVAL="+anp.subjectPodVal,
"POLICYTYPE1="+anp.policyType1, "DIRECTION1="+anp.direction1, "RULENAME1="+anp.ruleName1, "RULEACTION1="+anp.ruleAction1,
"RULEKEY1="+anp.ruleKey1, "RULEVAL1="+anp.ruleVal1, "RULEPODKEY1="+anp.rulePodKey1, "RULEPODVAL1="+anp.rulePodVal1,
"POLICYTYPE2="+anp.policyType2, "DIRECTION2="+anp.direction2, "RULENAME2="+anp.ruleName2, "RULEACTION2="+anp.ruleAction2,
"RULEKEY2="+anp.ruleKey2, "RULEVAL2="+anp.ruleVal2, "RULEPODKEY2="+anp.rulePodKey2, "RULEPODVAL2="+anp.rulePodVal2,
"RULENAME3="+anp.ruleName3, "RULEACTION3="+anp.ruleAction3,
"RULEKEY3="+anp.ruleKey2, "RULEVAL3="+anp.ruleVal3, "RULEPODKEY3="+anp.rulePodKey2, "RULEPODVAL3="+anp.rulePodVal3)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
func (rcPingPod *replicationControllerPingPodResource) createReplicaController(oc *exutil.CLI) {
exutil.By("Creating replication controller from template")
replicasString := fmt.Sprintf("REPLICAS=%v", rcPingPod.replicas)
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", rcPingPod.template, "-p", "PODNAME="+rcPingPod.name,
"NAMESPACE="+rcPingPod.namespace, replicasString)
if err1 != nil {
e2e.Logf("Error creating replication controller:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create replicationcontroller %v", rcPingPod.name))
}
func (netpol *networkPolicyResource) createNetworkPolicy(oc *exutil.CLI) {
exutil.By("Creating networkpolicy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", netpol.template, "-p", "NAME="+netpol.name,
"NAMESPACE="+netpol.namespace, "POLICY="+netpol.policy,
"DIRECTION1="+netpol.direction1,
"NAMESPACESEL1="+netpol.namespaceSel1, "NAMESPACESELKEY1="+netpol.namespaceSelKey1, "NAMESPACESELVAL1="+netpol.namespaceSelVal1,
"DIRECTION2="+netpol.direction2,
"NAMESPACESEL2="+netpol.namespaceSel2, "NAMESPACESELKEY2="+netpol.namespaceSelKey2, "NAMESPACESELVAL2="+netpol.namespaceSelVal2,
"POLICYTYPE="+netpol.policyType)
if err1 != nil {
e2e.Logf("Error creating networkpolicy :%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create networkpolicy %v", netpol.name))
}
func (netpol *networkPolicyProtocolResource) createProtocolNetworkPolicy(oc *exutil.CLI) {
exutil.By("Creating protocol networkpolicy from template")
portString := fmt.Sprintf("PORT=%v", netpol.port)
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", netpol.template, "-p", "NAME="+netpol.name,
"NAMESPACE="+netpol.namespace, "POLICY="+netpol.policy, "POLICYTYPE="+netpol.policyType,
"DIRECTION="+netpol.direction,
"NAMESPACESEL="+netpol.namespaceSel, "NAMESPACESELKEY="+netpol.namespaceSelKey, "NAMESPACESELVAL="+netpol.namespaceSelVal,
"PODSEL="+netpol.podSel, "PODSELKEY="+netpol.podSelKey, "PODSELVAL="+netpol.podSelVal,
"PROTOCOL="+netpol.protocol, portString)
if err1 != nil {
e2e.Logf("Error creating networkpolicy :%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create networkpolicy %v", netpol.name))
}
func (banp *singleRuleCIDRBANPPolicyResource) createSingleRuleCIDRBANP(oc *exutil.CLI) {
exutil.By("Creating single rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal,
"RULENAME="+banp.ruleName, "RULEACTION="+banp.ruleAction, "CIDR="+banp.cidr)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Baseline Admin Network Policy CR %v", banp.name))
}
func (anp *singleRuleCIDRANPPolicyResource) createSingleRuleCIDRANP(oc *exutil.CLI) {
exutil.By("Creating Single rule Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name,
"SUBJECTKEY="+anp.subjectKey, "SUBJECTVAL="+anp.subjectVal,
"PRIORITY="+strconv.Itoa(int(anp.priority)), "RULENAME="+anp.ruleName, "RULEACTION="+anp.ruleAction, "CIDR="+anp.cidr)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
func (anp *MultiRuleCIDRANPPolicyResource) createMultiRuleCIDRANP(oc *exutil.CLI) {
exutil.By("Creating multi-rules Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name,
"SUBJECTKEY="+anp.subjectKey, "SUBJECTVAL="+anp.subjectVal,
"PRIORITY="+strconv.Itoa(int(anp.priority)), "RULENAME1="+anp.ruleName1, "RULEACTION1="+anp.ruleAction1, "CIDR1="+anp.cidr1,
"RULENAME2="+anp.ruleName2, "RULEACTION2="+anp.ruleAction2, "CIDR2="+anp.cidr2)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
func checkUDPTraffic(oc *exutil.CLI, clientPodName string, clientPodNamespace string, serverPodName string, serverPodNamespace string, serverUdpPort string, resultPass bool) {
e2e.Logf("Listening on pod %s at port %s", serverPodName, serverUdpPort)
ipStackType := checkIPStackType(oc)
var udpServerPodIPList []string
switch ipStackType {
case "ipv4single":
udpServerPodIPList = append(udpServerPodIPList, getPodIPv4(oc, serverPodNamespace, serverPodName))
case "ipv6single":
udpServerPodIPList = append(udpServerPodIPList, getPodIPv6(oc, serverPodNamespace, serverPodName, ipStackType))
case "dualstack":
udpServerPodIPList = append(udpServerPodIPList, getPodIPv4(oc, serverPodNamespace, serverPodName))
udpServerPodIPList = append(udpServerPodIPList, getPodIPv6(oc, serverPodNamespace, serverPodName, ipStackType))
default:
e2e.Logf("Stack type could not be determined")
}
udpServerCmd := fmt.Sprintf("timeout --preserve-status 60 ncat -u -l %s", serverUdpPort)
for _, udpServerPodIP := range udpServerPodIPList {
cmdNcat, cmdOutput, _, ncatCmdErr := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", serverPodNamespace, serverPodName, "bash", "-c", udpServerCmd).Background()
defer cmdNcat.Process.Kill()
o.Expect(ncatCmdErr).NotTo(o.HaveOccurred())
e2e.Logf("Sending UDP packets to pod %s", serverPodName)
cmd := fmt.Sprintf("echo hello | ncat -v -u %s %s", udpServerPodIP, serverUdpPort)
for i := 0; i < 2; i++ {
output, ncatCmdErr := execCommandInSpecificPod(oc, clientPodNamespace, clientPodName, cmd)
o.Expect(ncatCmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(output), "bytes sent")).To(o.BeTrue())
}
e2e.Logf("UDP pod server output %s", cmdOutput)
if resultPass {
o.Expect(strings.Contains(cmdOutput.String(), "hello")).To(o.BeTrue())
} else {
o.Expect(strings.Contains(cmdOutput.String(), "hello")).To(o.BeFalse())
}
cmdNcat.Process.Kill()
}
}
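// Usage sketch for checkUDPTraffic; the pod names, namespaces and port are hypothetical.
// It expects UDP datagrams sent from the client pod to reach the server pod on port 8181:
//
//	checkUDPTraffic(oc, "hello-pod-client", "ns-client", "udp-server-pod", "ns-server", "8181", true)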
func checkSCTPTraffic(oc *exutil.CLI, clientPodName string, clientPodNamespace string, serverPodName string, serverPodNamespace string, resultPass bool) {
ipStackType := checkIPStackType(oc)
var sctpServerPodIPList []string
switch ipStackType {
case "ipv4single":
sctpServerPodIPList = append(sctpServerPodIPList, getPodIPv4(oc, serverPodNamespace, serverPodName))
case "ipv6single":
sctpServerPodIPList = append(sctpServerPodIPList, getPodIPv6(oc, serverPodNamespace, serverPodName, ipStackType))
case "dualstack":
sctpServerPodIPList = append(sctpServerPodIPList, getPodIPv4(oc, serverPodNamespace, serverPodName))
sctpServerPodIPList = append(sctpServerPodIPList, getPodIPv6(oc, serverPodNamespace, serverPodName, ipStackType))
default:
e2e.Logf("Stack type could not be determined")
}
for _, sctpServerPodIP := range sctpServerPodIPList {
e2e.Logf("SCTP server pod listening for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", serverPodNamespace, serverPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check SCTP process running in the SCTP server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(serverPodNamespace, serverPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No SCTP process running on SCTP server pod")
e2e.Logf("SCTP client pod sending SCTP traffic")
_, err1 := e2eoutput.RunHostCmd(clientPodNamespace, clientPodName, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
if resultPass {
o.Expect(err1).NotTo(o.HaveOccurred())
exutil.By("Server SCTP process will end after receiving SCTP traffic from SCTP client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(serverPodNamespace, serverPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "SCTP process didn't end after getting SCTP traffic from SCTP client")
} else {
e2e.Logf("SCTP traffic is blocked")
o.Expect(err1).To(o.HaveOccurred())
}
cmdNcat.Process.Kill()
}
}
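// Usage sketch for checkSCTPTraffic; the pod names and namespaces are hypothetical. The helper
// always uses SCTP port 30102; here the traffic is expected to be blocked by policy:
//
//	checkSCTPTraffic(oc, "sctpclient", "ns-client", "sctpserver", "ns-server", false)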
func checkACLLogs(oc *exutil.CLI, serverPodNs string, serverPodName string, clientPodNs string, clientPodName string, curlCmd string, aclLogSearchString string, ovnKNodePodName string, resultPass bool) {
tailACLLog := "tail -f /var/log/ovn/acl-audit-log.log"
tailACLLogCmd, cmdOutput, _, cmdErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-ovn-kubernetes", ovnKNodePodName, "-c", "ovn-controller", "--", "/bin/bash", "-c", tailACLLog).Background()
defer tailACLLogCmd.Process.Kill()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
if curlCmd == "pass" {
CurlPod2PodPass(oc, serverPodNs, serverPodName, clientPodNs, clientPodName)
} else {
CurlPod2PodFail(oc, serverPodNs, serverPodName, clientPodNs, clientPodName)
}
e2e.Logf("Log output: \n %s", cmdOutput.String())
if resultPass {
o.Expect(strings.Contains(cmdOutput.String(), aclLogSearchString)).To(o.BeTrue())
e2e.Logf("Found the expected string - %s", aclLogSearchString)
} else {
o.Expect(strings.Contains(cmdOutput.String(), aclLogSearchString)).To(o.BeFalse())
}
tailACLLogCmd.Process.Kill()
}
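// Usage sketch for checkACLLogs; the pod names, namespaces, search string and ovnkube-node pod
// name are hypothetical. The curl is expected to fail and the ACL audit log on that node should
// contain the given verdict string:
//
//	aclLogString := "verdict=drop, severity=alert"
//	checkACLLogs(oc, "ns-server", "server-pod", "ns-client", "client-pod", "fail", aclLogString, "ovnkube-node-xxxxx", true)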
func checkSpecificPolicyStatus(oc *exutil.CLI, policyType string, policyName string, lookupStatusKey string, expectedStatusStr string) (result bool, resultMsg string) {
e2e.Logf("Checking status of %s named %s for '%s' in '%s'", strings.ToUpper(policyType), policyName, expectedStatusStr, lookupStatusKey)
result = true
resultMsg = ""
allNodes, err := exutil.GetAllNodes(oc)
if err != nil {
return false, fmt.Sprintf("%v", err)
}
statusOutput, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args(policyType, policyName, `-ojsonpath={.status}`).Output()
if messagesErr != nil {
return false, fmt.Sprintf("%v", messagesErr)
}
var data map[string]interface{}
json.Unmarshal([]byte(statusOutput), &data)
allConditions := data["conditions"].([]interface{})
if len(allConditions) != len(allNodes) {
resultMsg = "Failed to obtain status for all nodes in cluster"
return false, resultMsg
}
for i := 0; i < len(allConditions); i++ {
for statusKey, statusVal := range allConditions[i].(map[string]interface{}) {
if statusKey == lookupStatusKey && !strings.Contains(statusVal.(string), expectedStatusStr) {
resultMsg = fmt.Sprintf("Failed to find: %s", expectedStatusStr)
return false, resultMsg
}
}
}
return result, resultMsg
}
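// Usage sketch for checkSpecificPolicyStatus; the policy name, status key and expected substring
// are illustrative assumptions. It verifies that the status condition reported for every node
// contains the expected text:
//
//	ok, msg := checkSpecificPolicyStatus(oc, "anp", "anp-example", "message", "Setup applied")
//	o.Expect(ok).To(o.BeTrue(), msg)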
func getPolicyMetrics(oc *exutil.CLI, metricsName string, expectedValue string, addArgs ...string) (bool, error) {
metricsValue := ""
switch argCount := len(addArgs); argCount {
case 1:
e2e.Logf("Obtaining metrics %s for DB Object - %s", metricsName, addArgs[0])
case 2:
e2e.Logf("Obtaining metrics %s for %s rule and %s action", metricsName, addArgs[0], addArgs[1])
default:
e2e.Logf("Obtaining metrics %s without any additional arguments", metricsName)
}
result := true
olmToken, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(olmToken).NotTo(o.BeEmpty())
url := fmt.Sprintf("localhost:9090/api/v1/query?query=%s", metricsName)
metricsErr := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", olmToken), fmt.Sprintf("%s", url)).Output()
if err != nil {
e2e.Logf("Unable to get metrics and try again, the error is:%s", err)
result = false
return result, nil
}
metric := gjson.Get(output, "data.result.#.metric")
for index, metricVal := range metric.Array() {
metricMap := metricVal.Map()
if strings.Contains((metricMap["__name__"]).String(), "rules") {
if !(strings.Contains((metricMap["direction"]).String(), addArgs[0]) && strings.Contains((metricMap["action"]).String(), addArgs[1])) {
continue
}
} else if strings.Contains((metricMap["__name__"]).String(), "db") {
if !(strings.Contains((metricMap["table_name"]).String(), addArgs[0])) {
continue
}
}
val := gjson.Get(output, "data.result."+strconv.Itoa(index)+".value")
metricsValue = strings.TrimSuffix(strings.Split(val.String(), ",")[1], "]")
}
if !strings.Contains(metricsValue, expectedValue) {
result = false
e2e.Logf("The value for %s is not %s as expected", metricsName, expectedValue)
return result, nil
} else {
result = true
e2e.Logf("The value for %s is %s as expected", metricsName, expectedValue)
return result, nil
}
})
exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr))
return result, nil
}
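// Usage sketch for getPolicyMetrics; the metric name and expected value are illustrative
// assumptions. The trailing variadic arguments select the rules metric by direction and action:
//
//	metricOK, _ := getPolicyMetrics(oc, "ovnkube_controller_admin_network_policies_rules", "2", "Egress", "Allow")
//	o.Expect(metricOK).To(o.BeTrue())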
// Perform nslookup of the provided URL using the IP address of a Google DNS server
func verifyNslookup(oc *exutil.CLI, clientPodName string, clientPodNamespace string, urlToLookup string, resultPass bool) {
var cmdList = []string{}
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType == "dualstack" {
cmdList = append(cmdList, "nslookup "+urlToLookup+" 8.8.8.8")
if checkIPv6PublicAccess(oc) {
cmdList = append(cmdList, "nslookup "+urlToLookup+" 2001:4860:4860::8888")
}
} else {
if ipStackType == "ipv6single" && checkIPv6PublicAccess(oc) {
cmdList = append(cmdList, "nslookup "+urlToLookup+" 2001:4860:4860::8888")
} else {
cmdList = append(cmdList, "nslookup "+urlToLookup+" 8.8.8.8")
}
}
for _, cmd := range cmdList {
res, err := exutil.RemoteShPodWithBash(oc, clientPodNamespace, clientPodName, cmd)
if resultPass {
e2e.Logf("nslookup is allowed")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(res, "www.facebook.com")).Should(o.BeTrue(), fmt.Sprintf("The nslookup did not succeed as expected:%s", res))
} else {
e2e.Logf("nslookup is blocked")
o.Expect(err).To(o.HaveOccurred())
o.Expect(strings.Contains(res, "connection timed out; no servers could be reached")).Should(o.BeTrue(), fmt.Sprintf("The nslookup did not fail as expected:%s", res))
}
}
}
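// Usage sketch for verifyNslookup; the pod, namespace and URL are hypothetical. DNS resolution
// through the public Google DNS servers is expected to be blocked for this client pod:
//
//	verifyNslookup(oc, "test-pod-0", "ns-client", "www.facebook.com", false)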
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
7a0a0a03-14ee-408a-8168-85f7e6f4a759
|
createSingleRuleBANP
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['singleRuleBANPPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy_utils.go
|
func (banp *singleRuleBANPPolicyResource) createSingleRuleBANP(oc *exutil.CLI) {
exutil.By("Creating single rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME="+banp.ruleName, "RULEACTION="+banp.ruleAction, "RULEKEY="+banp.ruleKey, "RULEVAL="+banp.ruleVal)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Baseline Admin Network Policy CR %v", banp.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
57400d6c-a62b-42b1-bb4a-0e7de10de350
|
createSingleRuleBANPNode
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['singleRuleBANPPolicyResourceNode']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy_utils.go
|
func (banp *singleRuleBANPPolicyResourceNode) createSingleRuleBANPNode(oc *exutil.CLI) {
exutil.By("Creating single rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME="+banp.ruleName, "RULEACTION="+banp.ruleAction, "RULEKEY="+banp.ruleKey)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Baseline Admin Network Policy CR %v", banp.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
9f2d9433-ea26-4762-b665-4865b895278d
|
createMultiRuleBANP
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['multiRuleBANPPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy_utils.go
|
func (banp *multiRuleBANPPolicyResource) createMultiRuleBANP(oc *exutil.CLI) {
exutil.By("Creating Multi rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME1="+banp.ruleName1, "RULEACTION1="+banp.ruleAction1, "RULEKEY1="+banp.ruleKey1, "RULEVAL1="+banp.ruleVal1,
"RULENAME2="+banp.ruleName2, "RULEACTION2="+banp.ruleAction2, "RULEKEY2="+banp.ruleKey2, "RULEVAL2="+banp.ruleVal2,
"RULENAME3="+banp.ruleName3, "RULEACTION3="+banp.ruleAction3, "RULEKEY3="+banp.ruleKey3, "RULEVAL3="+banp.ruleVal3)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", banp.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
d47ae336-9142-4b99-a1cb-369a89a000cc
|
createSingleRuleBANPMatchExp
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['singleRuleBANPMEPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy_utils.go
|
func (banp *singleRuleBANPMEPolicyResource) createSingleRuleBANPMatchExp(oc *exutil.CLI) {
exutil.By("Creating single rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTOPERATOR="+banp.subjectOperator, "SUBJECTVAL="+banp.subjectVal,
"POLICYTYPE="+banp.policyType, "DIRECTION="+banp.direction,
"RULENAME="+banp.ruleName, "RULEACTION="+banp.ruleAction,
"RULEKEY="+banp.ruleKey, "RULEOPERATOR="+banp.ruleOperator, "RULEVAL="+banp.ruleVal)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Baseline Admin Network Policy CR %v", banp.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
eb38ce58-c16d-4e49-bf08-158ac6a9190d
|
createMultiPodMixedRuleBANP
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['multiPodMixedRuleBANPPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy_utils.go
|
func (banp *multiPodMixedRuleBANPPolicyResource) createMultiPodMixedRuleBANP(oc *exutil.CLI) {
exutil.By("Creating Multi rule Baseline Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", banp.template, "-p", "NAME="+banp.name,
"SUBJECTKEY="+banp.subjectKey, "SUBJECTVAL="+banp.subjectVal, "SUBJECTPODKEY="+banp.subjectPodKey, "SUBJECTPODVAL="+banp.subjectPodVal,
"POLICYTYPE1="+banp.policyType1, "DIRECTION1="+banp.direction1, "RULENAME1="+banp.ruleName1, "RULEACTION1="+banp.ruleAction1,
"RULEKEY1="+banp.ruleKey1, "RULEVAL1="+banp.ruleVal1, "RULEPODKEY1="+banp.rulePodKey1, "RULEPODVAL1="+banp.rulePodVal1,
"POLICYTYPE2="+banp.policyType2, "DIRECTION2="+banp.direction2, "RULENAME2="+banp.ruleName2, "RULEACTION2="+banp.ruleAction2,
"RULEKEY2="+banp.ruleKey2, "RULEVAL2="+banp.ruleVal2, "RULEPODKEY2="+banp.rulePodKey2, "RULEPODVAL2="+banp.rulePodVal2)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", banp.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
2930d1a5-899e-4cb9-906b-ac03c2f410a6
|
createSingleRuleANP
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['singleRuleANPPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/adminnetworkpolicy_utils.go
|
func (anp *singleRuleANPPolicyResource) createSingleRuleANP(oc *exutil.CLI) {
exutil.By("Creating Single rule Admin Network Policy from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", anp.template, "-p", "NAME="+anp.name,
"POLICYTYPE="+anp.policyType, "DIRECTION="+anp.direction,
"SUBJECTKEY="+anp.subjectKey, "SUBJECTVAL="+anp.subjectVal,
"PRIORITY="+strconv.Itoa(int(anp.priority)), "RULENAME="+anp.ruleName, "RULEACTION="+anp.ruleAction, "RULEKEY="+anp.ruleKey, "RULEVAL="+anp.ruleVal)
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create Admin Network Policy CR %v", anp.name))
}
|
networking
|