element_type
stringclasses 4
values | project_name
stringclasses 1
value | uuid
stringlengths 36
36
| name
stringlengths 0
346
| imports
stringlengths 0
2.67k
| structs
stringclasses 761
values | interfaces
stringclasses 22
values | file_location
stringclasses 545
values | code
stringlengths 26
8.07M
| global_vars
stringclasses 7
values | package
stringclasses 124
values | tags
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|
test case
|
openshift/openshift-tests-private
|
e9b257f8-381e-4c23-ac8d-eba9fdcb4ac7
|
Author:yingwang-NonHyperShiftHOST-Medium-73625-[rducluster]external traffic can access MetalLB service when EgressIP is applied and service ETP=local. [Disruptive]
|
['"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_egressip_ovn.go
|
g.It("Author:yingwang-NonHyperShiftHOST-Medium-73625-[rducluster]external traffic can access MetalLB service when EgressIP is applied and service ETP=local. [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
testDataMetallbDir = exutil.FixturePath("testdata", "networking/metallb")
mlNSTemplate = filepath.Join(testDataMetallbDir, "namespace-template.yaml")
mlOperatorGroupTemplate = filepath.Join(testDataMetallbDir, "operatorgroup-template.yaml")
mlSubscriptionTemplate = filepath.Join(testDataMetallbDir, "subscription-template.yaml")
mlNs = "metallb-system"
exteranlHost = "10.8.1.181"
metalLBNodeSelKey = "node-role.kubernetes.io/worker"
metalLBNodeSelVal = ""
metalLBControllerSelKey = "node-role.kubernetes.io/worker"
metalLBControllerSelVal = ""
podLabelKey string
podLabelValue string
nsLabelKey = "name"
nsLabelValue = "test"
)
workers := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker")
if len(workers) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
freeIPs := findFreeIPs(oc, workers[0], 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
exutil.By("create new namespace\n")
ns1 := oc.Namespace()
exutil.By("install Metallb operator\n")
sub := subscriptionResource{
name: "metallb-operator-sub",
namespace: mlNs,
operatorName: "metallb-operator",
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: mlSubscriptionTemplate,
}
ns := namespaceResource{
name: mlNs,
template: mlNSTemplate,
}
og := operatorGroupResource{
name: "metallb-operator",
namespace: mlNs,
targetNamespaces: "metallb-system",
template: mlOperatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
g.By("Making sure CRDs are successfully installed")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(
o.And(
o.ContainSubstring("bfdprofiles.metallb.io"),
o.ContainSubstring("bgpadvertisements.metallb.io"),
o.ContainSubstring("bgppeers.metallb.io"),
o.ContainSubstring("communities.metallb.io"),
o.ContainSubstring("ipaddresspools.metallb.io"),
o.ContainSubstring("l2advertisements.metallb.io"),
o.ContainSubstring("metallbs.metallb.io"),
))
exutil.By("1. Create MetalLB CR")
metallbCRTemplate := filepath.Join(testDataMetallbDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: mlNs,
nodeSelectorKey: metalLBNodeSelKey,
nodeSelectorVal: metalLBNodeSelVal,
controllerSelectorKey: metalLBControllerSelKey,
controllerSelectorVal: metalLBControllerSelVal,
template: metallbCRTemplate,
}
defer removeResource(oc, true, true, "metallb", metallbCR.name, "-n", metallbCR.namespace)
result := createMetalLBCR(oc, metallbCR, metallbCRTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - MetalLB CR Created")
exutil.By("2. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(networkBaseDir, "metallb-ipaddresspool-template.yaml")
ipAddresspool := networkingRes{
name: "ippool-" + getRandomString(),
namespace: mlNs,
kind: "ipddresspool",
tempfile: ipAddresspoolTemplate,
}
ipAddr := freeIPs[0] + "/32"
defer removeResource(oc, true, true, "IPAddressPool", ipAddresspool.name, "-n", ipAddresspool.namespace)
ipAddresspool.create(oc, "NAME="+ipAddresspool.name, "NAMESPACE="+ipAddresspool.namespace, "ADDRESS="+ipAddr)
l2AdTemplate := filepath.Join(networkBaseDir, "metallb-l2advertisement-template.yaml")
l2Ad := networkingRes{
name: "l2ad-" + getRandomString(),
namespace: mlNs,
kind: "L2Advertisement",
tempfile: l2AdTemplate,
}
defer removeResource(oc, true, true, "L2Advertisement", l2Ad.name, "-n", l2Ad.namespace)
l2Ad.create(oc, "NAME="+l2Ad.name, "NAMESPACE="+l2Ad.namespace, "IPADDRESSPOOL="+ipAddresspool.name)
exutil.By("3. Create a service with annotation to obtain IP from first addresspool")
loadBalancerServiceAnnotatedTemplate := filepath.Join(testDataMetallbDir, "loadbalancer-svc-annotated-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-73625",
namespace: ns1,
externaltrafficpolicy: "Local",
labelKey: "environ",
labelValue: "Prod",
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipAddresspool.name,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-73625 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, freeIPs[0])).To(o.BeTrue())
exutil.By("SUCCESS - Services created successfully")
exutil.By("Apply label to namespace\n")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Create an egressip object\n")
exutil.By("Apply EgressLabel Key to one node.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[1], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[0], egressNodeLabel, "true")
egressIPTemplate := filepath.Join(networkBaseDir, "egressip-config2-template.yaml")
podLabelKey = "name"
podLabelValue = svc.name
egressip := egressIPResource1{
name: "egressip-" + getRandomString(),
template: egressIPTemplate,
egressIP1: freeIPs[1],
nsLabelKey: nsLabelKey,
nsLabelValue: nsLabelValue,
podLabelKey: podLabelKey,
podLabelValue: podLabelValue,
}
defer egressip.deleteEgressIPObject1(oc)
egressip.createEgressIPObject2(oc)
verifyExpectedEIPNumInEIPObject(oc, egressip.name, 1)
externalHostCmd := "curl -k " + freeIPs[0] + ":80"
outPut, err := sshRunCmdOutPut(exteranlHost, "root", externalHostCmd)
e2e.Logf("traffic from external direct to pod1 result is %v", outPut)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(outPut, "Hello OpenShift")).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
0fa9a1bb-3605-44e3-b799-36405ea033eb
|
Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-78663-Pods on default network and UDNs if applicable can access k8s service when its node is egressIP node [Serial]
|
['"context"', '"fmt"', '"net"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_egressip_ovn.go
|
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-78663-Pods on default network and UDNs if applicable can access k8s service when its node is egressIP node [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allNS []string
udnNS []string
)
exutil.By("1. Get node list")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
} else if (ipStackType == "ipv4single" || ipStackType == "ipv6single") && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev4 or singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
exutil.By("2.1 Get a namespace for default network. If NetworkSegmentation featuregate is enabled, create four more namespaces for two overlapping layer3 UDNs and overlapping layer2 UDNs")
ns1 := oc.Namespace()
allNS = append(allNS, ns1)
udnEnabled, _ := IsFeaturegateEnabled(oc, "NetworkSegmentation")
// if NetworkSegmentation featuregate is enabled, create labelled UDN namespaces for UDNs
if udnEnabled {
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
udnNS = append(udnNS, ns)
}
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
if udnEnabled {
exutil.By("2.2. Create two overlapping layer3 UDNs between ns2, ns3, create two overlapping layer2 UDN between ns4, ns5")
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
for i := 0; i < 2; i++ {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr, ipv6cidr, cidr, "layer3")
createGeneralUDNCRD(oc, udnNS[i+2], "udn-network-layer2-"+udnNS[i+2], ipv4cidr, ipv6cidr, cidr, "layer2")
}
}
exutil.By("3. Apply EgressLabel Key to egressNode. Two egress nodes are needed for dualstack egressIP object")
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPv6s)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
} else if ipStackType == "ipv4single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPs(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-78663",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPNode string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNode = egressNode1
}
exutil.By("5. On each of egress node(s) and nonEgressNode, create a test pod, curl k8s service from each pod")
var nodeNames []string
if ipStackType == "dualstack" {
nodeNames = []string{assignedEIPNodev4, assignedEIPNodev6, nonEgressNode}
} else {
nodeNames = []string{assignedEIPNode, nonEgressNode}
}
e2e.Logf("nodeNames: %s , length of nodeName is: %d", nodeNames, len(nodeNames))
var testpods [5][3]pingPodResourceNode
for j := 0; j < len(allNS); j++ {
for i := 0; i < len(nodeNames); i++ {
testpods[j][i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i) + "-" + allNS[j],
namespace: allNS[j],
nodename: nodeNames[i],
template: pingPodNodeTemplate,
}
testpods[j][i].createPingPodNode(oc)
waitPodReady(oc, allNS[j], testpods[j][i].name)
}
}
svcIP1, svcIP2 := getSvcIP(oc, "default", "kubernetes")
e2e.Logf("k8s service has IP(s) as svcIP1: %s, svcIP2: %s", svcIP1, svcIP2)
var curlCmd string
if svcIP2 != "" {
curlCmdv6 := fmt.Sprintf("curl -I -k -v https://[%s]:443/api?timeout=32s", svcIP1)
curlCmdv4 := fmt.Sprintf("curl -I -k -v https://%s:443/api?timeout=32s", svcIP2)
for j := 0; j < len(allNS); j++ {
for i := 0; i < len(nodeNames); i++ {
_, curlErr := e2eoutput.RunHostCmd(testpods[j][i].namespace, testpods[j][i].name, curlCmdv6)
o.Expect(curlErr).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to curl k8s service from pod %s", testpods[j][i].name))
_, curlErr = e2eoutput.RunHostCmd(testpods[j][i].namespace, testpods[j][i].name, curlCmdv4)
o.Expect(curlErr).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to curl k8s service from pod %s", testpods[j][i].name))
}
}
} else {
curlCmd = fmt.Sprintf("curl -I -k -v https://%s/api?timeout=32s", net.JoinHostPort(svcIP1, "443"))
for j := 0; j < len(allNS); j++ {
for i := 0; i < len(nodeNames); i++ {
_, curlErr := e2eoutput.RunHostCmd(testpods[j][i].namespace, testpods[j][i].name, curlCmd)
o.Expect(curlErr).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to curl k8s service from pod %s", testpods[j][i].name))
}
}
}
})
| |||||
file
|
openshift/openshift-tests-private
|
79f7cb1d-1a65-460e-8e93-e736d2b85476
|
cni_util
|
import (
"context"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
package networking
import (
"context"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
type multihomingNAD struct {
namespace string
nadname string
subnets string
nswithnadname string
excludeSubnets string
topology string
template string
}
type multihomingSharenetNAD struct {
namespace string
nadname string
subnets string
nswithnadname string
excludeSubnets string
topology string
sharenetname string
template string
}
type testMultihomingPod struct {
name string
namespace string
podlabel string
nadname string
podenvname string
nodename string
template string
}
type testMultihomingStaticPod struct {
name string
namespace string
podlabel string
nadname string
podenvname string
nodename string
macaddress string
ipaddress string
template string
}
type multihomingIPBlock struct {
name string
namespace string
cidr string
policyfor string
template string
}
type dualstackNAD struct {
nadname string
namespace string
plugintype string
mode string
ipamtype string
ipv4range string
ipv6range string
ipv4rangestart string
ipv4rangeend string
ipv6rangestart string
ipv6rangeend string
template string
}
type whereaboutsoverlappingIPNAD struct {
nadname string
namespace string
plugintype string
mode string
ipamtype string
ipv4range string
enableoverlapping bool
networkname string
template string
}
type testMultusPod struct {
name string
namespace string
podlabel string
nadname string
podenvname string
nodename string
replicas string
template string
}
type multinetworkipBlockCIDRsDual struct {
name string
namespace string
cidrIpv4 string
cidrIpv6 string
policyfor string
template string
}
func (nad *multihomingNAD) createMultihomingNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NAMESPACE="+nad.namespace, "NADNAME="+nad.nadname, "SUBNETS="+nad.subnets, "NSWITHNADNAME="+nad.nswithnadname, "EXCLUDESUBNETS="+nad.excludeSubnets, "TOPOLOGY="+nad.topology)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to net attach definition %v", nad.nadname))
}
func (nad *multihomingSharenetNAD) createMultihomingSharenetNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NAMESPACE="+nad.namespace, "NADNAME="+nad.nadname, "SUBNETS="+nad.subnets, "NSWITHNADNAME="+nad.nswithnadname, "EXCLUDESUBNETS="+nad.excludeSubnets, "TOPOLOGY="+nad.topology, "SHARENETNAME="+nad.sharenetname)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to net attach definition %v", nad.nadname))
}
func (pod *testMultihomingPod) createTestMultihomingPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "PODLABEL="+pod.podlabel, "NADNAME="+pod.nadname, "PODENVNAME="+pod.podenvname, "NODENAME="+pod.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (pod *testMultihomingStaticPod) createTestMultihomingStaticPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "PODLABEL="+pod.podlabel, "NADNAME="+pod.nadname, "PODENVNAME="+pod.podenvname, "NODENAME="+pod.nodename, "MACADDRESS="+pod.macaddress, "IPADDRESS="+pod.ipaddress)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (ipBlock_ingress_policy *multihomingIPBlock) createMultihomingipBlockIngressObject(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_ingress_policy.template, "-p", "NAME="+ipBlock_ingress_policy.name, "NAMESPACE="+ipBlock_ingress_policy.namespace, "CIDR="+ipBlock_ingress_policy.cidr, "POLICYFOR="+ipBlock_ingress_policy.policyfor)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_ingress_policy.name))
}
func checkOVNSwitch(oc *exutil.CLI, nad string, leaderPod string) bool {
listSWCmd := "ovn-nbctl show | grep switch"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", leaderPod, listSWCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
return strings.Contains(listOutput, nad)
}
func checkOVNRouter(oc *exutil.CLI, nad string, leaderPod string) bool {
listSWCmd := "ovn-nbctl show | grep router"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", leaderPod, listSWCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
return strings.Contains(listOutput, nad)
}
func checkNAD(oc *exutil.CLI, ns string, nad string) bool {
nadOutput, nadOutputErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("net-attach-def", "-n", ns).Output()
o.Expect(nadOutputErr).NotTo(o.HaveOccurred())
return strings.Contains(nadOutput, nad)
}
func checkOVNswitchPorts(podName []string, outPut string) bool {
result := true
for _, pod := range podName {
if !strings.Contains(outPut, pod) {
result = false
}
}
return result
}
func CurlMultusPod2PodPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPDst string, outputInt string, podEnvName string) {
output, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+outputInt+" --connect-timeout 5 -s "+net.JoinHostPort(podIPDst, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, podEnvName)).To(o.BeTrue())
}
func CurlMultusPod2PodFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPDst string, outputInt string, podEnvName string) {
output, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+outputInt+" --connect-timeout 5 -s "+net.JoinHostPort(podIPDst, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(strings.Contains(output, podEnvName)).NotTo(o.BeTrue())
}
// Using getPodMultiNetworks when pods consume multiple NADs
// Using getPodMultiNetwork when pods consume single NAD
func getPodMultiNetworks(oc *exutil.CLI, namespace string, podName string, netName string) (string, string) {
cmd1 := "ip a sho " + netName + " | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmd2 := "ip a sho " + netName + " | awk 'NR==5{print $2}' |grep -Eo '([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}'"
podv4Output, err := e2eoutput.RunHostCmd(namespace, podName, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
podIPv4 := strings.TrimSpace(podv4Output)
podv6Output, err1 := e2eoutput.RunHostCmd(namespace, podName, cmd2)
o.Expect(err1).NotTo(o.HaveOccurred())
podIPv6 := strings.TrimSpace(podv6Output)
return podIPv4, podIPv6
}
func multihomingBeforeCheck(oc *exutil.CLI, topology string) ([]string, []string, []string, []string, string, string, string) {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2dualstacknetwork"
nsWithnad := ns1 + "/" + nadName
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/24,fd00:dead:beef::0/64",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: topology,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
exutil.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
exutil.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4, "net1", pod1.podenvname)
e2e.Logf("The v6 address of pod1 is: %v", pod1IPv6, "net1", pod1.podenvname)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4, "net1", pod2.podenvname)
e2e.Logf("The v6 address of pod2 is: %v", pod2IPv6, "net1", pod2.podenvname)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, pod3IPv6 := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4, "net1", pod3.podenvname)
e2e.Logf("The v6 address of pod3 is: %v", pod3IPv6, "net1", pod3.podenvname)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).ShouldNot(o.Equal(""))
podName := []string{pod1Name[0], pod2Name[0], pod3Name[0]}
exutil.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv6, "net1", pod2.podenvname)
exutil.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv6, "net1", pod3.podenvname)
exutil.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv6, "net1", pod1.podenvname)
exutil.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv6, "net1", pod3.podenvname)
exutil.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv6, "net1", pod1.podenvname)
exutil.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv6, "net1", pod2.podenvname)
podEnvName := []string{pod1.podenvname, pod2.podenvname, pod3.podenvname}
podIPv4 := []string{pod1IPv4, pod2IPv4, pod3IPv4}
podIPv6 := []string{pod1IPv6, pod2IPv6, pod3IPv6}
return podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns1, nadName
}
func multihomingAfterCheck(oc *exutil.CLI, podName []string, podEnvName []string, podIPv4 []string, podIPv6 []string, ovnMasterPodName string, ns string, nadName string) {
pod1Name := podName[0]
pod2Name := podName[1]
pod3Name := podName[2]
pod1envname := podEnvName[0]
pod2envname := podEnvName[1]
pod3envname := podEnvName[2]
pod1IPv4 := podIPv4[0]
pod2IPv4 := podIPv4[1]
pod3IPv4 := podIPv4[2]
pod1IPv6 := podIPv6[0]
pod2IPv6 := podIPv6[1]
pod3IPv6 := podIPv6[2]
exutil.By("Checking connectivity from pod to pod after deleting")
CurlMultusPod2PodPass(oc, ns, pod1Name, pod2IPv4, "net1", pod2envname)
CurlMultusPod2PodPass(oc, ns, pod1Name, pod2IPv6, "net1", pod2envname)
CurlMultusPod2PodPass(oc, ns, pod1Name, pod3IPv4, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod1Name, pod3IPv6, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod1IPv4, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod1IPv6, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod3IPv4, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod3IPv6, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod1IPv4, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod1IPv6, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod2IPv4, "net1", pod2envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod2IPv6, "net1", pod2envname)
}
// This is for a negtive case which the pods can't be running using the wrong NAD
func getPodMultiNetworkFail(oc *exutil.CLI, namespace string, podName string) {
cmd1 := "ip a sho net1 | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmd2 := "ip a sho net1 | awk 'NR==5{print $2}' |grep -Eo '([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}'"
_, ipv4Err := e2eoutput.RunHostCmd(namespace, podName, cmd1)
o.Expect(ipv4Err).To(o.HaveOccurred())
_, ipv6Err := e2eoutput.RunHostCmd(namespace, podName, cmd2)
o.Expect(ipv6Err).To(o.HaveOccurred())
}
func (nad *dualstackNAD) createDualstackNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NADNAME="+nad.nadname, "NAMESPACE="+nad.namespace, "PLUGINTYPE="+nad.plugintype, "MODE="+nad.mode, "IPAMTYPE="+nad.ipamtype, "IPV4RANGE="+nad.ipv4range, "IPV6RANGE="+nad.ipv6range, "IPV4RANGESTART="+nad.ipv4rangestart, "IPV4RANGEEND="+nad.ipv4rangeend, "IPV6RANGESTART="+nad.ipv6rangestart, "IPV6RANGEEND="+nad.ipv6rangeend)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to net attach definition %v", nad.nadname))
}
func (nad *whereaboutsoverlappingIPNAD) createWhereaboutsoverlappingIPNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NADNAME="+nad.nadname, "NAMESPACE="+nad.namespace, "PLUGINTYPE="+nad.plugintype, "MODE="+nad.mode, "IPAMTYPE="+nad.ipamtype, "IPV4RANGE="+nad.ipv4range, "ENABLEOVERLAPPING="+strconv.FormatBool(nad.enableoverlapping), "NETWORKNAME="+nad.networkname)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create the net-attach-definition %v", nad.nadname))
}
func (pod *testMultusPod) createTestMultusPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "PODLABEL="+pod.podlabel, "NADNAME="+pod.nadname, "PODENVNAME="+pod.podenvname, "NODENAME="+pod.nodename, "REPLICAS="+pod.replicas)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (multinetworkipBlock_policy *multinetworkipBlockCIDRsDual) createMultinetworkipBlockCIDRDual(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", multinetworkipBlock_policy.template, "-p", "NAME="+multinetworkipBlock_policy.name, "NAMESPACE="+multinetworkipBlock_policy.namespace, "CIDRIPV6="+multinetworkipBlock_policy.cidrIpv6, "CIDRIPV4="+multinetworkipBlock_policy.cidrIpv4, "POLICYFOR="+multinetworkipBlock_policy.policyfor)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", multinetworkipBlock_policy.name))
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
71d3acab-17b5-4188-ade4-fe92ac968ca9
|
createMultihomingNAD
|
['"fmt"', '"net"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['multihomingNAD']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (nad *multihomingNAD) createMultihomingNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NAMESPACE="+nad.namespace, "NADNAME="+nad.nadname, "SUBNETS="+nad.subnets, "NSWITHNADNAME="+nad.nswithnadname, "EXCLUDESUBNETS="+nad.excludeSubnets, "TOPOLOGY="+nad.topology)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to net attach definition %v", nad.nadname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
d9bb90fd-604b-41ee-83a0-4235e6d2cdf2
|
createMultihomingSharenetNAD
|
['"fmt"', '"net"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['multihomingSharenetNAD']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (nad *multihomingSharenetNAD) createMultihomingSharenetNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NAMESPACE="+nad.namespace, "NADNAME="+nad.nadname, "SUBNETS="+nad.subnets, "NSWITHNADNAME="+nad.nswithnadname, "EXCLUDESUBNETS="+nad.excludeSubnets, "TOPOLOGY="+nad.topology, "SHARENETNAME="+nad.sharenetname)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to net attach definition %v", nad.nadname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
ad67901f-0498-4f3d-b9cf-6e7156ca1edd
|
createTestMultihomingPod
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['testMultihomingPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (pod *testMultihomingPod) createTestMultihomingPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "PODLABEL="+pod.podlabel, "NADNAME="+pod.nadname, "PODENVNAME="+pod.podenvname, "NODENAME="+pod.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
0ecdbb2e-980d-45cd-a013-24fe304793ec
|
createTestMultihomingStaticPod
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['testMultihomingStaticPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (pod *testMultihomingStaticPod) createTestMultihomingStaticPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "PODLABEL="+pod.podlabel, "NADNAME="+pod.nadname, "PODENVNAME="+pod.podenvname, "NODENAME="+pod.nodename, "MACADDRESS="+pod.macaddress, "IPADDRESS="+pod.ipaddress)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
3ad7ee8e-b115-4fce-ad59-ab72abca4e3e
|
createMultihomingipBlockIngressObject
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['multihomingIPBlock']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (ipBlock_ingress_policy *multihomingIPBlock) createMultihomingipBlockIngressObject(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_ingress_policy.template, "-p", "NAME="+ipBlock_ingress_policy.name, "NAMESPACE="+ipBlock_ingress_policy.namespace, "CIDR="+ipBlock_ingress_policy.cidr, "POLICYFOR="+ipBlock_ingress_policy.policyfor)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_ingress_policy.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
71807f75-6b0d-4bc6-a23b-0898b2fac3a7
|
checkOVNSwitch
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func checkOVNSwitch(oc *exutil.CLI, nad string, leaderPod string) bool {
listSWCmd := "ovn-nbctl show | grep switch"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", leaderPod, listSWCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
return strings.Contains(listOutput, nad)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
5b9d0de7-f505-4c14-93f2-f059d7747038
|
checkOVNRouter
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func checkOVNRouter(oc *exutil.CLI, nad string, leaderPod string) bool {
listSWCmd := "ovn-nbctl show | grep router"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", leaderPod, listSWCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
return strings.Contains(listOutput, nad)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d743fda0-1ee5-4141-9591-123f66d506d7
|
checkNAD
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func checkNAD(oc *exutil.CLI, ns string, nad string) bool {
nadOutput, nadOutputErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("net-attach-def", "-n", ns).Output()
o.Expect(nadOutputErr).NotTo(o.HaveOccurred())
return strings.Contains(nadOutput, nad)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
8543336f-8d0b-4103-8e5b-8067ba0d8312
|
checkOVNswitchPorts
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func checkOVNswitchPorts(podName []string, outPut string) bool {
result := true
for _, pod := range podName {
if !strings.Contains(outPut, pod) {
result = false
}
}
return result
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4d62ad2e-269e-470a-8f24-a30059013aaa
|
CurlMultusPod2PodPass
|
['"net"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func CurlMultusPod2PodPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPDst string, outputInt string, podEnvName string) {
output, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+outputInt+" --connect-timeout 5 -s "+net.JoinHostPort(podIPDst, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, podEnvName)).To(o.BeTrue())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
cddfb151-3380-482d-93a5-53ece7755d55
|
CurlMultusPod2PodFail
|
['"net"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func CurlMultusPod2PodFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPDst string, outputInt string, podEnvName string) {
output, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+outputInt+" --connect-timeout 5 -s "+net.JoinHostPort(podIPDst, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(strings.Contains(output, podEnvName)).NotTo(o.BeTrue())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
247a61af-43d4-4a95-8a72-c7db94c86c4b
|
getPodMultiNetworks
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func getPodMultiNetworks(oc *exutil.CLI, namespace string, podName string, netName string) (string, string) {
cmd1 := "ip a sho " + netName + " | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmd2 := "ip a sho " + netName + " | awk 'NR==5{print $2}' |grep -Eo '([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}'"
podv4Output, err := e2eoutput.RunHostCmd(namespace, podName, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
podIPv4 := strings.TrimSpace(podv4Output)
podv6Output, err1 := e2eoutput.RunHostCmd(namespace, podName, cmd2)
o.Expect(err1).NotTo(o.HaveOccurred())
podIPv6 := strings.TrimSpace(podv6Output)
return podIPv4, podIPv6
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
355a07cc-2762-436a-a15d-870633d351f7
|
multihomingBeforeCheck
|
['"context"', '"path/filepath"']
|
['multihomingNAD', 'testMultihomingPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func multihomingBeforeCheck(oc *exutil.CLI, topology string) ([]string, []string, []string, []string, string, string, string) {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/multihoming")
multihomingNADTemplate = filepath.Join(buildPruningBaseDir, "multihoming-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Create a test namespace")
ns1 := oc.Namespace()
nadName := "layer2dualstacknetwork"
nsWithnad := ns1 + "/" + nadName
exutil.By("Create a custom resource network-attach-defintion in tested namespace")
nad1ns1 := multihomingNAD{
namespace: ns1,
nadname: nadName,
subnets: "192.168.100.0/24,fd00:dead:beef::0/64",
nswithnadname: nsWithnad,
excludeSubnets: "",
topology: topology,
template: multihomingNADTemplate,
}
nad1ns1.createMultihomingNAD(oc)
exutil.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "multihoming-pod-1",
namespace: ns1,
podlabel: "multihoming-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-1",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod1")).NotTo(o.HaveOccurred())
exutil.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "multihoming-pod-2",
namespace: ns1,
podlabel: "multihoming-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "Hello multihoming-pod-2",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod2")).NotTo(o.HaveOccurred())
exutil.By("Create 3rd pod consuming above network-attach-defintion in ns1")
pod3 := testMultihomingPod{
name: "multihoming-pod-3",
namespace: ns1,
podlabel: "multihoming-pod3",
nadname: nadName,
nodename: nodeList.Items[1].Name,
podenvname: "Hello multihoming-pod-3",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=multihoming-pod3")).NotTo(o.HaveOccurred())
exutil.By("Get IPs from the pod1's secondary interface")
pod1Name := getPodName(oc, ns1, "name=multihoming-pod1")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns1, pod1Name[0])
e2e.Logf("The v4 address of pod1 is: %v", pod1IPv4, "net1", pod1.podenvname)
e2e.Logf("The v6 address of pod1 is: %v", pod1IPv6, "net1", pod1.podenvname)
exutil.By("Get IPs from the pod2's secondary interface")
pod2Name := getPodName(oc, ns1, "name=multihoming-pod2")
pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns1, pod2Name[0])
e2e.Logf("The v4 address of pod2 is: %v", pod2IPv4, "net1", pod2.podenvname)
e2e.Logf("The v6 address of pod2 is: %v", pod2IPv6, "net1", pod2.podenvname)
exutil.By("Get IPs from the pod3's secondary interface")
pod3Name := getPodName(oc, ns1, "name=multihoming-pod3")
pod3IPv4, pod3IPv6 := getPodMultiNetwork(oc, ns1, pod3Name[0])
e2e.Logf("The v4 address of pod3 is: %v", pod3IPv4, "net1", pod3.podenvname)
e2e.Logf("The v6 address of pod3 is: %v", pod3IPv6, "net1", pod3.podenvname)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).ShouldNot(o.Equal(""))
podName := []string{pod1Name[0], pod2Name[0], pod3Name[0]}
exutil.By("Checking connectivity from pod1 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod2IPv6, "net1", pod2.podenvname)
exutil.By("Checking connectivity from pod1 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod1Name[0], pod3IPv6, "net1", pod3.podenvname)
exutil.By("Checking connectivity from pod2 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod1IPv6, "net1", pod1.podenvname)
exutil.By("Checking connectivity from pod2 to pod3")
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv4, "net1", pod3.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod2Name[0], pod3IPv6, "net1", pod3.podenvname)
exutil.By("Checking connectivity from pod3 to pod1")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv4, "net1", pod1.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod1IPv6, "net1", pod1.podenvname)
exutil.By("Checking connectivity from pod3 to pod2")
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv4, "net1", pod2.podenvname)
CurlMultusPod2PodPass(oc, ns1, pod3Name[0], pod2IPv6, "net1", pod2.podenvname)
podEnvName := []string{pod1.podenvname, pod2.podenvname, pod3.podenvname}
podIPv4 := []string{pod1IPv4, pod2IPv4, pod3IPv4}
podIPv6 := []string{pod1IPv6, pod2IPv6, pod3IPv6}
return podName, podEnvName, podIPv4, podIPv6, ovnMasterPodName, ns1, nadName
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
ef92d517-2ba7-4844-9725-2e971416901c
|
multihomingAfterCheck
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func multihomingAfterCheck(oc *exutil.CLI, podName []string, podEnvName []string, podIPv4 []string, podIPv6 []string, ovnMasterPodName string, ns string, nadName string) {
pod1Name := podName[0]
pod2Name := podName[1]
pod3Name := podName[2]
pod1envname := podEnvName[0]
pod2envname := podEnvName[1]
pod3envname := podEnvName[2]
pod1IPv4 := podIPv4[0]
pod2IPv4 := podIPv4[1]
pod3IPv4 := podIPv4[2]
pod1IPv6 := podIPv6[0]
pod2IPv6 := podIPv6[1]
pod3IPv6 := podIPv6[2]
exutil.By("Checking connectivity from pod to pod after deleting")
CurlMultusPod2PodPass(oc, ns, pod1Name, pod2IPv4, "net1", pod2envname)
CurlMultusPod2PodPass(oc, ns, pod1Name, pod2IPv6, "net1", pod2envname)
CurlMultusPod2PodPass(oc, ns, pod1Name, pod3IPv4, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod1Name, pod3IPv6, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod1IPv4, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod1IPv6, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod3IPv4, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod2Name, pod3IPv6, "net1", pod3envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod1IPv4, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod1IPv6, "net1", pod1envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod2IPv4, "net1", pod2envname)
CurlMultusPod2PodPass(oc, ns, pod3Name, pod2IPv6, "net1", pod2envname)
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
da7943df-23a6-4a78-a62a-a005d71abb4c
|
getPodMultiNetworkFail
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func getPodMultiNetworkFail(oc *exutil.CLI, namespace string, podName string) {
cmd1 := "ip a sho net1 | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmd2 := "ip a sho net1 | awk 'NR==5{print $2}' |grep -Eo '([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}'"
_, ipv4Err := e2eoutput.RunHostCmd(namespace, podName, cmd1)
o.Expect(ipv4Err).To(o.HaveOccurred())
_, ipv6Err := e2eoutput.RunHostCmd(namespace, podName, cmd2)
o.Expect(ipv6Err).To(o.HaveOccurred())
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
19a6e832-925c-4bb1-a561-5af7f32f83e6
|
createDualstackNAD
|
['"fmt"', '"net"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['dualstackNAD']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (nad *dualstackNAD) createDualstackNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NADNAME="+nad.nadname, "NAMESPACE="+nad.namespace, "PLUGINTYPE="+nad.plugintype, "MODE="+nad.mode, "IPAMTYPE="+nad.ipamtype, "IPV4RANGE="+nad.ipv4range, "IPV6RANGE="+nad.ipv6range, "IPV4RANGESTART="+nad.ipv4rangestart, "IPV4RANGEEND="+nad.ipv4rangeend, "IPV6RANGESTART="+nad.ipv6rangestart, "IPV6RANGEEND="+nad.ipv6rangeend)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to net attach definition %v", nad.nadname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
6c306ea7-4c68-47a2-8259-09fc9fef7ed7
|
createWhereaboutsoverlappingIPNAD
|
['"fmt"', '"net"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['whereaboutsoverlappingIPNAD']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (nad *whereaboutsoverlappingIPNAD) createWhereaboutsoverlappingIPNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NADNAME="+nad.nadname, "NAMESPACE="+nad.namespace, "PLUGINTYPE="+nad.plugintype, "MODE="+nad.mode, "IPAMTYPE="+nad.ipamtype, "IPV4RANGE="+nad.ipv4range, "ENABLEOVERLAPPING="+strconv.FormatBool(nad.enableoverlapping), "NETWORKNAME="+nad.networkname)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create the net-attach-definition %v", nad.nadname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
a37620e6-ac83-4e00-8cac-cb63182b9eea
|
createTestMultusPod
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['testMultusPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (pod *testMultusPod) createTestMultusPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "PODLABEL="+pod.podlabel, "NADNAME="+pod.nadname, "PODENVNAME="+pod.podenvname, "NODENAME="+pod.nodename, "REPLICAS="+pod.replicas)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
5df152a8-d875-49c3-b04f-45f1cb2cce4a
|
createMultinetworkipBlockCIDRDual
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['multinetworkipBlockCIDRsDual']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cni_util.go
|
func (multinetworkipBlock_policy *multinetworkipBlockCIDRsDual) createMultinetworkipBlockCIDRDual(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", multinetworkipBlock_policy.template, "-p", "NAME="+multinetworkipBlock_policy.name, "NAMESPACE="+multinetworkipBlock_policy.namespace, "CIDRIPV6="+multinetworkipBlock_policy.cidrIpv6, "CIDRIPV4="+multinetworkipBlock_policy.cidrIpv4, "POLICYFOR="+multinetworkipBlock_policy.policyfor)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", multinetworkipBlock_policy.name))
}
|
networking
| |||
test
|
openshift/openshift-tests-private
|
1acc1c38-cc40-460b-a2b2-5a702a1abb1e
|
cno
|
import (
"context"
"fmt"
"os"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/cno.go
|
package networking
import (
"context"
"fmt"
"os"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = g.Describe("[sig-networking] SDN CNO", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-cno", exutil.KubeConfigPath())
diagNamespace = "openshift-network-diagnostics"
restoreCmd = `[{"op":"replace","path":"/spec/networkDiagnostics","value":{"mode":"","sourcePlacement":{},"targetPlacement":{}}}]`
)
g.BeforeEach(func() {
err := exutil.CheckNetworkOperatorStatus(oc)
if err != nil {
g.Skip("The Cluster Network Operator is already not in normal status, skip networkDiagnostics test cases!!!")
}
})
// author: [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-Critical-72348-Configure networkDiagnostics for both network-check-source and network-check-target. [Disruptive]", func() {
workers, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workers) < 2 {
g.Skip("No enough workers, skip the tests")
}
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Add a label to one worker node.")
defer exutil.DeleteLabelFromNode(oc, workers[0].Name, "net-diag-test-source")
exutil.AddLabelToNode(oc, workers[0].Name, "net-diag-test-source", "ocp72348")
exutil.By("Configure networkDiagnostics to match the label")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), "networkDiagnostics pods are not recovered as default.")
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux",
"net-diag-test-source": "ocp72348"
}
},
"targetPlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux"
},
"tolerations": [
{
"operator": "Exists"
}
]
}
}
}
}
`
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify network-check-source pod deployed to the labeled node.")
o.Eventually(func() bool {
var nodeName string
networkCheckSourcePod, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-source")
o.Expect(err).NotTo(o.HaveOccurred())
if len(networkCheckSourcePod) == 0 {
// In case when the pod started, it might have a time just pod got terminiated but not started yet.
nodeName = ""
} else {
nodeName, _ = exutil.GetPodNodeName(oc, diagNamespace, networkCheckSourcePod[0])
}
e2e.Logf("Currently the network-check-source pod's node is %s,expected node is %s", nodeName, workers[0].Name)
return nodeName == workers[0].Name
}, "300s", "10s").Should(o.BeTrue(), "network-check-source pod was not deployed to labeled node.")
exutil.By("Verify network-check-target pod deployed to all linux nodes.")
o.Eventually(func() bool {
networkCheckTargetPods, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
o.Expect(err).NotTo(o.HaveOccurred())
workers, err := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(err).NotTo(o.HaveOccurred())
return len(networkCheckTargetPods) == len(workers)
}, "300s", "10s").Should(o.BeTrue(), "network-check-target pods were not deployed to all linux nodes..")
exutil.By("Add a label to second worker node ")
defer exutil.DeleteLabelFromNode(oc, workers[1].Name, "net-diag-test-target")
exutil.AddLabelToNode(oc, workers[1].Name, "net-diag-test-target", "ocp72348")
exutil.By("Configure networkDiagnostics to match the label")
patchCmd = `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux",
"net-diag-test-source": "ocp72348"
}
},
"targetPlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux",
"net-diag-test-target": "ocp72348"
},
"tolerations": [
{
"operator": "Exists"
}
]
}
}
}
}
`
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify only one network-check-target pod is deployed to the labeled node.")
o.Eventually(func() bool {
networkCheckTargetPods, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
o.Expect(err).NotTo(o.HaveOccurred())
var nodeName string
if len(networkCheckTargetPods) == 0 {
nodeName = ""
} else {
nodeName, _ = exutil.GetPodNodeName(oc, diagNamespace, networkCheckTargetPods[0])
}
e2e.Logf("Currently the network-check-target pod's node is %s,expected node is %s", nodeName, workers[0].Name)
return len(networkCheckTargetPods) == 1 && nodeName == workers[1].Name
}, "300s", "10s").Should(o.BeTrue(), "network-check-target pod was not deployed to the node with correct label.")
exutil.By("Verify PodNetworkConnectivityCheck has only one network-check-source-to-network-check-target")
o.Eventually(func() bool {
podNetworkConnectivityCheck, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodNetworkConnectivityCheck", "-n", diagNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(podNetworkConnectivityCheck)
regexStr := "network-check-source.*network-check-target.*"
r := regexp.MustCompile(regexStr)
matches := r.FindAllString(podNetworkConnectivityCheck, -1)
return len(matches) == 1
}, "300s", "10s").Should(o.BeTrue(), "The number of network-check-source.*network-check-target.* was not 1.")
})
// author: [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-Medium-72349-No matching node for sourcePlacement or targePlacement. [Disruptive]", func() {
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure networkDiagnostics sourcePlacement with label that no node has")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), "networkDiagnostics pods are not recovered as default.")
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": {
"nodeSelector": {
"net-diag-node-placement-72349": ""
}
}
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify network-check-source pod is in pending status.")
o.Eventually(func() bool {
networkCheckSourcePod, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-source")
o.Expect(err).NotTo(o.HaveOccurred())
var status string
if len(networkCheckSourcePod) == 0 {
status = ""
} else {
status, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", diagNamespace, networkCheckSourcePod[0], "-o=jsonpath={.status.phase}").Output()
}
e2e.Logf("Current pod status is %s,expected status is %s", status, "Pending")
return status == "Pending"
}, "300s", "10s").Should(o.BeTrue(), "network-check-source pod was not in pending status.")
exutil.By("Verify NetworkDiagnosticsAvailable status is false.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "false"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in false status")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure networkDiagnostics targetPlacement with label that no node has")
patchCmd = `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": null,
"targetPlacement": {
"nodeSelector": {
"net-diag-node-placement-72349": ""
}
}
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify all network-check-target pod gone ")
o.Eventually(func() bool {
networkCheckTargetPods, _ := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
return len(networkCheckTargetPods) == 0
}, "300s", "10s").Should(o.BeTrue(), "network-check-target pods are not terminated.")
exutil.By("Verify NetworkDiagnosticsAvailable status is true.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "true"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in true status")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-Medium-72351-Low-73365-mode of networkDiagnostics is Disabled,invalid mode will not be accepted. [Disruptive]", func() {
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure networkDiagnostics sourcePlacement with label that no node has")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
var recNetworkdDiagPods []string
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), fmt.Sprintf("Original networkDiagnostics pods number is %v, current number is %v,", len(networkdDiagPods), len(recNetworkdDiagPods)))
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "Disabled",
"sourcePlacement": null,
"targetPlacement": null
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify that neither the network-check-source pod nor the network-check-target pod is placed. ")
o.Eventually(func() bool {
networkCheckSourcePod, _ := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-source")
networkCheckTargetPods, _ := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
return len(networkCheckTargetPods) == 0 && len(networkCheckSourcePod) == 0
}, "300s", "10s").Should(o.BeTrue(), "There is still network-check-source or network-check-target pod placed")
exutil.By("Verify NetworkDiagnosticsAvailable status is false.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "false"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in false status")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify no PodNetworkConnectivityCheck created ")
o.Eventually(func() bool {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodNetworkConnectivityCheck", "-n", diagNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(output)
return strings.Contains(output, "No resources found")
}, "300s", "10s").Should(o.BeTrue(), "PodNetworkConnectivityCheck is still there.")
exutil.By("Verify invalid mode is not accepted")
patchCmd = `{ "spec":{
"networkDiagnostics": {
"mode": "test-invalid"
}
}
}`
output, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "-p", patchCmd, "--type=merge").Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(strings.Contains(output, "Unsupported value: \"test-invalid\"")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-Medium-73367-Configure disableNetworkDiagnostics and networkDiagnostics. [Disruptive]", func() {
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure spec.disableNetworkDiagnostics=true in network.operator")
defer func() {
patchResourceAsAdmin(oc, "network.operator/cluster", "{\"spec\":{\"disableNetworkDiagnostics\": false}}")
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), "networkDiagnostics pods are not recovered as default.")
}()
patchResourceAsAdmin(oc, "network.operator/cluster", "{\"spec\":{\"disableNetworkDiagnostics\": true}}")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NetworkDiagnosticsAvailable status is false.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "false"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in false status")
exutil.By("Configure networkDiagnostics")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": null,
"targetPlacement": null
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NetworkDiagnosticsAvailable status is true.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "true"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in true status")
})
// author: [email protected]
g.It("Author:meinli-NonPreRelease-Medium-51727-ovsdb-server and northd should not core dump on node restart [Disruptive]", func() {
// https://bugzilla.redhat.com/show_bug.cgi?id=1944264
exutil.By("1. Get one node to reboot")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 1 {
g.Skip("This case requires 1 nodes, but the cluster has none. Skip it!!!")
}
worker := workerList.Items[0].Name
defer checkNodeStatus(oc, worker, "Ready")
rebootNode(oc, worker)
checkNodeStatus(oc, worker, "NotReady")
checkNodeStatus(oc, worker, "Ready")
exutil.By("2. Check the node core dump output")
mustgatherDir := "/tmp/must-gather-51727"
defer os.RemoveAll(mustgatherDir)
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir="+mustgatherDir, "--", "/usr/bin/gather_core_dumps").Output()
o.Expect(err).NotTo(o.HaveOccurred())
files, err := os.ReadDir(mustgatherDir + "/quay-io-openshift-release-dev-ocp-*" + "/node_core_dumps")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(files).Should(o.BeEmpty())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
5cc74bc5-9721-4488-bd09-e3928b2ecc2e
|
Author:huirwang-NonHyperShiftHOST-Critical-72348-Configure networkDiagnostics for both network-check-source and network-check-target. [Disruptive]
|
['"os"', '"regexp"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cno.go
|
g.It("Author:huirwang-NonHyperShiftHOST-Critical-72348-Configure networkDiagnostics for both network-check-source and network-check-target. [Disruptive]", func() {
workers, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workers) < 2 {
g.Skip("No enough workers, skip the tests")
}
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Add a label to one worker node.")
defer exutil.DeleteLabelFromNode(oc, workers[0].Name, "net-diag-test-source")
exutil.AddLabelToNode(oc, workers[0].Name, "net-diag-test-source", "ocp72348")
exutil.By("Configure networkDiagnostics to match the label")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), "networkDiagnostics pods are not recovered as default.")
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux",
"net-diag-test-source": "ocp72348"
}
},
"targetPlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux"
},
"tolerations": [
{
"operator": "Exists"
}
]
}
}
}
}
`
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify network-check-source pod deployed to the labeled node.")
o.Eventually(func() bool {
var nodeName string
networkCheckSourcePod, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-source")
o.Expect(err).NotTo(o.HaveOccurred())
if len(networkCheckSourcePod) == 0 {
// In case when the pod started, it might have a time just pod got terminiated but not started yet.
nodeName = ""
} else {
nodeName, _ = exutil.GetPodNodeName(oc, diagNamespace, networkCheckSourcePod[0])
}
e2e.Logf("Currently the network-check-source pod's node is %s,expected node is %s", nodeName, workers[0].Name)
return nodeName == workers[0].Name
}, "300s", "10s").Should(o.BeTrue(), "network-check-source pod was not deployed to labeled node.")
exutil.By("Verify network-check-target pod deployed to all linux nodes.")
o.Eventually(func() bool {
networkCheckTargetPods, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
o.Expect(err).NotTo(o.HaveOccurred())
workers, err := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(err).NotTo(o.HaveOccurred())
return len(networkCheckTargetPods) == len(workers)
}, "300s", "10s").Should(o.BeTrue(), "network-check-target pods were not deployed to all linux nodes..")
exutil.By("Add a label to second worker node ")
defer exutil.DeleteLabelFromNode(oc, workers[1].Name, "net-diag-test-target")
exutil.AddLabelToNode(oc, workers[1].Name, "net-diag-test-target", "ocp72348")
exutil.By("Configure networkDiagnostics to match the label")
patchCmd = `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux",
"net-diag-test-source": "ocp72348"
}
},
"targetPlacement": {
"nodeSelector": {
"kubernetes.io/os": "linux",
"net-diag-test-target": "ocp72348"
},
"tolerations": [
{
"operator": "Exists"
}
]
}
}
}
}
`
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify only one network-check-target pod is deployed to the labeled node.")
o.Eventually(func() bool {
networkCheckTargetPods, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
o.Expect(err).NotTo(o.HaveOccurred())
var nodeName string
if len(networkCheckTargetPods) == 0 {
nodeName = ""
} else {
nodeName, _ = exutil.GetPodNodeName(oc, diagNamespace, networkCheckTargetPods[0])
}
e2e.Logf("Currently the network-check-target pod's node is %s,expected node is %s", nodeName, workers[0].Name)
return len(networkCheckTargetPods) == 1 && nodeName == workers[1].Name
}, "300s", "10s").Should(o.BeTrue(), "network-check-target pod was not deployed to the node with correct label.")
exutil.By("Verify PodNetworkConnectivityCheck has only one network-check-source-to-network-check-target")
o.Eventually(func() bool {
podNetworkConnectivityCheck, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodNetworkConnectivityCheck", "-n", diagNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(podNetworkConnectivityCheck)
regexStr := "network-check-source.*network-check-target.*"
r := regexp.MustCompile(regexStr)
matches := r.FindAllString(podNetworkConnectivityCheck, -1)
return len(matches) == 1
}, "300s", "10s").Should(o.BeTrue(), "The number of network-check-source.*network-check-target.* was not 1.")
})
| |||||
test case
|
openshift/openshift-tests-private
|
3d41606a-7b16-4f29-ae1f-f9139ce20ef0
|
Author:huirwang-NonHyperShiftHOST-Medium-72349-No matching node for sourcePlacement or targePlacement. [Disruptive]
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cno.go
|
g.It("Author:huirwang-NonHyperShiftHOST-Medium-72349-No matching node for sourcePlacement or targePlacement. [Disruptive]", func() {
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure networkDiagnostics sourcePlacement with label that no node has")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), "networkDiagnostics pods are not recovered as default.")
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": {
"nodeSelector": {
"net-diag-node-placement-72349": ""
}
}
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify network-check-source pod is in pending status.")
o.Eventually(func() bool {
networkCheckSourcePod, err := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-source")
o.Expect(err).NotTo(o.HaveOccurred())
var status string
if len(networkCheckSourcePod) == 0 {
status = ""
} else {
status, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", diagNamespace, networkCheckSourcePod[0], "-o=jsonpath={.status.phase}").Output()
}
e2e.Logf("Current pod status is %s,expected status is %s", status, "Pending")
return status == "Pending"
}, "300s", "10s").Should(o.BeTrue(), "network-check-source pod was not in pending status.")
exutil.By("Verify NetworkDiagnosticsAvailable status is false.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "false"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in false status")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure networkDiagnostics targetPlacement with label that no node has")
patchCmd = `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": null,
"targetPlacement": {
"nodeSelector": {
"net-diag-node-placement-72349": ""
}
}
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify all network-check-target pod gone ")
o.Eventually(func() bool {
networkCheckTargetPods, _ := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
return len(networkCheckTargetPods) == 0
}, "300s", "10s").Should(o.BeTrue(), "network-check-target pods are not terminated.")
exutil.By("Verify NetworkDiagnosticsAvailable status is true.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "true"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in true status")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
a59b2227-a501-459e-99f3-2c5ff65853bc
|
Author:huirwang-NonHyperShiftHOST-Medium-72351-Low-73365-mode of networkDiagnostics is Disabled,invalid mode will not be accepted. [Disruptive]
|
['"fmt"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cno.go
|
g.It("Author:huirwang-NonHyperShiftHOST-Medium-72351-Low-73365-mode of networkDiagnostics is Disabled,invalid mode will not be accepted. [Disruptive]", func() {
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure networkDiagnostics sourcePlacement with label that no node has")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
var recNetworkdDiagPods []string
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), fmt.Sprintf("Original networkDiagnostics pods number is %v, current number is %v,", len(networkdDiagPods), len(recNetworkdDiagPods)))
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "Disabled",
"sourcePlacement": null,
"targetPlacement": null
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify that neither the network-check-source pod nor the network-check-target pod is placed. ")
o.Eventually(func() bool {
networkCheckSourcePod, _ := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-source")
networkCheckTargetPods, _ := exutil.GetAllPodsWithLabel(oc, diagNamespace, "app=network-check-target")
return len(networkCheckTargetPods) == 0 && len(networkCheckSourcePod) == 0
}, "300s", "10s").Should(o.BeTrue(), "There is still network-check-source or network-check-target pod placed")
exutil.By("Verify NetworkDiagnosticsAvailable status is false.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "false"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in false status")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify no PodNetworkConnectivityCheck created ")
o.Eventually(func() bool {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodNetworkConnectivityCheck", "-n", diagNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(output)
return strings.Contains(output, "No resources found")
}, "300s", "10s").Should(o.BeTrue(), "PodNetworkConnectivityCheck is still there.")
exutil.By("Verify invalid mode is not accepted")
patchCmd = `{ "spec":{
"networkDiagnostics": {
"mode": "test-invalid"
}
}
}`
output, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "-p", patchCmd, "--type=merge").Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(strings.Contains(output, "Unsupported value: \"test-invalid\"")).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
14421ee8-81df-4ac8-846c-8ab8a52d3960
|
Author:huirwang-NonHyperShiftHOST-Medium-73367-Configure disableNetworkDiagnostics and networkDiagnostics. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/cno.go
|
g.It("Author:huirwang-NonHyperShiftHOST-Medium-73367-Configure disableNetworkDiagnostics and networkDiagnostics. [Disruptive]", func() {
exutil.By("Get default networkDiagnostics pods. ")
networkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configure spec.disableNetworkDiagnostics=true in network.operator")
defer func() {
patchResourceAsAdmin(oc, "network.operator/cluster", "{\"spec\":{\"disableNetworkDiagnostics\": false}}")
err := exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
recNetworkdDiagPods, err := exutil.GetAllPods(oc, diagNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
return len(recNetworkdDiagPods) == len(networkdDiagPods)
}, "300s", "10s").Should(o.BeTrue(), "networkDiagnostics pods are not recovered as default.")
}()
patchResourceAsAdmin(oc, "network.operator/cluster", "{\"spec\":{\"disableNetworkDiagnostics\": true}}")
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NetworkDiagnosticsAvailable status is false.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "false"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in false status")
exutil.By("Configure networkDiagnostics")
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("Network.config.openshift.io/cluster", "--type=json", "-p", restoreCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
patchCmd := `{ "spec":{
"networkDiagnostics": {
"mode": "All",
"sourcePlacement": null,
"targetPlacement": null
}
}
}`
e2e.Logf("networkDiagnostics config command is: %s\n", patchCmd)
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchCmd)
exutil.By("Verify CNO is not in degraded status")
err = exutil.CheckNetworkOperatorStatus(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NetworkDiagnosticsAvailable status is true.")
o.Eventually(func() bool {
status := getNetworkDiagnosticsAvailable(oc)
return status == "true"
}, "300s", "10s").Should(o.BeTrue(), "NetworkDiagnosticsAvailable is not in true status")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
044df302-ef16-47c3-b774-f19e7fc2e5ae
|
Author:meinli-NonPreRelease-Medium-51727-ovsdb-server and northd should not core dump on node restart [Disruptive]
|
['"context"', '"os"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cno.go
|
g.It("Author:meinli-NonPreRelease-Medium-51727-ovsdb-server and northd should not core dump on node restart [Disruptive]", func() {
// https://bugzilla.redhat.com/show_bug.cgi?id=1944264
exutil.By("1. Get one node to reboot")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 1 {
g.Skip("This case requires 1 nodes, but the cluster has none. Skip it!!!")
}
worker := workerList.Items[0].Name
defer checkNodeStatus(oc, worker, "Ready")
rebootNode(oc, worker)
checkNodeStatus(oc, worker, "NotReady")
checkNodeStatus(oc, worker, "Ready")
exutil.By("2. Check the node core dump output")
mustgatherDir := "/tmp/must-gather-51727"
defer os.RemoveAll(mustgatherDir)
_, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir="+mustgatherDir, "--", "/usr/bin/gather_core_dumps").Output()
o.Expect(err).NotTo(o.HaveOccurred())
files, err := os.ReadDir(mustgatherDir + "/quay-io-openshift-release-dev-ocp-*" + "/node_core_dumps")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(files).Should(o.BeEmpty())
})
| |||||
test
|
openshift/openshift-tests-private
|
331a63d3-789a-4ea7-9c1e-1bedeb89e4fd
|
egressfirewall
|
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"text/template"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
package networking
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"text/template"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN egressfirewall", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-egressfirewall", exutil.KubeConfigPath())
var aclLogPath = "--path=ovn/acl-audit-log.log"
g.BeforeEach(func() {
networkType := exutil.CheckNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("This case requires OVNKubernetes as network plugin, skip the test as the cluster does not have OVN network plugin")
}
if checkProxy(oc) {
g.Skip("This is proxy cluster, egressfirewall cannot be tested on proxy cluster, skip the test.")
}
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-High-53223-Verify ACL audit logs can be generated for traffic hit EgressFirewall rules.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("2. Enable ACL looging on the namespace ns1 \n")
enableACLOnNamespace(oc, ns1, "info", "info")
exutil.By("3. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("4. Create an EgressFirewall \n")
egressFW1 := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate,
}
egressFW1.createEgressFWObject1(oc)
efErr := waitEgressFirewallApplied(oc, egressFW1.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("5. Check www.test.com is blocked \n")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.test.com --connect-timeout 5")
return err
}, "60s", "10s").Should(o.HaveOccurred())
exutil.By("6. Check www.redhat.com is allowed \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).ToNot(o.HaveOccurred())
exutil.By("7. Verify acl logs for egressfirewall generated. \n")
egressFwRegex := fmt.Sprintf("EF:%s:.*", ns1)
aclLogs, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
r := regexp.MustCompile(egressFwRegex)
matches := r.FindAllString(aclLogs, -1)
matched1, matchErr1 := regexp.MatchString(egressFwRegex+"verdict=drop, severity=info", aclLogs)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched1).To(o.BeTrue(), fmt.Sprintf("The egressfirewall acllogs were not generated as expected, acl logs for this namespace %s,are: \n %s", ns1, matches))
matched2, matchErr2 := regexp.MatchString(egressFwRegex+"verdict=allow, severity=info", aclLogs)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched2).To(o.BeTrue(), fmt.Sprintf("The egressfirewall acllogs were not generated as expected, acl logs for this namespace %s,are: \n %s", ns1, matches))
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-Medium-53224-Disable and enable acl logging for EgressFirewall.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("2. Enable ACL looging on the namespace ns1 \n")
enableACLOnNamespace(oc, ns1, "info", "info")
exutil.By("3. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("4. Create an EgressFirewall \n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns1,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
err = waitEgressFirewallApplied(oc, egressFW2.name, ns1)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
exutil.By("6. Verify acl logs for egressfirewall generated. \n")
egressFwRegex := fmt.Sprintf("EF:%s:.*", ns1)
aclLogs, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
r := regexp.MustCompile(egressFwRegex)
matches := r.FindAllString(aclLogs, -1)
aclLogNum := len(matches)
o.Expect(aclLogNum > 0).To(o.BeTrue(), fmt.Sprintf("No matched acl logs numbers for namespace %s, and actual matched logs are: \n %v ", ns1, matches))
exutil.By("7. Disable acl logs. \n")
disableACLOnNamespace(oc, ns1)
exutil.By("8. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
g.By("9. Verify no incremental acl logs. \n")
aclLogs2, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
matches2 := r.FindAllString(aclLogs2, -1)
aclLogNum2 := len(matches2)
o.Expect(aclLogNum2 == aclLogNum).To(o.BeTrue(), fmt.Sprintf("Before disable,actual matched logs are: \n %v ,after disable,actual matched logs are: \n %v", matches, matches2))
exutil.By("10. Enable acl logs. \n")
enableACLOnNamespace(oc, ns1, "alert", "alert")
exutil.By("11. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
g.By("12. Verify new acl logs for egressfirewall generated. \n")
aclLogs3, err3 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err3).NotTo(o.HaveOccurred())
matches3 := r.FindAllString(aclLogs3, -1)
aclLogNum3 := len(matches3)
o.Expect(aclLogNum3 > aclLogNum).To(o.BeTrue(), fmt.Sprintf("Previous actual matched logs are: \n %v ,after enable again,actual matched logs are: \n %v", matches, aclLogNum3))
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-Medium-53226-The namespace enabled acl logging will not affect the namespace not enabling acl logging.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("2. Enable ACL looging on the namespace ns1 \n")
enableACLOnNamespace(oc, ns1, "info", "info")
exutil.By("3. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("4. Create an EgressFirewall \n")
egressFW1 := egressFirewall2{
name: "default",
namespace: ns1,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW1.createEgressFW2Object(oc)
defer egressFW1.deleteEgressFW2Object(oc)
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
err = waitEgressFirewallApplied(oc, egressFW1.name, ns1)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
exutil.By("6. Verify acl logs for egressfirewall generated. \n")
egressFwRegex := fmt.Sprintf("EF:%s:.*", ns1)
aclLogs, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
r := regexp.MustCompile(egressFwRegex)
matches := r.FindAllString(aclLogs, -1)
aclLogNum := len(matches)
o.Expect(aclLogNum > 0).To(o.BeTrue())
exutil.By("7. Create a new namespace. \n")
oc.SetupProject()
ns2 := oc.Namespace()
exutil.By("8. create hello pod in ns2 \n")
pod2 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns2,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns2, pod2.name)
exutil.By("9. Generate egress traffic in ns2. \n")
_, err = e2eoutput.RunHostCmd(pod2.namespace, pod2.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("10. Verify no acl logs for egressfirewall generated in ns2. \n")
egressFwRegexNs2 := fmt.Sprintf("egressFirewall_%s_.*", ns2)
o.Consistently(func() int {
aclLogs2, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err).NotTo(o.HaveOccurred())
r2 := regexp.MustCompile(egressFwRegexNs2)
matches2 := r2.FindAllString(aclLogs2, -1)
return len(matches2)
}, 10*time.Second, 5*time.Second).Should(o.Equal(0))
exutil.By("11. Create an EgressFirewall in ns2 \n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns2,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
defer egressFW2.deleteEgressFW2Object(oc)
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns2, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
err = waitEgressFirewallApplied(oc, egressFW2.name, ns2)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("12. Generate egress traffic which will hit the egressfirewall in ns2. \n")
_, err = e2eoutput.RunHostCmd(pod2.namespace, pod2.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
exutil.By("13. Verify no acl logs for egressfirewall generated in ns2. \n")
o.Consistently(func() int {
aclLogs2, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err).NotTo(o.HaveOccurred())
r2 := regexp.MustCompile(egressFwRegexNs2)
matches2 := r2.FindAllString(aclLogs2, -1)
return len(matches2)
}, 10*time.Second, 5*time.Second).Should(o.Equal(0))
})
// author: [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-ConnectedOnly-High-55345-[FdpOvnOvs] Drop ACL for EgressFirewall should have priority lower than allow ACL despite being last in the chain.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate2 = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
egressFWTemplate1 = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
exutil.By("Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("Create an EgressFirewall \n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns1,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate2,
}
egressFW2.createEgressFW2Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Apply another EgressFirewall with allow rules under same namespace \n")
egressFW := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate1,
}
egressFW.createEgressFWObject1(oc)
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"dnsName\":\"www.test.com\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, egressFW.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Check the result, default deny rules should have lower priority than allow rules\n")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s", ns1)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
strLines := strings.Split(listOutput, "\n")
o.Expect(len(strLines) >= 2).Should(o.BeTrue(), fmt.Sprintf("The output of acl list is not as expected,\n%s", listOutput))
var allowRules []int
var denyRule int
for _, line := range strLines {
slice := strings.Fields(line)
if strings.Contains(line, "allow") {
priority := slice[1]
intVar, _ := strconv.Atoi(priority)
allowRules = append(allowRules, intVar)
}
if strings.Contains(line, "drop") {
priority := slice[1]
denyRule, _ = strconv.Atoi(priority)
}
}
for _, allow := range allowRules {
o.Expect(allow > denyRule).Should(o.BeTrue(), fmt.Sprintf("The allow rule priority is %v, the deny rule priority is %v.", allow, denyRule))
}
})
// author: [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-ConnectedOnly-High-59709-[FdpOvnOvs] No duplicate egressfirewall rules in the OVN Northbound database after restart OVN master pod. [Disruptive]", func() {
//This is from bug https://issues.redhat.com/browse/OCPBUGS-811
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressFWTemplate1 = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
exutil.By("Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("Create egressfirewall rules under same namespace \n")
egressFW := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate1,
}
egressFW.createEgressFWObject1(oc)
defer egressFW.deleteEgressFWObject1(oc)
efErr := waitEgressFirewallApplied(oc, egressFW.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Get the base number of egressfirewall rules\n")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s", ns1)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("The egressfirewall rules before restart ovn master pod: \n %s", listOutput)
baseCount := len(strings.Split(listOutput, "\n"))
exutil.By("Restart cluster-manager's ovnkube-node pod\n")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnMasterPodName, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("Check the result, the number of egressfirewal rules should be same as before.")
ovnMasterPodName = getOVNKMasterOVNkubeNode(oc)
listOutput, listErr = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("The egressfirewall rules after restart ovn master pod: \n %s", listOutput)
resultCount := len(strings.Split(listOutput, "\n"))
o.Expect(resultCount).Should(o.Equal(baseCount))
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-High-43464-EgressFirewall works with IPv6 address.", func() {
// Note: this case focuses on Egressfirewall working with IPv6 address, as ipv6 single cluster with proxy where egressfirewall cannot work, so only test it on dual stack.
// Currently only on the UPI packet dualstack cluster, the pod can access public website with IPv6 address.
ipStackType := checkIPStackType(oc)
if ipStackType != "dualstack" || !checkIPv6PublicAccess(oc) {
g.Skip("This case should be run on UPI packet dualstack cluster, skip other platform or network stack type.")
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("Create an EgressFirewall object with rule deny.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: "::/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
defer egressFW2.deleteEgressFW2Object(oc)
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
defer pod1.deletePingPod(oc)
exutil.By("Check both ipv6 and ipv4 are blocked")
_, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -6 www.google.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -4 www.google.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
exutil.By("Remove egressfirewall object")
egressFW2.deleteEgressFW2Object(oc)
exutil.By("Create an EgressFirewall object with rule allow.")
egressFW2 = egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Allow",
cidr: "::/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
errPatch = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\"::/0\"}},{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Check both ipv4 and ipv6 destination can be accessed")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -6 www.google.com --connect-timeout 5 -I")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -4 www.google.com --connect-timeout 5 -I")
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:jechen-High-44940-No segmentation error in ovnkube-control-plane or syntax error in ovn-controller after egressfirewall resource that referencing a DNS name is deleted.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
exutil.By("1. Create a new namespace, create an EgressFirewall object with references a DNS name in the namespace.")
ns := oc.Namespace()
egressFW1 := egressFirewall1{
name: "default",
namespace: ns,
template: egressFWTemplate,
}
defer egressFW1.deleteEgressFWObject1(oc)
egressFW1.createEgressFWObject1(oc)
efErr := waitEgressFirewallApplied(oc, egressFW1.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("2. Delete the EgressFirewall, check logs of ovnkube-control-plane pod for error, there should be no segementation error, no DNS value not found in dnsMap error message.")
removeResource(oc, true, true, "egressfirewall", egressFW1.name, "-n", egressFW1.namespace)
leaderCtrlPlanePod := getOVNKMasterPod(oc)
o.Expect(leaderCtrlPlanePod).ShouldNot(o.BeEmpty())
e2e.Logf("\n leaderCtrlPlanePod: %v\n", leaderCtrlPlanePod)
o.Consistently(func() bool {
podlogs, _ := oc.AsAdmin().Run("logs").Args(leaderCtrlPlanePod, "-n", "openshift-ovn-kubernetes", "-c", "ovnkube-cluster-manager").Output()
return strings.Count(podlogs, `SIGSEGV: segmentation violation`) == 0 && strings.Count(podlogs, `DNS value not found in dnsMap for domain`) == 0
}, 60*time.Second, 10*time.Second).Should(o.BeTrue(), "Segementation error or no DNS value in dnsMap error message found in ovnkube-control-plane pod log!!")
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-High-37778-EgressFirewall can be deleted after the project deleted.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressFWTemplate1 = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
exutil.By("Obtain the namespace \n")
oc.SetupProject()
ns1 := oc.Namespace()
exutil.By("Create egressfirewall rules under same namespace \n")
egressFW := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate1,
}
egressFW.createEgressFWObject1(oc)
defer egressFW.deleteEgressFWObject1(oc)
exutil.AssertWaitPollNoErr(waitEgressFirewallApplied(oc, egressFW.name, ns1), fmt.Sprintf("Wait for the egressFW/%s applied successfully timeout", egressFW.name))
exutil.By("Delete namespace .\n")
errNs := oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns1).Execute()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("Verify no egressfirewall object ")
outPut, errFW := oc.AsAdmin().Run("get").Args("egressfirewall", egressFW.name, "-n", ns1).Output()
o.Expect(errFW).To(o.HaveOccurred())
o.Expect(outPut).NotTo(o.ContainSubstring(egressFW.name))
exutil.By("Check ovn db, corresponding egressfirewall acls were deleted.")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s", ns1)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("The egressfirewall rules after project deleted: \n %s", listOutput)
o.Expect(listOutput).NotTo(o.ContainSubstring("allow"))
o.Expect(listOutput).NotTo(o.ContainSubstring("drop "))
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-High-60488-EgressFirewall works for a nodeSelector for matchLabels.", func() {
exutil.By("Label one node to match egressfirewall rule")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
ipStackType := checkIPStackType(oc)
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, node1, "ef-dep")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, node1, "ef-dep", "qe")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall3-template.yaml")
exutil.By("Get new namespace")
ns := oc.Namespace()
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0"
}
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Check the nodes can be acccessed or not")
// Will skip the test if the nodes IP cannot be pinged even without egressfirewall
node1IP1, node1IP2 := getNodeIP(oc, node1)
node2IP1, node2IP2 := getNodeIP(oc, node2)
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
if err != nil {
g.Skip("Ping node IP failed, skip the test in this environment.")
}
exutil.By("Create an EgressFirewall object with rule nodeSelector.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify the node matched egressfirewall will be allowed.")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP2)
return err
}, "10s", "5s").Should(o.HaveOccurred())
if ipStackType == "dualstack" {
// Test node ipv6 address as well
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP1)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP1)
return err
}, "10s", "5s").Should(o.HaveOccurred())
}
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-High-60812-EgressFirewall works for a nodeSelector for matchExpressions.", func() {
exutil.By("Label one node to match egressfirewall rule")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
ipStackType := checkIPStackType(oc)
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, node1, "ef-org")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, node1, "ef-org", "dev")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall4-template.yaml")
exutil.By("Get new namespace")
ns := oc.Namespace()
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0"
}
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Check the nodes can be acccessed or not")
// Will skip the test if the nodes IP cannot be pinged even without egressfirewall
node1IP1, node1IP2 := getNodeIP(oc, node1)
node2IP1, node2IP2 := getNodeIP(oc, node2)
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
if err != nil {
g.Skip("Ping node IP failed, skip the test in this environment.")
}
exutil.By("Create an EgressFirewall object with rule nodeSelector.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify the node matched egressfirewall will be allowed, unmatched will be blocked!!")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP2)
return err
}, "10s", "5s").Should(o.HaveOccurred())
if ipStackType == "dualstack" {
// Test node ipv6 address as well
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP1)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP1)
return err
}, "10s", "5s").Should(o.HaveOccurred())
}
})
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:huirwang-High-61213-Delete IGMP Groups when deleting stale chassis.[Disruptive]", func() {
// This is from bug https://issues.redhat.com/browse/OCPBUGS-7230
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
g.Skip("Skip for non-supported auto scaling machineset platforms!!")
}
clusterinfra.SkipConditionally(oc)
exutil.By("Create a new machineset with 2 nodes")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-61213"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
nodeName0 := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
nodeName1 := clusterinfra.GetNodeNameFromMachine(oc, machineName[1])
exutil.By("Obtain the namespace \n")
ns := oc.Namespace()
exutil.By("Enable multicast on namespace \n")
enableMulticast(oc, ns)
exutil.By("Delete ovnkuber-master pods and two nodes \n")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-l", "app=ovnkube-control-plane", "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
err = ms.DeleteMachineSet(oc)
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
exutil.By("Wait ovnkuber-control-plane pods ready\n")
err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
exutil.AssertWaitPollNoErr(err, "ovnkube-control-plane pods are not ready")
exutil.By("Check ovn db, the stale chassis for deleted node should be deleted")
for _, machine := range []string{nodeName0, nodeName1} {
ovnACLCmd := fmt.Sprintf("ovn-sbctl --columns _uuid,hostname list chassis")
ovnMasterSourthDBLeaderPod := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
outPut, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterSourthDBLeaderPod, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
return outPut
}, "120s", "10s").ShouldNot(o.ContainSubstring(machine), "The stale chassis still existed!")
}
exutil.By("Check ovnkuber control plane logs, no IGMP_Group logs")
ovnMasterPodName := getOVNKMasterPod(oc)
searchString := "Transaction causes multiple rows in \"IGMP_Group\" table to have identical values"
logContents, logErr := exutil.GetSpecificPodLogs(oc, "openshift-ovn-kubernetes", "ovnkube-cluster-manager", ovnMasterPodName, "")
o.Expect(logErr).ShouldNot(o.HaveOccurred())
o.Expect(strings.Contains(logContents, searchString)).Should(o.BeFalse())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-NonPreRelease-PreChkUpgrade-Author:huirwang-High-62056-Check egressfirewall is functional post upgrade", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
ns = "62056-upgrade-ns"
allowedIPList = []string{}
ipv6CIDR string
ipv4CIDR string
)
exutil.By("1. create new namespace")
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2. Get an IP address for domain name www.redhat.com for allow rule ")
allowedIPv4, allowedIPv6 := getIPFromDnsName("www.redhat.com")
o.Expect(len(allowedIPv4) == 0).NotTo(o.BeTrue())
ipv4CIDR = allowedIPv4 + "/32"
allowedIPList = append(allowedIPList, allowedIPv4)
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
if checkIPv6PublicAccess(oc) {
o.Expect(len(allowedIPv6) == 0).NotTo(o.BeTrue())
ipv6CIDR = allowedIPv6 + "/128"
allowedIPList = append(allowedIPList, allowedIPv6)
} else {
e2e.Logf("Dual stack cluster does not have access to public websites for IPv6 address.")
}
}
exutil.By("3. Create an EgressFirewall object with rule deny.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
exutil.By("4. Update EgressFirewall object with rule specific allow rule.")
if ipStackType == "dualstack" && checkIPv6PublicAccess(oc) {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv4CIDR+"\"}},{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv6CIDR+"\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
} else {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv4CIDR+"\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("5. Create a pod in the namespace")
createResourceFromFile(oc, ns, statefulSetHelloPod)
podErr := waitForPodWithLabelReady(oc, ns, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodname := getPodName(oc, ns, "app=hello")[0]
exutil.By("6. Check the allowed destination can be accessed!")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible with just egress firewall", allowedIPList[i]))
verifyDstIPAccess(oc, helloPodname, ns, allowedIPList[i], true)
}
exutil.By("7.Check the other website can be blocked!")
_, err = e2eoutput.RunHostCmd(ns, helloPodname, "curl yahoo.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-NonPreRelease-PstChkUpgrade-Author:huirwang-High-62056-Check egressfirewall is functional post upgrade", func() {
ns := "62056-upgrade-ns"
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", ns).Execute()
if nsErr != nil {
g.Skip("Skip the PstChkUpgrade test as 62056-upgrade-ns namespace does not exist, PreChkUpgrade test did not run")
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--ignore-not-found=true").Execute()
exutil.By("Verify if EgressFirewall was applied correctly")
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Get allow IP list ")
cidrList, cidrErr := oc.AsAdmin().Run("get").Args("-n", ns, "egressfirewall.k8s.ovn.org/default", "-o=jsonpath={.spec.egress[?(@.type==\"Allow\")].to.cidrSelector}").Output()
o.Expect(cidrErr).NotTo(o.HaveOccurred())
o.Expect(cidrList == "").NotTo(o.BeTrue())
e2e.Logf("The allowed destination IPs are: %s", cidrList)
// Regular expression to match IPv4 and IPv6 addresses with CIDR notation
ipRegex := `(?:\d{1,3}\.){3}\d{1,3}\/\d{1,2}|[0-9a-fA-F:]+(?::[0-9a-fA-F]{1,4}){1,7}\/\d{1,3}`
re := regexp.MustCompile(ipRegex)
matches := re.FindAllString(cidrList, -1)
var allowedIPList []string
for _, match := range matches {
// Split the match on the '/' character and take only the IP part
ip := strings.Split(match, "/")[0]
allowedIPList = append(allowedIPList, ip)
}
exutil.By("Get the pod in the namespace")
podErr := waitForPodWithLabelReady(oc, ns, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodname := getPodName(oc, ns, "app=hello")[0]
exutil.By("Check the allowed destination can be accessed!")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible with just egress firewall", allowedIPList[i]))
verifyDstIPAccess(oc, helloPodname, ns, allowedIPList[i], true)
}
exutil.By("Check the other website can be blocked!")
_, err := e2eoutput.RunHostCmd(ns, helloPodname, "curl yahoo.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-High-61176-High-61177-79704-Medium-[FdpOvnOvs] EgressFirewall with dnsName in uppercase can be created, and EgressFirewall should work with namespace that is longer than forth-three characters even after restart. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
ns := "test-egressfirewall-with-a-very-long-namespace-61176-61177"
exutil.By("1. Create a long namespace over 43 characters, create an EgressFirewall object with mixed of Allow and Deny rules.")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--ignore-not-found=true").Execute()
nsErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns).Execute()
o.Expect(nsErr).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, ns)
egressFW5 := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "dnsName",
rulevalue1: "WWW.GOOGLE.COM",
protocol1: "TCP",
portnumber1: 443,
ruletype2: "Deny",
rulename2: "dnsName",
rulevalue2: "www.facebook.com",
protocol2: "TCP",
portnumber2: 443,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW5.name, "-n", egressFW5.namespace)
egressFW5.createEgressFW5Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW5.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
e2e.Logf("\n egressfirewall is applied\n")
exutil.By("2. Create a test pod in the namespace")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc.AsAdmin())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1.name, "-n", pod1.namespace).Execute()
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("3. Check www.facebook.com is blocked \n")
o.Eventually(func() bool {
_, stderr, _ := e2eoutput.RunHostCmdWithFullOutput(pod1.namespace, pod1.name, "curl -I -k https://www.facebook.com --connect-timeout 5")
return stderr != ""
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected!!")
exutil.By("4. Check www.google.com is allowed \n")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -I -k https://www.google.com --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work as expected!!")
testPodNodeName, _ := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name)
o.Expect(testPodNodeName != "").Should(o.BeTrue())
e2e.Logf("node name for the test pod is: %v", testPodNodeName)
exutil.By("5. Check ACLs in northdb. \n")
masterOVNKubeNodePod := getOVNKMasterOVNkubeNode(oc)
o.Expect(masterOVNKubeNodePod != "").Should(o.BeTrue())
aclCmd := "ovn-nbctl --no-leader-only find acl|grep external_ids|grep test-egressfirewall-with-a-very-long-namespace ||true"
checkAclErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
aclOutput, aclErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", masterOVNKubeNodePod, aclCmd)
if aclErr != nil {
e2e.Logf("%v,Waiting for ACLs to be synced, try next ...,", aclErr)
return false, nil
}
// check ACLs rules for the long namespace
if strings.Contains(aclOutput, "test-egressfirewall-with-a-very-long-namespace") && strings.Count(aclOutput, "test-egressfirewall-with-a-very-long-namespace") == 4 {
e2e.Logf("The ACLs for egressfirewall in northbd are as expected!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkAclErr, "ACLs were not synced correctly!")
exutil.By("6. Restart OVNK nodes\n")
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
delPodErr := oc.AsAdmin().Run("delete").Args("pod", "-l", "app=ovnkube-node", "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("7. Check ACL again in northdb after restart. \n")
// since ovnkube-node pods are re-created during restart, obtain ovnMasterOVNkubeNodePod again
masterOVNKubeNodePod = getOVNKMasterOVNkubeNode(oc)
o.Expect(masterOVNKubeNodePod != "").Should(o.BeTrue())
checkAclErr = wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
aclOutput, aclErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", masterOVNKubeNodePod, aclCmd)
if aclErr != nil {
e2e.Logf("%v,Waiting for ACLs to be synced, try next ...,", aclErr)
return false, nil
}
// check ACLs rules for the long namespace after restart
if strings.Contains(aclOutput, "test-egressfirewall-with-a-very-long-namespace") && strings.Count(aclOutput, "test-egressfirewall-with-a-very-long-namespace") == 4 {
e2e.Logf("The ACLs for egressfirewall in northbd are as expected!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkAclErr, "ACLs were not synced correctly!")
exutil.By("8. Check egressfirewall rules still work correctly after restart \n")
o.Eventually(func() bool {
_, stderr, _ := e2eoutput.RunHostCmdWithFullOutput(pod1.namespace, pod1.name, "curl -I -k https://www.facebook.com --connect-timeout 5")
return stderr != ""
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work correctly after restart!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -I -k https://www.google.com --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work correctly after restart!!")
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:jechen-High-37774-Set EgressFirewall to limit the pod connection to specific CIDR ranges in different namespaces.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
url1 := "www.yahoo.com" // used as Deny rule for first namespace
url2 := "www.ericsson.com" // used as Deny rule for second namespace
url3 := "www.google.com" // is not used as Deny rule in either namespace
exutil.By("1. nslookup obtain dns server ip for url1 and url2\n")
ips1, err := net.LookupIP(url1)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ip address from nslookup for %v: %v", url1, ips1)
ips2, err := net.LookupIP(url2)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ip address from lookup for %v: %v", url2, ips2)
ips3, err := net.LookupIP(url3)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ip address from lookup for %v: %v", url3, ips3)
ipStackType := checkIPStackType(oc)
e2e.Logf("\n ipStackType: %v\n", ipStackType)
// get all IPv4 and IPv6 addresses of 3 hosts above
var ipv4Addr1, ipv6Addr1, ipv4Addr2, ipv6Addr2, ipv4Addr3, ipv6Addr3 []string
for j := 0; j <= len(ips1)-1; j++ {
if IsIPv4(ips1[j].String()) {
ipv4Addr1 = append(ipv4Addr1, ips1[j].String())
}
if IsIPv6(ips1[j].String()) {
ipv6Addr1 = append(ipv6Addr1, ips1[j].String())
}
}
for j := 0; j <= len(ips2)-1; j++ {
if IsIPv4(ips2[j].String()) {
ipv4Addr2 = append(ipv4Addr2, ips2[j].String())
}
if IsIPv6(ips2[j].String()) {
ipv6Addr2 = append(ipv6Addr2, ips2[j].String())
}
}
for j := 0; j <= len(ips3)-1; j++ {
if IsIPv4(ips3[j].String()) {
ipv4Addr3 = append(ipv4Addr3, ips3[j].String())
}
if IsIPv6(ips3[j].String()) {
ipv6Addr3 = append(ipv6Addr3, ips3[j].String())
}
}
e2e.Logf("ipv4Address1: %v, ipv6Address1: %v\n\n", ipv4Addr1, ipv6Addr1)
e2e.Logf("ipv4Address2: %v, ipv6Address2: %v\n\n", ipv4Addr2, ipv6Addr2)
e2e.Logf("ipv4Address3: %v, ipv6Address3: %v\n\n", ipv4Addr3, ipv6Addr3)
//Store IPv4 addresses of the 3 hosts above in ip1, ip2, ip3
//Store IPv6 addresses of the 3 hosts above in ip4, ip5, ip6
var cidrValue1, cidrValue2, cidrValue3, cidrValue4, ip1, ip2, ip3, ip4, ip5, ip6 string
if ipStackType == "ipv6single" {
if len(ipv6Addr1) < 2 || len(ipv6Addr2) < 2 || len(ipv6Addr3) < 2 {
g.Skip("Not enough IPv6 address for the hosts that are used in this test with v6 single cluster, need two IPv6 addresses from each host, skip the test.")
}
ip1 = ipv6Addr1[0]
ip2 = ipv6Addr2[0]
ip3 = ipv6Addr3[0]
cidrValue1 = ip1 + "/128"
cidrValue2 = ip2 + "/128"
ip4 = ipv6Addr1[1]
ip5 = ipv6Addr2[1]
ip6 = ipv6Addr3[1]
cidrValue3 = ip4 + "/128"
cidrValue4 = ip5 + "/128"
} else if ipStackType == "ipv4single" {
if len(ipv4Addr1) < 2 || len(ipv4Addr2) < 2 || len(ipv4Addr3) < 2 {
g.Skip("Not enough IPv4 address for the hosts that are used in this test with V4 single cluster, need two IPv4 addresses from each host, skip the test.")
}
ip1 = ipv4Addr1[0]
ip2 = ipv4Addr2[0]
ip3 = ipv4Addr3[0]
cidrValue1 = ip1 + "/32"
cidrValue2 = ip2 + "/32"
ip4 = ipv4Addr1[1]
ip5 = ipv4Addr2[1]
ip6 = ipv4Addr3[1]
cidrValue3 = ip4 + "/32"
cidrValue4 = ip5 + "/32"
} else if ipStackType == "dualstack" {
if len(ipv4Addr1) < 1 || len(ipv4Addr2) < 1 || len(ipv4Addr3) < 1 || len(ipv6Addr1) < 1 || len(ipv6Addr2) < 1 || len(ipv6Addr3) < 1 {
g.Skip("Not enough IPv4 or IPv6 address for the hosts that are used in this test with dualstack cluster, need at least one IPv4 and one IPv6 address from each host, skip the test.")
}
ip1 = ipv4Addr1[0]
ip2 = ipv4Addr2[0]
ip3 = ipv4Addr3[0]
cidrValue1 = ip1 + "/32"
cidrValue2 = ip2 + "/32"
ip4 = ipv6Addr1[0]
ip5 = ipv6Addr2[0]
ip6 = ipv6Addr3[0]
cidrValue3 = ip4 + "/128"
cidrValue4 = ip5 + "/128"
}
e2e.Logf("\n cidrValue1: %v, cidrValue2: %v\n", cidrValue1, cidrValue2)
e2e.Logf("\n IP1: %v, IP2: %v, IP3: %v\n", ip1, ip2, ip3)
e2e.Logf("\n cidrValue3: %v, cidrValue4: %v\n", cidrValue3, cidrValue4)
e2e.Logf("\n IP4: %v, IP5: %v, IP6: %v\n", ip4, ip5, ip6)
exutil.By("2. Obtain first namespace, create egressfirewall1 in it\n")
ns1 := oc.Namespace()
egressFW1 := egressFirewall5{
name: "default",
namespace: ns1,
ruletype1: "Deny",
rulename1: "cidrSelector",
rulevalue1: cidrValue1,
protocol1: "TCP",
portnumber1: 443,
ruletype2: "Allow",
rulename2: "dnsName",
rulevalue2: "www.redhat.com",
protocol2: "TCP",
portnumber2: 443,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW1.name, "-n", egressFW1.namespace)
egressFW1.createEgressFW5Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW1.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
e2e.Logf("\n egressfirewall is applied\n")
exutil.By("3. Create a test pod in first namespace")
pod1ns1 := pingPodResource{
name: "hello-pod1",
namespace: ns1,
template: pingPodTemplate,
}
pod1ns1.createPingPod(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1ns1.name, "-n", pod1ns1.namespace).Execute()
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("4. Create a second namespace, and create egressfirewall2 in it\n")
oc.SetupProject()
ns2 := oc.Namespace()
egressFW2 := egressFirewall5{
name: "default",
namespace: ns2,
ruletype1: "Deny",
rulename1: "cidrSelector",
rulevalue1: cidrValue2,
protocol1: "TCP",
portnumber1: 443,
ruletype2: "Deny",
rulename2: "dnsName",
rulevalue2: "www.redhat.com",
protocol2: "TCP",
portnumber2: 443,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW2.name, "-n", egressFW2.namespace)
egressFW2.createEgressFW5Object(oc)
efErr = waitEgressFirewallApplied(oc, egressFW2.name, ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
e2e.Logf("\n egressfirewall is applied\n")
exutil.By("5. Create a test pod in second namespace")
pod2ns2 := pingPodResource{
name: "hello-pod2",
namespace: ns2,
template: pingPodTemplate,
}
pod2ns2.createPingPod(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod2ns2.name, "-n", pod2ns2.namespace).Execute()
waitPodReady(oc, pod2ns2.namespace, pod2ns2.name)
// for v4 single, test v4 CIDR first, then test it be replaced by another v4 CIDR
// for V6 single, test v4 CIDR first, then test it be replaced by another v4 CIDR
// for dualStack, test v4 CIDR first, then test it be replaced by another v6 CIDR
var curlCmd1, curlCmd2, curlCmd3, newCurlCmd1, newCurlCmd2, newCurlCmd3 string
if ipStackType == "ipv4single" {
curlCmd1 = "curl -I -4 -k https://" + url1 + " --resolve " + url1 + ":443:" + ip1 + " --connect-timeout 5"
curlCmd2 = "curl -I -4 -k https://" + url2 + " --resolve " + url2 + ":443:" + ip2 + " --connect-timeout 5"
curlCmd3 = "curl -I -4 -k https://" + url3 + " --resolve " + url3 + ":443:" + ip3 + " --connect-timeout 5"
newCurlCmd1 = "curl -I -4 -k https://" + url1 + " --resolve " + url1 + ":443:" + ip4 + " --connect-timeout 5"
newCurlCmd2 = "curl -I -4 -k https://" + url2 + " --resolve " + url2 + ":443:" + ip5 + " --connect-timeout 5"
newCurlCmd3 = "curl -I -4 -k https://" + url3 + " --resolve " + url3 + ":443:" + ip6 + " --connect-timeout 5"
} else if ipStackType == "ipv6single" {
curlCmd1 = "curl -I -6 -k https://" + url1 + " --resolve " + url1 + ":443:[" + ip1 + "] --connect-timeout 5"
curlCmd2 = "curl -I -6 -k https://" + url2 + " --resolve " + url2 + ":443:[" + ip2 + "] --connect-timeout 5"
curlCmd3 = "curl -I -6 -k https://" + url3 + " --resolve " + url3 + ":443:[" + ip3 + "] --connect-timeout 5"
newCurlCmd1 = "curl -I -6 -k https://" + url1 + " --resolve " + url1 + ":443:[" + ip4 + "] --connect-timeout 5"
newCurlCmd2 = "curl -I -6 -k https://" + url2 + " --resolve " + url2 + ":443:[" + ip5 + "] --connect-timeout 5"
newCurlCmd3 = "curl -I -6 -k https://" + url3 + " --resolve " + url3 + ":443:[" + ip6 + "] --connect-timeout 5"
} else if ipStackType == "dualstack" { // for dualstack, use v6 CIDR to replace v4 CIDR
curlCmd1 = "curl -I -4 -k https://" + url1 + " --resolve " + url1 + ":443:" + ip1 + " --connect-timeout 5"
curlCmd2 = "curl -I -4 -k https://" + url2 + " --resolve " + url2 + ":443:" + ip2 + " --connect-timeout 5"
curlCmd3 = "curl -I -4 -k https://" + url3 + " --resolve " + url3 + ":443:" + ip3 + " --connect-timeout 5"
newCurlCmd1 = "curl -I -6 -k https://" + url1 + " --resolve " + url1 + ":443:[" + ip4 + "] --connect-timeout 5"
newCurlCmd2 = "curl -I -6 -k https://" + url2 + " --resolve " + url2 + ":443:[" + ip5 + "] --connect-timeout 5"
newCurlCmd3 = "curl -I -6 -k https://" + url3 + " --resolve " + url3 + ":443:[" + ip6 + "] --connect-timeout 5"
}
exutil.By("\n6.1. Check deny rule of first namespace is blocked from test pod of first namespace because of the deny rule in first namespace\n")
_, err1 := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, curlCmd1)
o.Expect(err1).To(o.HaveOccurred(), "curl the deny rule of first namespace from first namespace failed")
exutil.By("\n6.2. Check deny rule of second namespce is allowed from test pod of first namespace, it is not affected by deny rile in second namespace\n")
_, err2 := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, curlCmd2)
o.Expect(err2).NotTo(o.HaveOccurred(), "curl the deny rule of second namespace from first namespace failed")
exutil.By("\n6.3. Check url3 is allowed from test pod of first namespace, it is not affected by either deny rule of two namespaces\n")
_, err3 := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, curlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from first namesapce failed")
exutil.By("\n7.1. Check deny rule of first namespace is allowed from test pod of second namespace, it is not affected by deny rule in first namespace\n")
_, err1 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, curlCmd1)
o.Expect(err1).NotTo(o.HaveOccurred(), "curl the deny rule of second namespace from first namespace failed")
exutil.By("\n7.2. Check deny rule in second namespace is blocked from test pod of second namespace because of the deny rule in second namespace\n")
_, err2 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, curlCmd2)
o.Expect(err2).To(o.HaveOccurred(), "curl the deny rule of second namespace from second namespace failed")
exutil.By("\n7.3. Check url3 is allowed from test pod of second namespace, it is not affected by either deny rule of two namespaces\n")
_, err3 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, curlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from first namesapce failed")
exutil.By("\n\n8. Replace CIDR of first rule of each egressfirewall with another CIDR \n\n")
change1 := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/cidrSelector\", \"value\":\"" + cidrValue3 + "\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
change2 := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/cidrSelector\", \"value\":\"" + cidrValue4 + "\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newCidr, cidrErr := oc.AsAdmin().Run("get").Args("-n", ns1, "egressfirewall.k8s.ovn.org/default", "-o=jsonpath={.spec.egress[0].to.cidrSelector}").Output()
o.Expect(cidrErr).NotTo(o.HaveOccurred())
o.Expect(newCidr == cidrValue3).Should(o.BeTrue())
e2e.Logf("\n\nnew CIDR for first rule in first namespace %v is %v\n\n", ns1, newCidr)
newCidr, cidrErr = oc.AsAdmin().Run("get").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "-o=jsonpath={.spec.egress[0].to.cidrSelector}").Output()
o.Expect(cidrErr).NotTo(o.HaveOccurred())
o.Expect(newCidr == cidrValue4).Should(o.BeTrue())
e2e.Logf("\n\nnew CIDR for first rule in second namespace %v is %v\n\n", ns2, newCidr)
exutil.By("\n\n Repeat curl tests with after CIDR update \n\n")
exutil.By("\n8.1 Curl deny rule of first namespace from first namespace\n")
_, err1 = e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, newCurlCmd1)
o.Expect(err1).To(o.HaveOccurred(), "curl the deny rule of first namespace from first namespace failed after CIDR update")
exutil.By("\n8.2 Curl deny rule of second namespace from first namespace\n")
_, err2 = e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, newCurlCmd2)
o.Expect(err2).NotTo(o.HaveOccurred(), "curl the deny rule of second namespace from first namespace failed after CIDR update")
exutil.By("\n8.3 Curl url with no rule from first namespace\n")
_, err3 = e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, newCurlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from first namesapce failed after CIDR update")
exutil.By("\n8.4 Curl deny rule of first namespace from second namespace\n")
_, err1 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, newCurlCmd1)
o.Expect(err1).NotTo(o.HaveOccurred(), "curl the deny rule of first namespace from second namespace failed after CIDR update")
exutil.By("\n8.5 Curl deny rule of second namespace from second namespace\n")
_, err2 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, newCurlCmd2)
o.Expect(err2).To(o.HaveOccurred(), "curl the deny rule of second namespace from second namespace failed after CIDR update")
exutil.By("\n8.6 Curl url with no rule from second namespace\n")
_, err3 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, newCurlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from second namesapce failed after CIDR update")
exutil.By("\n9. Change the Allow rule of egressfirewall of first namespace to be denied\n")
change := "[{\"op\":\"replace\",\"path\":\"/spec/egress/1/type\", \"value\":\"Deny\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// After second rule in first namespace is changed from Allow to Deny, access to www.redhat.com should be blocked from first namespace
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -4 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in first namespace after rule change for IPv4!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -4 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in second namespace for IPv4!!")
if ipStackType == "dualstack" {
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -6 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in first namespace after rule change for IPv6 !!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -6 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in second namespace for IPv6!!")
}
exutil.By("\n10. Change the second Deny rule of egressfirewall of second namespace to be allowed\n")
change = "[{\"op\":\"replace\",\"path\":\"/spec/egress/1/type\", \"value\":\"Allow\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// After second rule in second namespace is changed from Deny to Allow, access to www.redhat.com should be still be blocked from first namespace but allowed from second namespace
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -4 https://www.redhat.com/en --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "After rule change, Allow rule in second namespace does not affect first namespace for IPv4!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -4 https://www.redhat.com/en --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work as expected in second namespace after rule change for IPv4!!")
if ipStackType == "dualstack" {
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -6 https://www.redhat.com/en --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "After rule change, Allow rule in second namespace does not affect first namespace for IPv6!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -6 https://www.redhat.com/en --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work as expected in second namespace after rule change for IPv6 !!")
}
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-High-65173-Misconfigured Egress Firewall can be corrected.", func() {
//This is from customer bug https://issues.redhat.com/browse/OCPBUGS-15182
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressFWTemplate2 = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
)
exutil.By("Obtain the namespace \n")
ns := oc.Namespace()
exutil.By("Create an EgressFirewall with missing cidr prefix\n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: "1.1.1.1",
template: egressFWTemplate2,
}
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify EgressFirewall was not applied correctly\n")
checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
output, efErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", "-n", ns, egressFW2.name).Output()
if efErr != nil {
e2e.Logf("Failed to get egressfirewall %v, error: %s. Trying again", egressFW2, efErr)
return false, nil
}
if !strings.Contains(output, "EgressFirewall Rules not correctly applied") {
e2e.Logf("The egressfirewall output message not expexted, trying again. \n %s", output)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("EgressFirewall with missing cidr prefix should not be applied correctly!"))
exutil.By("Apply EgressFirewall again with correct cidr\n")
egressFW2.cidr = "1.1.1.0/24"
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify EgressFirewall was applied correctly\n")
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-High-72054-EgressFirewall rules should include all IPs of matched node when nodeSelector is used.", func() {
// https://issues.redhat.com/browse/OCPBUGS-13665
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall3-template.yaml")
exutil.By("1. Label one node to match egressfirewall rule")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
// node1 is going to be labelled to be a matched node, node2 is not labelled so it is not a matched node
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, node1, "ef-dep")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, node1, "ef-dep", "qe")
// Get all host IPs of both nodes
allNode1IPsv4, allNode1IPsv6 := getAllHostCIDR(oc, node1)
allNode2IPsv4, allNode2IPRv6 := getAllHostCIDR(oc, node2)
exutil.By("2. Get new namespace")
ns := oc.Namespace()
exutil.By("3. Create a pod in the namespace")
testPod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
testPod.createPingPod(oc)
waitPodReady(oc, testPod.namespace, testPod.name)
exutil.By("4.Check the nodes can be acccessed before egressFirewall with nodeSelector is applied")
if !checkNodeAccessibilityFromAPod(oc, node1, testPod.namespace, testPod.name) || !checkNodeAccessibilityFromAPod(oc, node2, testPod.namespace, testPod.name) {
g.Skip("Pre-test check failed, test is skipped!")
}
exutil.By(" 5. Create an egressFirewall with rule nodeSelector.")
ipStackType := checkIPStackType(oc)
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0" // for Dualstack, test with v4 CIDR first, then test V6 CIDR later
}
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By(" 6. Verify Egress firewall rules in NBDB of all nodes.")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s | grep allow", ns)
nodelist, nodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(nodeErr).NotTo(o.HaveOccurred())
o.Expect(len(nodelist)).NotTo(o.BeEquivalentTo(0))
for _, eachNode := range nodelist {
ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", eachNode)
o.Expect(podErr).NotTo(o.HaveOccurred())
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKubePod, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
// egressFirewall rules should include all the IPs of the matched node1 in NBDB, but do not include IPs for unmatched node2
if ipStackType == "dualstack" || ipStackType == "ipv4single" {
for _, nodeIPv4Addr := range allNode1IPsv4 {
o.Expect(listOutput).Should(o.ContainSubstring(nodeIPv4Addr), fmt.Sprintf("%s for node %s is not in egressfirewall rules as expected", nodeIPv4Addr, node1))
}
for _, nodeIPv4Addr := range allNode2IPsv4 {
o.Expect(listOutput).ShouldNot(o.ContainSubstring(nodeIPv4Addr), fmt.Sprintf("%s for node %s should not be in egressfirewall rules", nodeIPv4Addr, node2))
}
}
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
for _, nodeIPv6Addr := range allNode1IPsv6 {
o.Expect(listOutput).Should(o.ContainSubstring(nodeIPv6Addr), fmt.Sprintf("%s for node %s is not in egressfirewall rules as expected", nodeIPv6Addr, node1))
}
for _, nodeIPv6Addr := range allNode2IPRv6 {
o.Expect(listOutput).ShouldNot(o.ContainSubstring(nodeIPv6Addr), fmt.Sprintf("%s for node %s should not be in egressfirewall rules", nodeIPv6Addr, node2))
}
}
}
exutil.By(" 7. Verified matched node can be accessed from all its interfaces, unmatched node can not be accessed from any of its interfaces.")
result1 := checkNodeAccessibilityFromAPod(oc, node1, testPod.namespace, testPod.name)
o.Expect(result1).Should(o.BeTrue())
result2 := checkNodeAccessibilityFromAPod(oc, node2, testPod.namespace, testPod.name)
o.Expect(result2).Should(o.BeFalse())
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
// Delete original egressFirewall, recreate the egressFirewall with IPv6 CIDR, then check access to nodes through IPv6 interfaces
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
result1 := checkNodeAccessibilityFromAPod(oc, node1, testPod.namespace, testPod.name)
o.Expect(result1).Should(o.BeTrue())
result2 := checkNodeAccessibilityFromAPod(oc, node2, testPod.namespace, testPod.name)
o.Expect(result2).Should(o.BeFalse())
}
})
// author: [email protected]
g.It("Author:huirwang-ConnectedOnly-Medium-67491-[FdpOvnOvs] EgressFirewall works with ANP, BANP and NP for egress traffic.", func() {
ipStackType := checkIPStackType(oc)
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "none")
if !(ipStackType == "ipv4single" || (acceptedPlatform && ipStackType == "dualstack")) {
g.Skip("This case should be run on UPI packet dualstack cluster or IPv4 cluster, skip other platform or network stack type.")
}
var (
testID = "67491"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
pingPodTemplate = filepath.Join(testDataDir, "ping-for-pod-template.yaml")
egressFWTemplate = filepath.Join(testDataDir, "egressfirewall2-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-egress-single-CIDR-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
)
exutil.By("Get test namespace")
ns := oc.Namespace()
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("4. Create a Baseline Admin Network Policy with deny action to cidr")
banpCR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: ns,
ruleName: "default-deny-to-" + ns,
ruleAction: "Deny",
cidr: "0.0.0.0/0",
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("Get one IP address for domain name www.google.com")
ipv4, ipv6 := getIPFromDnsName("www.google.com")
o.Expect(len(ipv4) == 0).NotTo(o.BeTrue())
exutil.By("Create an EgressFirewall \n")
egressFW := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Allow",
cidr: ipv4 + "/32",
template: egressFWTemplate,
}
egressFW.createEgressFW2Object(oc)
err = waitEgressFirewallApplied(oc, egressFW.name, ns)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv4, false)
exutil.By("Remove BANP")
removeResource(oc, true, true, "banp", banpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv4, true)
exutil.By("Create ANP with deny action to cidr")
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-" + testID,
subjectKey: matchLabelKey,
subjectVal: ns,
priority: 10,
ruleName: "allow-to-" + ns,
ruleAction: "Deny",
cidr: "0.0.0.0/0",
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv4, false)
exutil.By("Remove ANP")
removeResource(oc, true, true, "anp", anpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv4, true)
exutil.By("Create Network Policy with limited access to cidr which is not same as egressfirewall")
npIPBlock := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: "1.1.1.1/32",
namespace: ns,
}
npIPBlock.createipBlockCIDRObjectSingle(oc)
output, err = oc.AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress"))
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv4, false)
exutil.By("Remove network policy")
removeResource(oc, true, true, "-n", ns, "networkpolicy", npIPBlock.name)
if ipStackType == "dualstack" {
// Retest with ipv6 address
if !checkIPv6PublicAccess(oc) {
g.Skip("Not be able to access the public website with IPv6,skip below test steps!!")
}
o.Expect(len(ipv6) == 0).NotTo(o.BeTrue())
exutil.By("Create ANP with deny action to ipv6 cidr")
banpCR.cidr = "::/0"
banpCR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("Update egressfirewall with ipv6 address")
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv6+"/128\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv6, false)
exutil.By("Remove BANP")
removeResource(oc, true, true, "banp", banpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv6, true)
exutil.By("Create ANP")
anpCR.cidr = "::/0"
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv6, false)
exutil.By("Remove ANP")
removeResource(oc, true, true, "anp", anpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv6, true)
exutil.By("Create Network Policy")
npIPBlock.cidr = "2001::02/128"
npIPBlock.createipBlockCIDRObjectSingle(oc)
output, err = oc.AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress"))
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv6, false)
}
})
// author: [email protected]
g.It("Author:huirwang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-74657-EgressFirewall nodeSelector works after some specific operations. [Disruptive]", func() {
//https://issues.redhat.com/browse/OCPBUGS-34331
exutil.By("Get worker nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall3-template.yaml")
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create egressfirewall from file")
type egressFirewallConfig struct {
Domains []string
}
outputEFFilePath := "/tmp/egress_firewall_8000.yaml"
domainsPerFile := 7999
yamlTemplate := `apiVersion: k8s.ovn.org/v1
kind: EgressFirewall
metadata:
name: default
spec:
egress:
{{- range .Domains }}
- type: Allow
to:
dnsName: {{ . }}
{{- end }}
- type: Deny
to:
cidrSelector: 0.0.0.0/0
`
// Parse the YAML template
tmpl, err := template.New("egressFirewall").Parse(yamlTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
// Generate the egressfirewall file
domains := make([]string, domainsPerFile)
for i := 0; i < domainsPerFile; i++ {
domains[i] = fmt.Sprintf("fake-domain-%d.com", i+1)
}
// Create the EgressFirewallConfig struct
config := egressFirewallConfig{Domains: domains}
// Open the output file
defer os.Remove(outputEFFilePath)
outputFile, err := os.Create(outputEFFilePath)
o.Expect(err).NotTo(o.HaveOccurred())
// Execute the template and write to the file
err = tmpl.Execute(outputFile, config)
o.Expect(err).NotTo(o.HaveOccurred())
outputFile.Close()
e2e.Logf("Successfully generated %s\n", outputEFFilePath)
err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f", outputEFFilePath, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall/default", "-n", ns).Output()
e2e.Logf("\n EgressFiewall status: %v\n", output)
return strings.Contains(output, "EgressFirewall Rules applied")
}, "1800s", "30s").Should(o.BeTrue(), "Egressfiewall Rules were not correctly applied!!")
exutil.By("Delete the egressfirewall and stop nbdb for one node")
removeResource(oc, true, true, "egressfirewall/default", "-n", ns)
killNBDBCmd := "crictl stop $(crictl ps | grep nbdb | awk '{print $1}')"
_, debugNodeErr := exutil.DebugNodeWithChroot(oc, node1, "bash", "-c", killNBDBCmd)
o.Expect(debugNodeErr).NotTo(o.HaveOccurred())
exutil.By("Create second namespace and two pods")
oc.SetupProject()
ns2 := oc.Namespace()
pod1ns2 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns2,
nodename: node1,
template: pingPodNodeTemplate,
}
pod1ns2.createPingPodNode(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
pod2ns2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: node2,
template: pingPodNodeTemplate,
}
pod2ns2.createPingPodNode(oc)
waitPodReady(oc, pod2ns2.namespace, pod2ns2.name)
exutil.By("Get one master node IP.")
master1, err := exutil.GetFirstMasterNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
master1IP1, master1IP2 := getNodeIP(oc, master1)
_, err = e2eoutput.RunHostCmd(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP2)
if err != nil {
g.Skip("Ping node IP failed without egressfirewall, skip the test in this environment.")
}
exutil.By("Create EgressFirewall object with nodeSelector.")
ipStackType := checkIPStackType(oc)
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0"
}
egressFW2 := egressFirewall2{
name: "default",
namespace: ns2,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
efErr := waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the master node can NOT be accessed from both pods")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP2)
return err
}, "60s", "10s").Should(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "ping -c 2 "+master1IP2)
return err
}, "10s", "5s").Should(o.HaveOccurred())
exutil.By("Label the master node which would match the egressfirewall.")
defer exutil.DeleteLabelFromNode(oc, master1, "ef-dep")
exutil.AddLabelToNode(oc, master1, "ef-dep", "qe")
exutil.By("Verify the master node can be accessed from both pods")
_, err = e2eoutput.RunHostCmdWithRetries(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP2, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmdWithRetries(pod2ns2.namespace, pod2ns2.name, "ping -c 2 "+master1IP2, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
if ipStackType == "dualstack" {
// Test node ipv6 address as well
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the master node can be accessed from both pods with IPv6")
_, err = e2eoutput.RunHostCmdWithRetries(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP1, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmdWithRetries(pod2ns2.namespace, pod2ns2.name, "ping -c 2 "+master1IP1, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
}
})
g.It("Author:asood-ConnectedOnly-High-78162-Egress traffic works with ANP and egress firewall.", func() {
ipStackType := checkIPStackType(oc)
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "none")
if !(ipStackType == "ipv4single" || (acceptedPlatform && ipStackType == "dualstack")) {
g.Skip("This case should be run on UPI packet dualstack cluster or IPv4 cluster, skip other platform or network stack type.")
}
var (
testID = "78162"
testDataDir = exutil.FixturePath("testdata", "networking")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
pingPodTemplate = filepath.Join(testDataDir, "ping-for-pod-template.yaml")
egressFWTemplate = filepath.Join(testDataDir, "egressfirewall2-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
allowedIPList = []string{}
deniedIPList = []string{}
patchEfw string
patchANP string
)
exutil.By("1. Obtain the namespace")
ns := oc.Namespace()
exutil.By("2. Create a pod ")
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("3. Get an IP address for domain name www.google.com for allow rule and www.facebook.com for deny rule validation")
allowedIPv4, allowedIPv6 := getIPFromDnsName("www.google.com")
o.Expect(len(allowedIPv4) == 0).NotTo(o.BeTrue())
ipv4CIDR := allowedIPv4 + "/32"
allowedIPList = append(allowedIPList, allowedIPv4)
deniedIPv4, deniedIPv6 := getIPFromDnsName("www.facebook.com")
o.Expect(len(deniedIPv4) == 0).NotTo(o.BeTrue())
deniedIPList = append(deniedIPList, deniedIPv4)
// patch payload for egress firewall and ANP
patchEfw = "[{\"op\": \"add\", \"path\":\"/spec/egress/1\", \"value\": {\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}}]"
patchANP = "[{\"op\": \"add\", \"path\": \"/spec/egress/1\", \"value\": {\"name\":\"deny egresss\", \"action\": \"Deny\", \"to\": [{\"networks\": [\"0.0.0.0/0\"]}]}}]"
if ipStackType == "dualstack" {
if checkIPv6PublicAccess(oc) {
o.Expect(len(allowedIPv6) == 0).NotTo(o.BeTrue())
ipv6CIDR := allowedIPv6 + "/128"
allowedIPList = append(allowedIPList, allowedIPv6)
o.Expect(len(deniedIPv6) == 0).NotTo(o.BeTrue())
deniedIPList = append(deniedIPList, deniedIPv6)
patchEfw = "[{\"op\": \"add\", \"path\":\"/spec/egress/1\", \"value\": {\"type\":\"Allow\",\"to\":{\"cidrSelector\":\"" + ipv6CIDR + "\"}}}, {\"op\": \"add\", \"path\":\"/spec/egress/2\", \"value\": {\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}}, {\"op\": \"add\", \"path\":\"/spec/egress/3\", \"value\": {\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}}]"
patchANP = "[{\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\": \"" + ipv6CIDR + "\"}, {\"op\": \"add\", \"path\": \"/spec/egress/1\", \"value\": {\"name\":\"deny egresss\", \"action\": \"Deny\", \"to\": [{\"networks\": [\"0.0.0.0/0\", \"::/0\"]}]}}]"
} else {
e2e.Logf("Dual stack cluster does not have access to public websites")
}
}
egressFW := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Allow",
cidr: allowedIPv4 + "/32",
template: egressFWTemplate,
}
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-network-egress" + testID,
subjectKey: matchLabelKey,
subjectVal: ns,
priority: 10,
ruleName: "allow-to-" + ns,
ruleAction: "Allow",
cidr: ipv4CIDR,
template: anpCRTemplate,
}
exutil.By("5. Verify the intended denied IP is reachable before egress firewall is applied")
for i := 0; i < len(deniedIPList); i++ {
e2e.Logf("Verify %s is accessible before egress firewall is applied", deniedIPList[i])
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], true)
}
exutil.By("6. Create egress firewall")
egressFW.createEgressFW2Object(oc)
err := waitEgressFirewallApplied(oc, egressFW.name, ns)
o.Expect(err).NotTo(o.HaveOccurred())
patchReplaceResourceAsAdmin(oc, "egressfirewall/default", patchEfw, ns)
efwRules, efwRulesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ns, "egressfirewall", "default", "-o=jsonpath={.spec.egress}").Output()
o.Expect(efwRulesErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Egress Firewall Rules after update : %s", efwRules)
exutil.By("7. Validate traffic after egress firewall is applied")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible with just egress firewall", allowedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, allowedIPList[i], true)
exutil.By(fmt.Sprintf("Verify %s is not accessible with just egress firewall", deniedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], false)
}
exutil.By("8. Create ANP with Allow action to an IP and Deny action to all CIDRs")
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
patchReplaceResourceAsAdmin(oc, "anp/"+anpCR.name, patchANP)
anpRules, rulesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpCR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(rulesErr).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update : %s", anpRules)
exutil.By("9. Validate traffic with ANP and Egress firewall configured")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible after ANP is created", allowedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, allowedIPList[i], true)
exutil.By(fmt.Sprintf("Verify %s is not accessible after ANP is created", deniedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], false)
}
exutil.By("10. Remove Egress Firewall")
removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
exutil.By("11. Validate traffic with just ANP configured")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible after egress firewall is removed", allowedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, allowedIPList[i], true)
exutil.By(fmt.Sprintf("Verify %s is not accessible after egress firewall is removed", deniedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], false)
}
})
})
var _ = g.Describe("[sig-networking] SDN egressfirewall-techpreview", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-egressfirewall", exutil.KubeConfigPath())
g.BeforeEach(func() {
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("featureSet: TechPreviewNoUpgrade is required for this test")
}
if checkProxy(oc) || checkDisconnect(oc) {
g.Skip("This is proxy/disconnect cluster, skip the test.")
}
ipStackType := checkIPStackType(oc)
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "none")
if !(ipStackType == "ipv4single" || (acceptedPlatform && ipStackType == "dualstack")) {
g.Skip("This case should be run on UPI packet dualstack cluster or IPv4 cluster, skip other platform or network stack type.")
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Critical-73723-dnsName has wildcard in EgressFirewall rules.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard-dualstack.yaml")
exutil.By("Create egressfirewall file")
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns, efwDualstack)
} else {
createResourceFromFile(oc, ns, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Verify the allowed rules which match the wildcard take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", false)
exutil.By("Update the domain name to a litlle bit long domain name.")
updateValue := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/dnsName\", \"value\":\"*.whatever.you.like.here.followed.by.svc-1.google.com\"}]"
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", updateValue).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules which match the wildcard take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "type.whatever.you.like.here.followed.by.svc-1.google.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.google.com", false)
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73724-dnsName has same wildcard domain name in EgressFirewall rules in different namespaces.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard-dualstack.yaml")
exutil.By("Create a test pod in first namespace ")
ns1 := oc.Namespace()
pod1ns1 := pingPodResource{
name: "hello-pod",
namespace: ns1,
template: pingPodTemplate,
}
pod1ns1.createPingPod(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("Create a test pod in the second namespace ")
oc.SetupProject()
ns2 := oc.Namespace()
pod1ns2 := pingPodResource{
name: "hello-pod",
namespace: ns2,
template: pingPodTemplate,
}
pod1ns2.createPingPod(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
exutil.By("Create EgressFirewall in both namespaces ")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns1, efwDualstack)
createResourceFromFile(oc, ns2, efwDualstack)
} else {
createResourceFromFile(oc, ns1, efwSingle)
createResourceFromFile(oc, ns2, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules which match the wildcard take effect for both namespace.")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.google.com", true)
exutil.By("Verify other website which doesn't match the wildcard would be blocked")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", false)
exutil.By("Update the wildcard domain name to a different one in second namespace.")
updateValue := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/dnsName\", \"value\":\"*.redhat.com\"}]"
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", updateValue).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the udpated rule taking effect in second namespace.")
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.google.com", false)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", true)
exutil.By("Verify the egressfirewall rules in first namespace still works")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
exutil.By("Remove egressfirewall in first namespace.")
removeResource(oc, true, true, "egressfirewall/default", "-n", ns1)
exutil.By("Verify no blocking for the destination domain names in first namespace")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", true)
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Critical-73719-Allowing access to DNS names even if the IP addresses associated with them changes. [Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname-dualstack.yaml")
exutil.By("Create an egressfirewall file")
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns, efwDualstack)
} else {
createResourceFromFile(oc, ns, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Verify the allowed rules take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", false)
exutil.By("Verify dnsnameresolver contains the allowed dns names.")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).To(o.BeTrue())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73721-Medium-73722-Update domain name in EgressFirewall,EgressFirewall works after restart ovnkube-node pods. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname-dualstack.yaml")
exutil.By("Create egressfirewall file")
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns, efwDualstack)
} else {
createResourceFromFile(oc, ns, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Update the domain name to a different one.")
updateValue := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/dnsName\", \"value\":\"www.redhat.com\"}]"
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", updateValue).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "registry-1.docker.io", false)
exutil.By("The dns names in dnsnameresolver get udpated as well.")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: www.redhat.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).NotTo(o.BeTrue())
exutil.By("Restart the ovnkube-node pod ")
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
podNode, err := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name)
o.Expect(err).NotTo(o.HaveOccurred())
delPodErr := oc.AsAdmin().Run("delete").Args("pod", "-l", "app=ovnkube-node", "-n", "openshift-ovn-kubernetes", "--field-selector", "spec.nodeName="+podNode).Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
exutil.By("Wait for ovnkube-node pods back up.")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("Verify the function still works")
efErr = waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "registry-1.docker.io", false)
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73720-Same domain name in different namespaces should work correctly. [Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname-dualstack.yaml")
exutil.By("Create test pod in first namespace")
ns1 := oc.Namespace()
pod1ns1 := pingPodResource{
name: "hello-pod",
namespace: ns1,
template: pingPodTemplate,
}
pod1ns1.createPingPod(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("Create test pod in second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
pod1ns2 := pingPodResource{
name: "hello-pod",
namespace: ns2,
template: pingPodTemplate,
}
pod1ns2.createPingPod(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
exutil.By("Create egressfirewall in both namespaces")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns1, efwDualstack)
createResourceFromFile(oc, ns2, efwDualstack)
} else {
createResourceFromFile(oc, ns1, efwSingle)
createResourceFromFile(oc, ns2, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules take effect on both namespaces")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", false)
exutil.By("Delete egressfirewall in second namespace")
removeResource(oc, true, true, "egressfirewall/default", "-n", ns2)
exutil.By("Verify the previous blocked dns name can be accessed.")
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", true)
exutil.By("Verify dnsnameresolver still contains the allowed dns names.")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).To(o.BeTrue())
exutil.By("Verify egressfirewall in first namespace still works")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
exutil.By("Remove one domain name in first namespace")
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"dnsName\":\"registry-1.docker.io\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
} else {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"dnsName\":\"registry-1.docker.io\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
efErr = waitEgressFirewallApplied(oc, "default", ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify removed dns name will be blocked")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.facebook.com", false)
exutil.By("Verify removed dns name was removed from dnsnameresolver as well.")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).NotTo(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).To(o.BeTrue())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
f3eb2679-6fd2-4de2-9931-3bef0571a3c1
|
ConnectedOnly-Author:huirwang-High-53223-Verify ACL audit logs can be generated for traffic hit EgressFirewall rules.
|
['"context"', '"fmt"', '"path/filepath"', '"regexp"', '"text/template"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("ConnectedOnly-Author:huirwang-High-53223-Verify ACL audit logs can be generated for traffic hit EgressFirewall rules.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("2. Enable ACL looging on the namespace ns1 \n")
enableACLOnNamespace(oc, ns1, "info", "info")
exutil.By("3. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("4. Create an EgressFirewall \n")
egressFW1 := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate,
}
egressFW1.createEgressFWObject1(oc)
efErr := waitEgressFirewallApplied(oc, egressFW1.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("5. Check www.test.com is blocked \n")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.test.com --connect-timeout 5")
return err
}, "60s", "10s").Should(o.HaveOccurred())
exutil.By("6. Check www.redhat.com is allowed \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).ToNot(o.HaveOccurred())
exutil.By("7. Verify acl logs for egressfirewall generated. \n")
egressFwRegex := fmt.Sprintf("EF:%s:.*", ns1)
aclLogs, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
r := regexp.MustCompile(egressFwRegex)
matches := r.FindAllString(aclLogs, -1)
matched1, matchErr1 := regexp.MatchString(egressFwRegex+"verdict=drop, severity=info", aclLogs)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched1).To(o.BeTrue(), fmt.Sprintf("The egressfirewall acllogs were not generated as expected, acl logs for this namespace %s,are: \n %s", ns1, matches))
matched2, matchErr2 := regexp.MatchString(egressFwRegex+"verdict=allow, severity=info", aclLogs)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched2).To(o.BeTrue(), fmt.Sprintf("The egressfirewall acllogs were not generated as expected, acl logs for this namespace %s,are: \n %s", ns1, matches))
})
| |||||
test case
|
openshift/openshift-tests-private
|
d594d473-0dff-40af-874f-7c214170b1f0
|
ConnectedOnly-Author:huirwang-Medium-53224-Disable and enable acl logging for EgressFirewall.
|
['"context"', '"fmt"', '"path/filepath"', '"regexp"', '"text/template"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("ConnectedOnly-Author:huirwang-Medium-53224-Disable and enable acl logging for EgressFirewall.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("2. Enable ACL looging on the namespace ns1 \n")
enableACLOnNamespace(oc, ns1, "info", "info")
exutil.By("3. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("4. Create an EgressFirewall \n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns1,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
err = waitEgressFirewallApplied(oc, egressFW2.name, ns1)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
exutil.By("6. Verify acl logs for egressfirewall generated. \n")
egressFwRegex := fmt.Sprintf("EF:%s:.*", ns1)
aclLogs, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
r := regexp.MustCompile(egressFwRegex)
matches := r.FindAllString(aclLogs, -1)
aclLogNum := len(matches)
o.Expect(aclLogNum > 0).To(o.BeTrue(), fmt.Sprintf("No matched acl logs numbers for namespace %s, and actual matched logs are: \n %v ", ns1, matches))
exutil.By("7. Disable acl logs. \n")
disableACLOnNamespace(oc, ns1)
exutil.By("8. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
g.By("9. Verify no incremental acl logs. \n")
aclLogs2, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
matches2 := r.FindAllString(aclLogs2, -1)
aclLogNum2 := len(matches2)
o.Expect(aclLogNum2 == aclLogNum).To(o.BeTrue(), fmt.Sprintf("Before disable,actual matched logs are: \n %v ,after disable,actual matched logs are: \n %v", matches, matches2))
exutil.By("10. Enable acl logs. \n")
enableACLOnNamespace(oc, ns1, "alert", "alert")
exutil.By("11. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
g.By("12. Verify new acl logs for egressfirewall generated. \n")
aclLogs3, err3 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err3).NotTo(o.HaveOccurred())
matches3 := r.FindAllString(aclLogs3, -1)
aclLogNum3 := len(matches3)
o.Expect(aclLogNum3 > aclLogNum).To(o.BeTrue(), fmt.Sprintf("Previous actual matched logs are: \n %v ,after enable again,actual matched logs are: \n %v", matches, aclLogNum3))
})
| |||||
test case
|
openshift/openshift-tests-private
|
c7f9f3d8-8b58-48d2-8564-f34d192a121b
|
ConnectedOnly-Author:huirwang-Medium-53226-The namespace enabled acl logging will not affect the namespace not enabling acl logging.
|
['"context"', '"fmt"', '"path/filepath"', '"regexp"', '"text/template"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("ConnectedOnly-Author:huirwang-Medium-53226-The namespace enabled acl logging will not affect the namespace not enabling acl logging.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("2. Enable ACL looging on the namespace ns1 \n")
enableACLOnNamespace(oc, ns1, "info", "info")
exutil.By("3. create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("4. Create an EgressFirewall \n")
egressFW1 := egressFirewall2{
name: "default",
namespace: ns1,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW1.createEgressFW2Object(oc)
defer egressFW1.deleteEgressFW2Object(oc)
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
err = waitEgressFirewallApplied(oc, egressFW1.name, ns1)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Generate egress traffic which will hit the egressfirewall. \n")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
exutil.By("6. Verify acl logs for egressfirewall generated. \n")
egressFwRegex := fmt.Sprintf("EF:%s:.*", ns1)
aclLogs, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
r := regexp.MustCompile(egressFwRegex)
matches := r.FindAllString(aclLogs, -1)
aclLogNum := len(matches)
o.Expect(aclLogNum > 0).To(o.BeTrue())
exutil.By("7. Create a new namespace. \n")
oc.SetupProject()
ns2 := oc.Namespace()
exutil.By("8. create hello pod in ns2 \n")
pod2 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns2,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns2, pod2.name)
exutil.By("9. Generate egress traffic in ns2. \n")
_, err = e2eoutput.RunHostCmd(pod2.namespace, pod2.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("10. Verify no acl logs for egressfirewall generated in ns2. \n")
egressFwRegexNs2 := fmt.Sprintf("egressFirewall_%s_.*", ns2)
o.Consistently(func() int {
aclLogs2, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err).NotTo(o.HaveOccurred())
r2 := regexp.MustCompile(egressFwRegexNs2)
matches2 := r2.FindAllString(aclLogs2, -1)
return len(matches2)
}, 10*time.Second, 5*time.Second).Should(o.Equal(0))
exutil.By("11. Create an EgressFirewall in ns2 \n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns2,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
defer egressFW2.deleteEgressFW2Object(oc)
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns2, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
err = waitEgressFirewallApplied(oc, egressFW2.name, ns2)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("12. Generate egress traffic which will hit the egressfirewall in ns2. \n")
_, err = e2eoutput.RunHostCmd(pod2.namespace, pod2.name, "curl -s www.redhat.com --connect-timeout 5")
o.Expect(err).To(o.HaveOccurred())
exutil.By("13. Verify no acl logs for egressfirewall generated in ns2. \n")
o.Consistently(func() int {
aclLogs2, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, aclLogPath).Output()
o.Expect(err).NotTo(o.HaveOccurred())
r2 := regexp.MustCompile(egressFwRegexNs2)
matches2 := r2.FindAllString(aclLogs2, -1)
return len(matches2)
}, 10*time.Second, 5*time.Second).Should(o.Equal(0))
})
| |||||
test case
|
openshift/openshift-tests-private
|
7a9b25ba-9f83-4aa5-aa4f-88c5b799f178
|
Author:huirwang-NonHyperShiftHOST-ConnectedOnly-High-55345-[FdpOvnOvs] Drop ACL for EgressFirewall should have priority lower than allow ACL despite being last in the chain.
|
['"fmt"', '"path/filepath"', '"strconv"', '"strings"', '"text/template"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("Author:huirwang-NonHyperShiftHOST-ConnectedOnly-High-55345-[FdpOvnOvs] Drop ACL for EgressFirewall should have priority lower than allow ACL despite being last in the chain.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate2 = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
egressFWTemplate1 = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
exutil.By("Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("create hello pod in ns1 \n")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("Create an EgressFirewall \n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns1,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate2,
}
egressFW2.createEgressFW2Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Apply another EgressFirewall with allow rules under same namespace \n")
egressFW := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate1,
}
egressFW.createEgressFWObject1(oc)
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"dnsName\":\"www.test.com\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, egressFW.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Check the result, default deny rules should have lower priority than allow rules\n")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s", ns1)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
strLines := strings.Split(listOutput, "\n")
o.Expect(len(strLines) >= 2).Should(o.BeTrue(), fmt.Sprintf("The output of acl list is not as expected,\n%s", listOutput))
var allowRules []int
var denyRule int
for _, line := range strLines {
slice := strings.Fields(line)
if strings.Contains(line, "allow") {
priority := slice[1]
intVar, _ := strconv.Atoi(priority)
allowRules = append(allowRules, intVar)
}
if strings.Contains(line, "drop") {
priority := slice[1]
denyRule, _ = strconv.Atoi(priority)
}
}
for _, allow := range allowRules {
o.Expect(allow > denyRule).Should(o.BeTrue(), fmt.Sprintf("The allow rule priority is %v, the deny rule priority is %v.", allow, denyRule))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
73f7d9f7-c57c-44f1-b9fc-5c9bdedb5c2d
|
Author:huirwang-NonHyperShiftHOST-ConnectedOnly-High-59709-[FdpOvnOvs] No duplicate egressfirewall rules in the OVN Northbound database after restart OVN master pod. [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"text/template"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("Author:huirwang-NonHyperShiftHOST-ConnectedOnly-High-59709-[FdpOvnOvs] No duplicate egressfirewall rules in the OVN Northbound database after restart OVN master pod. [Disruptive]", func() {
//This is from bug https://issues.redhat.com/browse/OCPBUGS-811
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressFWTemplate1 = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
exutil.By("Obtain the namespace \n")
ns1 := oc.Namespace()
exutil.By("Create egressfirewall rules under same namespace \n")
egressFW := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate1,
}
egressFW.createEgressFWObject1(oc)
defer egressFW.deleteEgressFWObject1(oc)
efErr := waitEgressFirewallApplied(oc, egressFW.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Get the base number of egressfirewall rules\n")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s", ns1)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("The egressfirewall rules before restart ovn master pod: \n %s", listOutput)
baseCount := len(strings.Split(listOutput, "\n"))
exutil.By("Restart cluster-manager's ovnkube-node pod\n")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnMasterPodName, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("Check the result, the number of egressfirewal rules should be same as before.")
ovnMasterPodName = getOVNKMasterOVNkubeNode(oc)
listOutput, listErr = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("The egressfirewall rules after restart ovn master pod: \n %s", listOutput)
resultCount := len(strings.Split(listOutput, "\n"))
o.Expect(resultCount).Should(o.Equal(baseCount))
})
| |||||
test case
|
openshift/openshift-tests-private
|
07c5fc3d-027b-4dcb-84a4-d84825d1c60d
|
NonHyperShiftHOST-ConnectedOnly-Author:huirwang-High-43464-EgressFirewall works with IPv6 address.
|
['"path/filepath"', '"text/template"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-High-43464-EgressFirewall works with IPv6 address.", func() {
// Note: this case focuses on Egressfirewall working with IPv6 address, as ipv6 single cluster with proxy where egressfirewall cannot work, so only test it on dual stack.
// Currently only on the UPI packet dualstack cluster, the pod can access public website with IPv6 address.
ipStackType := checkIPStackType(oc)
if ipStackType != "dualstack" || !checkIPv6PublicAccess(oc) {
g.Skip("This case should be run on UPI packet dualstack cluster, skip other platform or network stack type.")
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("Create an EgressFirewall object with rule deny.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: "::/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
defer egressFW2.deleteEgressFW2Object(oc)
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
defer pod1.deletePingPod(oc)
exutil.By("Check both ipv6 and ipv4 are blocked")
_, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -6 www.google.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -4 www.google.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
exutil.By("Remove egressfirewall object")
egressFW2.deleteEgressFW2Object(oc)
exutil.By("Create an EgressFirewall object with rule allow.")
egressFW2 = egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Allow",
cidr: "::/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
errPatch = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\"::/0\"}},{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Check both ipv4 and ipv6 destination can be accessed")
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -6 www.google.com --connect-timeout 5 -I")
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -4 www.google.com --connect-timeout 5 -I")
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
b514ff38-3646-4860-a392-00665a9dae0f
|
NonHyperShiftHOST-ConnectedOnly-Author:jechen-High-44940-No segmentation error in ovnkube-control-plane or syntax error in ovn-controller after egressfirewall resource that referencing a DNS name is deleted.
|
['"path/filepath"', '"strings"', '"text/template"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:jechen-High-44940-No segmentation error in ovnkube-control-plane or syntax error in ovn-controller after egressfirewall resource that referencing a DNS name is deleted.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
exutil.By("1. Create a new namespace, create an EgressFirewall object with references a DNS name in the namespace.")
ns := oc.Namespace()
egressFW1 := egressFirewall1{
name: "default",
namespace: ns,
template: egressFWTemplate,
}
defer egressFW1.deleteEgressFWObject1(oc)
egressFW1.createEgressFWObject1(oc)
efErr := waitEgressFirewallApplied(oc, egressFW1.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("2. Delete the EgressFirewall, check logs of ovnkube-control-plane pod for error, there should be no segementation error, no DNS value not found in dnsMap error message.")
removeResource(oc, true, true, "egressfirewall", egressFW1.name, "-n", egressFW1.namespace)
leaderCtrlPlanePod := getOVNKMasterPod(oc)
o.Expect(leaderCtrlPlanePod).ShouldNot(o.BeEmpty())
e2e.Logf("\n leaderCtrlPlanePod: %v\n", leaderCtrlPlanePod)
o.Consistently(func() bool {
podlogs, _ := oc.AsAdmin().Run("logs").Args(leaderCtrlPlanePod, "-n", "openshift-ovn-kubernetes", "-c", "ovnkube-cluster-manager").Output()
return strings.Count(podlogs, `SIGSEGV: segmentation violation`) == 0 && strings.Count(podlogs, `DNS value not found in dnsMap for domain`) == 0
}, 60*time.Second, 10*time.Second).Should(o.BeTrue(), "Segementation error or no DNS value in dnsMap error message found in ovnkube-control-plane pod log!!")
})
| |||||
test case
|
openshift/openshift-tests-private
|
7b3beee0-5a3e-4efa-b3f4-d823e7f1b3b3
|
NonHyperShiftHOST-ConnectedOnly-Author:huirwang-High-37778-EgressFirewall can be deleted after the project deleted.
|
['"fmt"', '"path/filepath"', '"text/template"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-High-37778-EgressFirewall can be deleted after the project deleted.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressFWTemplate1 = filepath.Join(buildPruningBaseDir, "egressfirewall1-template.yaml")
)
exutil.By("Obtain the namespace \n")
oc.SetupProject()
ns1 := oc.Namespace()
exutil.By("Create egressfirewall rules under same namespace \n")
egressFW := egressFirewall1{
name: "default",
namespace: ns1,
template: egressFWTemplate1,
}
egressFW.createEgressFWObject1(oc)
defer egressFW.deleteEgressFWObject1(oc)
exutil.AssertWaitPollNoErr(waitEgressFirewallApplied(oc, egressFW.name, ns1), fmt.Sprintf("Wait for the egressFW/%s applied successfully timeout", egressFW.name))
exutil.By("Delete namespace .\n")
errNs := oc.WithoutNamespace().AsAdmin().Run("delete").Args("ns", ns1).Execute()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("Verify no egressfirewall object ")
outPut, errFW := oc.AsAdmin().Run("get").Args("egressfirewall", egressFW.name, "-n", ns1).Output()
o.Expect(errFW).To(o.HaveOccurred())
o.Expect(outPut).NotTo(o.ContainSubstring(egressFW.name))
exutil.By("Check ovn db, corresponding egressfirewall acls were deleted.")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s", ns1)
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("The egressfirewall rules after project deleted: \n %s", listOutput)
o.Expect(listOutput).NotTo(o.ContainSubstring("allow"))
o.Expect(listOutput).NotTo(o.ContainSubstring("drop "))
})
| |||||
test case
|
openshift/openshift-tests-private
|
57b8e1c1-be55-4829-8e24-159fa64b013d
|
ConnectedOnly-Author:huirwang-High-60488-EgressFirewall works for a nodeSelector for matchLabels.
|
['"context"', '"path/filepath"', '"text/template"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("ConnectedOnly-Author:huirwang-High-60488-EgressFirewall works for a nodeSelector for matchLabels.", func() {
exutil.By("Label one node to match egressfirewall rule")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
ipStackType := checkIPStackType(oc)
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, node1, "ef-dep")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, node1, "ef-dep", "qe")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall3-template.yaml")
exutil.By("Get new namespace")
ns := oc.Namespace()
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0"
}
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Check the nodes can be acccessed or not")
// Will skip the test if the nodes IP cannot be pinged even without egressfirewall
node1IP1, node1IP2 := getNodeIP(oc, node1)
node2IP1, node2IP2 := getNodeIP(oc, node2)
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
if err != nil {
g.Skip("Ping node IP failed, skip the test in this environment.")
}
exutil.By("Create an EgressFirewall object with rule nodeSelector.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify the node matched egressfirewall will be allowed.")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP2)
return err
}, "10s", "5s").Should(o.HaveOccurred())
if ipStackType == "dualstack" {
// Test node ipv6 address as well
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP1)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP1)
return err
}, "10s", "5s").Should(o.HaveOccurred())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d36f3177-98e5-4c5f-93fd-44086417a473
|
ConnectedOnly-Author:huirwang-High-60812-EgressFirewall works for a nodeSelector for matchExpressions.
|
['"context"', '"path/filepath"', '"text/template"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("ConnectedOnly-Author:huirwang-High-60812-EgressFirewall works for a nodeSelector for matchExpressions.", func() {
exutil.By("Label one node to match egressfirewall rule")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
ipStackType := checkIPStackType(oc)
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, node1, "ef-org")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, node1, "ef-org", "dev")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall4-template.yaml")
exutil.By("Get new namespace")
ns := oc.Namespace()
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0"
}
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Check the nodes can be acccessed or not")
// Will skip the test if the nodes IP cannot be pinged even without egressfirewall
node1IP1, node1IP2 := getNodeIP(oc, node1)
node2IP1, node2IP2 := getNodeIP(oc, node2)
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
if err != nil {
g.Skip("Ping node IP failed, skip the test in this environment.")
}
exutil.By("Create an EgressFirewall object with rule nodeSelector.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify the node matched egressfirewall will be allowed, unmatched will be blocked!!")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP2)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP2)
return err
}, "10s", "5s").Should(o.HaveOccurred())
if ipStackType == "dualstack" {
// Test node ipv6 address as well
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node1IP1)
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "ping -c 2 "+node2IP1)
return err
}, "10s", "5s").Should(o.HaveOccurred())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
b770dad1-ff14-46d5-907b-f0445721d3ff
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:huirwang-High-61213-Delete IGMP Groups when deleting stale chassis.[Disruptive]
|
['"fmt"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:huirwang-High-61213-Delete IGMP Groups when deleting stale chassis.[Disruptive]", func() {
// This is from bug https://issues.redhat.com/browse/OCPBUGS-7230
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
g.Skip("Skip for non-supported auto scaling machineset platforms!!")
}
clusterinfra.SkipConditionally(oc)
exutil.By("Create a new machineset with 2 nodes")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-61213"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
nodeName0 := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
nodeName1 := clusterinfra.GetNodeNameFromMachine(oc, machineName[1])
exutil.By("Obtain the namespace \n")
ns := oc.Namespace()
exutil.By("Enable multicast on namespace \n")
enableMulticast(oc, ns)
exutil.By("Delete ovnkuber-master pods and two nodes \n")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-l", "app=ovnkube-control-plane", "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
err = ms.DeleteMachineSet(oc)
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
exutil.By("Wait ovnkuber-control-plane pods ready\n")
err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
exutil.AssertWaitPollNoErr(err, "ovnkube-control-plane pods are not ready")
exutil.By("Check ovn db, the stale chassis for deleted node should be deleted")
for _, machine := range []string{nodeName0, nodeName1} {
ovnACLCmd := fmt.Sprintf("ovn-sbctl --columns _uuid,hostname list chassis")
ovnMasterSourthDBLeaderPod := getOVNKMasterOVNkubeNode(oc)
o.Eventually(func() string {
outPut, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterSourthDBLeaderPod, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
return outPut
}, "120s", "10s").ShouldNot(o.ContainSubstring(machine), "The stale chassis still existed!")
}
exutil.By("Check ovnkuber control plane logs, no IGMP_Group logs")
ovnMasterPodName := getOVNKMasterPod(oc)
searchString := "Transaction causes multiple rows in \"IGMP_Group\" table to have identical values"
logContents, logErr := exutil.GetSpecificPodLogs(oc, "openshift-ovn-kubernetes", "ovnkube-cluster-manager", ovnMasterPodName, "")
o.Expect(logErr).ShouldNot(o.HaveOccurred())
o.Expect(strings.Contains(logContents, searchString)).Should(o.BeFalse())
})
| |||||
test case
|
openshift/openshift-tests-private
|
30f29040-81c4-4171-be7b-df62370672a9
|
NonHyperShiftHOST-ConnectedOnly-NonPreRelease-PreChkUpgrade-Author:huirwang-High-62056-Check egressfirewall is functional post upgrade
|
['"fmt"', '"path/filepath"', '"text/template"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-NonPreRelease-PreChkUpgrade-Author:huirwang-High-62056-Check egressfirewall is functional post upgrade", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
egressFWTemplate = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
ns = "62056-upgrade-ns"
allowedIPList = []string{}
ipv6CIDR string
ipv4CIDR string
)
exutil.By("1. create new namespace")
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2. Get an IP address for domain name www.redhat.com for allow rule ")
allowedIPv4, allowedIPv6 := getIPFromDnsName("www.redhat.com")
o.Expect(len(allowedIPv4) == 0).NotTo(o.BeTrue())
ipv4CIDR = allowedIPv4 + "/32"
allowedIPList = append(allowedIPList, allowedIPv4)
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
if checkIPv6PublicAccess(oc) {
o.Expect(len(allowedIPv6) == 0).NotTo(o.BeTrue())
ipv6CIDR = allowedIPv6 + "/128"
allowedIPList = append(allowedIPList, allowedIPv6)
} else {
e2e.Logf("Dual stack cluster does not have access to public websites for IPv6 address.")
}
}
exutil.By("3. Create an EgressFirewall object with rule deny.")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: "0.0.0.0/0",
template: egressFWTemplate,
}
egressFW2.createEgressFW2Object(oc)
exutil.By("4. Update EgressFirewall object with rule specific allow rule.")
if ipStackType == "dualstack" && checkIPv6PublicAccess(oc) {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv4CIDR+"\"}},{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv6CIDR+"\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
} else {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv4CIDR+"\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("5. Create a pod in the namespace")
createResourceFromFile(oc, ns, statefulSetHelloPod)
podErr := waitForPodWithLabelReady(oc, ns, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodname := getPodName(oc, ns, "app=hello")[0]
exutil.By("6. Check the allowed destination can be accessed!")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible with just egress firewall", allowedIPList[i]))
verifyDstIPAccess(oc, helloPodname, ns, allowedIPList[i], true)
}
exutil.By("7.Check the other website can be blocked!")
_, err = e2eoutput.RunHostCmd(ns, helloPodname, "curl yahoo.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
e7fd04d8-9f2c-414f-a7f3-b962ec5aa38d
|
NonHyperShiftHOST-ConnectedOnly-NonPreRelease-PstChkUpgrade-Author:huirwang-High-62056-Check egressfirewall is functional post upgrade
|
['"fmt"', '"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-NonPreRelease-PstChkUpgrade-Author:huirwang-High-62056-Check egressfirewall is functional post upgrade", func() {
ns := "62056-upgrade-ns"
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", ns).Execute()
if nsErr != nil {
g.Skip("Skip the PstChkUpgrade test as 62056-upgrade-ns namespace does not exist, PreChkUpgrade test did not run")
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--ignore-not-found=true").Execute()
exutil.By("Verify if EgressFirewall was applied correctly")
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Get allow IP list ")
cidrList, cidrErr := oc.AsAdmin().Run("get").Args("-n", ns, "egressfirewall.k8s.ovn.org/default", "-o=jsonpath={.spec.egress[?(@.type==\"Allow\")].to.cidrSelector}").Output()
o.Expect(cidrErr).NotTo(o.HaveOccurred())
o.Expect(cidrList == "").NotTo(o.BeTrue())
e2e.Logf("The allowed destination IPs are: %s", cidrList)
// Regular expression to match IPv4 and IPv6 addresses with CIDR notation
ipRegex := `(?:\d{1,3}\.){3}\d{1,3}\/\d{1,2}|[0-9a-fA-F:]+(?::[0-9a-fA-F]{1,4}){1,7}\/\d{1,3}`
re := regexp.MustCompile(ipRegex)
matches := re.FindAllString(cidrList, -1)
var allowedIPList []string
for _, match := range matches {
// Split the match on the '/' character and take only the IP part
ip := strings.Split(match, "/")[0]
allowedIPList = append(allowedIPList, ip)
}
exutil.By("Get the pod in the namespace")
podErr := waitForPodWithLabelReady(oc, ns, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodname := getPodName(oc, ns, "app=hello")[0]
exutil.By("Check the allowed destination can be accessed!")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible with just egress firewall", allowedIPList[i]))
verifyDstIPAccess(oc, helloPodname, ns, allowedIPList[i], true)
}
exutil.By("Check the other website can be blocked!")
_, err := e2eoutput.RunHostCmd(ns, helloPodname, "curl yahoo.com --connect-timeout 5 -I")
o.Expect(err).To(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
dc92bd88-e309-42fd-96ae-0a567c257497
|
Author:jechen-NonHyperShiftHOST-ConnectedOnly-High-61176-High-61177-79704-Medium-[FdpOvnOvs] EgressFirewall with dnsName in uppercase can be created, and EgressFirewall should work with namespace that is longer than forth-three characters even after restart. [Disruptive]
|
['"path/filepath"', '"strings"', '"text/template"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-High-61176-High-61177-79704-Medium-[FdpOvnOvs] EgressFirewall with dnsName in uppercase can be created, and EgressFirewall should work with namespace that is longer than forth-three characters even after restart. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
ns := "test-egressfirewall-with-a-very-long-namespace-61176-61177"
exutil.By("1. Create a long namespace over 43 characters, create an EgressFirewall object with mixed of Allow and Deny rules.")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--ignore-not-found=true").Execute()
nsErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns).Execute()
o.Expect(nsErr).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, ns)
egressFW5 := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "dnsName",
rulevalue1: "WWW.GOOGLE.COM",
protocol1: "TCP",
portnumber1: 443,
ruletype2: "Deny",
rulename2: "dnsName",
rulevalue2: "www.facebook.com",
protocol2: "TCP",
portnumber2: 443,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW5.name, "-n", egressFW5.namespace)
egressFW5.createEgressFW5Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW5.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
e2e.Logf("\n egressfirewall is applied\n")
exutil.By("2. Create a test pod in the namespace")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc.AsAdmin())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1.name, "-n", pod1.namespace).Execute()
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("3. Check www.facebook.com is blocked \n")
o.Eventually(func() bool {
_, stderr, _ := e2eoutput.RunHostCmdWithFullOutput(pod1.namespace, pod1.name, "curl -I -k https://www.facebook.com --connect-timeout 5")
return stderr != ""
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected!!")
exutil.By("4. Check www.google.com is allowed \n")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -I -k https://www.google.com --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work as expected!!")
testPodNodeName, _ := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name)
o.Expect(testPodNodeName != "").Should(o.BeTrue())
e2e.Logf("node name for the test pod is: %v", testPodNodeName)
exutil.By("5. Check ACLs in northdb. \n")
masterOVNKubeNodePod := getOVNKMasterOVNkubeNode(oc)
o.Expect(masterOVNKubeNodePod != "").Should(o.BeTrue())
aclCmd := "ovn-nbctl --no-leader-only find acl|grep external_ids|grep test-egressfirewall-with-a-very-long-namespace ||true"
checkAclErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
aclOutput, aclErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", masterOVNKubeNodePod, aclCmd)
if aclErr != nil {
e2e.Logf("%v,Waiting for ACLs to be synced, try next ...,", aclErr)
return false, nil
}
// check ACLs rules for the long namespace
if strings.Contains(aclOutput, "test-egressfirewall-with-a-very-long-namespace") && strings.Count(aclOutput, "test-egressfirewall-with-a-very-long-namespace") == 4 {
e2e.Logf("The ACLs for egressfirewall in northbd are as expected!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkAclErr, "ACLs were not synced correctly!")
exutil.By("6. Restart OVNK nodes\n")
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
delPodErr := oc.AsAdmin().Run("delete").Args("pod", "-l", "app=ovnkube-node", "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("7. Check ACL again in northdb after restart. \n")
// since ovnkube-node pods are re-created during restart, obtain ovnMasterOVNkubeNodePod again
masterOVNKubeNodePod = getOVNKMasterOVNkubeNode(oc)
o.Expect(masterOVNKubeNodePod != "").Should(o.BeTrue())
checkAclErr = wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
aclOutput, aclErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", masterOVNKubeNodePod, aclCmd)
if aclErr != nil {
e2e.Logf("%v,Waiting for ACLs to be synced, try next ...,", aclErr)
return false, nil
}
// check ACLs rules for the long namespace after restart
if strings.Contains(aclOutput, "test-egressfirewall-with-a-very-long-namespace") && strings.Count(aclOutput, "test-egressfirewall-with-a-very-long-namespace") == 4 {
e2e.Logf("The ACLs for egressfirewall in northbd are as expected!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkAclErr, "ACLs were not synced correctly!")
exutil.By("8. Check egressfirewall rules still work correctly after restart \n")
o.Eventually(func() bool {
_, stderr, _ := e2eoutput.RunHostCmdWithFullOutput(pod1.namespace, pod1.name, "curl -I -k https://www.facebook.com --connect-timeout 5")
return stderr != ""
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work correctly after restart!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, "curl -I -k https://www.google.com --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work correctly after restart!!")
})
| |||||
test case
|
openshift/openshift-tests-private
|
f04b644e-86cf-4a58-b2d2-0df118c489b7
|
NonHyperShiftHOST-ConnectedOnly-Author:jechen-High-37774-Set EgressFirewall to limit the pod connection to specific CIDR ranges in different namespaces.
|
['"net"', '"path/filepath"', '"text/template"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:jechen-High-37774-Set EgressFirewall to limit the pod connection to specific CIDR ranges in different namespaces.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
url1 := "www.yahoo.com" // used as Deny rule for first namespace
url2 := "www.ericsson.com" // used as Deny rule for second namespace
url3 := "www.google.com" // is not used as Deny rule in either namespace
exutil.By("1. nslookup obtain dns server ip for url1 and url2\n")
ips1, err := net.LookupIP(url1)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ip address from nslookup for %v: %v", url1, ips1)
ips2, err := net.LookupIP(url2)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ip address from lookup for %v: %v", url2, ips2)
ips3, err := net.LookupIP(url3)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ip address from lookup for %v: %v", url3, ips3)
ipStackType := checkIPStackType(oc)
e2e.Logf("\n ipStackType: %v\n", ipStackType)
// get all IPv4 and IPv6 addresses of 3 hosts above
var ipv4Addr1, ipv6Addr1, ipv4Addr2, ipv6Addr2, ipv4Addr3, ipv6Addr3 []string
for j := 0; j <= len(ips1)-1; j++ {
if IsIPv4(ips1[j].String()) {
ipv4Addr1 = append(ipv4Addr1, ips1[j].String())
}
if IsIPv6(ips1[j].String()) {
ipv6Addr1 = append(ipv6Addr1, ips1[j].String())
}
}
for j := 0; j <= len(ips2)-1; j++ {
if IsIPv4(ips2[j].String()) {
ipv4Addr2 = append(ipv4Addr2, ips2[j].String())
}
if IsIPv6(ips2[j].String()) {
ipv6Addr2 = append(ipv6Addr2, ips2[j].String())
}
}
for j := 0; j <= len(ips3)-1; j++ {
if IsIPv4(ips3[j].String()) {
ipv4Addr3 = append(ipv4Addr3, ips3[j].String())
}
if IsIPv6(ips3[j].String()) {
ipv6Addr3 = append(ipv6Addr3, ips3[j].String())
}
}
e2e.Logf("ipv4Address1: %v, ipv6Address1: %v\n\n", ipv4Addr1, ipv6Addr1)
e2e.Logf("ipv4Address2: %v, ipv6Address2: %v\n\n", ipv4Addr2, ipv6Addr2)
e2e.Logf("ipv4Address3: %v, ipv6Address3: %v\n\n", ipv4Addr3, ipv6Addr3)
//Store IPv4 addresses of the 3 hosts above in ip1, ip2, ip3
//Store IPv6 addresses of the 3 hosts above in ip4, ip5, ip6
var cidrValue1, cidrValue2, cidrValue3, cidrValue4, ip1, ip2, ip3, ip4, ip5, ip6 string
if ipStackType == "ipv6single" {
if len(ipv6Addr1) < 2 || len(ipv6Addr2) < 2 || len(ipv6Addr3) < 2 {
g.Skip("Not enough IPv6 address for the hosts that are used in this test with v6 single cluster, need two IPv6 addresses from each host, skip the test.")
}
ip1 = ipv6Addr1[0]
ip2 = ipv6Addr2[0]
ip3 = ipv6Addr3[0]
cidrValue1 = ip1 + "/128"
cidrValue2 = ip2 + "/128"
ip4 = ipv6Addr1[1]
ip5 = ipv6Addr2[1]
ip6 = ipv6Addr3[1]
cidrValue3 = ip4 + "/128"
cidrValue4 = ip5 + "/128"
} else if ipStackType == "ipv4single" {
if len(ipv4Addr1) < 2 || len(ipv4Addr2) < 2 || len(ipv4Addr3) < 2 {
g.Skip("Not enough IPv4 address for the hosts that are used in this test with V4 single cluster, need two IPv4 addresses from each host, skip the test.")
}
ip1 = ipv4Addr1[0]
ip2 = ipv4Addr2[0]
ip3 = ipv4Addr3[0]
cidrValue1 = ip1 + "/32"
cidrValue2 = ip2 + "/32"
ip4 = ipv4Addr1[1]
ip5 = ipv4Addr2[1]
ip6 = ipv4Addr3[1]
cidrValue3 = ip4 + "/32"
cidrValue4 = ip5 + "/32"
} else if ipStackType == "dualstack" {
if len(ipv4Addr1) < 1 || len(ipv4Addr2) < 1 || len(ipv4Addr3) < 1 || len(ipv6Addr1) < 1 || len(ipv6Addr2) < 1 || len(ipv6Addr3) < 1 {
g.Skip("Not enough IPv4 or IPv6 address for the hosts that are used in this test with dualstack cluster, need at least one IPv4 and one IPv6 address from each host, skip the test.")
}
ip1 = ipv4Addr1[0]
ip2 = ipv4Addr2[0]
ip3 = ipv4Addr3[0]
cidrValue1 = ip1 + "/32"
cidrValue2 = ip2 + "/32"
ip4 = ipv6Addr1[0]
ip5 = ipv6Addr2[0]
ip6 = ipv6Addr3[0]
cidrValue3 = ip4 + "/128"
cidrValue4 = ip5 + "/128"
}
e2e.Logf("\n cidrValue1: %v, cidrValue2: %v\n", cidrValue1, cidrValue2)
e2e.Logf("\n IP1: %v, IP2: %v, IP3: %v\n", ip1, ip2, ip3)
e2e.Logf("\n cidrValue3: %v, cidrValue4: %v\n", cidrValue3, cidrValue4)
e2e.Logf("\n IP4: %v, IP5: %v, IP6: %v\n", ip4, ip5, ip6)
exutil.By("2. Obtain first namespace, create egressfirewall1 in it\n")
ns1 := oc.Namespace()
egressFW1 := egressFirewall5{
name: "default",
namespace: ns1,
ruletype1: "Deny",
rulename1: "cidrSelector",
rulevalue1: cidrValue1,
protocol1: "TCP",
portnumber1: 443,
ruletype2: "Allow",
rulename2: "dnsName",
rulevalue2: "www.redhat.com",
protocol2: "TCP",
portnumber2: 443,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW1.name, "-n", egressFW1.namespace)
egressFW1.createEgressFW5Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW1.name, ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
e2e.Logf("\n egressfirewall is applied\n")
exutil.By("3. Create a test pod in first namespace")
pod1ns1 := pingPodResource{
name: "hello-pod1",
namespace: ns1,
template: pingPodTemplate,
}
pod1ns1.createPingPod(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1ns1.name, "-n", pod1ns1.namespace).Execute()
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("4. Create a second namespace, and create egressfirewall2 in it\n")
oc.SetupProject()
ns2 := oc.Namespace()
egressFW2 := egressFirewall5{
name: "default",
namespace: ns2,
ruletype1: "Deny",
rulename1: "cidrSelector",
rulevalue1: cidrValue2,
protocol1: "TCP",
portnumber1: 443,
ruletype2: "Deny",
rulename2: "dnsName",
rulevalue2: "www.redhat.com",
protocol2: "TCP",
portnumber2: 443,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW2.name, "-n", egressFW2.namespace)
egressFW2.createEgressFW5Object(oc)
efErr = waitEgressFirewallApplied(oc, egressFW2.name, ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
e2e.Logf("\n egressfirewall is applied\n")
exutil.By("5. Create a test pod in second namespace")
pod2ns2 := pingPodResource{
name: "hello-pod2",
namespace: ns2,
template: pingPodTemplate,
}
pod2ns2.createPingPod(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod2ns2.name, "-n", pod2ns2.namespace).Execute()
waitPodReady(oc, pod2ns2.namespace, pod2ns2.name)
// for v4 single, test v4 CIDR first, then test it be replaced by another v4 CIDR
// for V6 single, test v4 CIDR first, then test it be replaced by another v4 CIDR
// for dualStack, test v4 CIDR first, then test it be replaced by another v6 CIDR
var curlCmd1, curlCmd2, curlCmd3, newCurlCmd1, newCurlCmd2, newCurlCmd3 string
if ipStackType == "ipv4single" {
curlCmd1 = "curl -I -4 -k https://" + url1 + " --resolve " + url1 + ":443:" + ip1 + " --connect-timeout 5"
curlCmd2 = "curl -I -4 -k https://" + url2 + " --resolve " + url2 + ":443:" + ip2 + " --connect-timeout 5"
curlCmd3 = "curl -I -4 -k https://" + url3 + " --resolve " + url3 + ":443:" + ip3 + " --connect-timeout 5"
newCurlCmd1 = "curl -I -4 -k https://" + url1 + " --resolve " + url1 + ":443:" + ip4 + " --connect-timeout 5"
newCurlCmd2 = "curl -I -4 -k https://" + url2 + " --resolve " + url2 + ":443:" + ip5 + " --connect-timeout 5"
newCurlCmd3 = "curl -I -4 -k https://" + url3 + " --resolve " + url3 + ":443:" + ip6 + " --connect-timeout 5"
} else if ipStackType == "ipv6single" {
curlCmd1 = "curl -I -6 -k https://" + url1 + " --resolve " + url1 + ":443:[" + ip1 + "] --connect-timeout 5"
curlCmd2 = "curl -I -6 -k https://" + url2 + " --resolve " + url2 + ":443:[" + ip2 + "] --connect-timeout 5"
curlCmd3 = "curl -I -6 -k https://" + url3 + " --resolve " + url3 + ":443:[" + ip3 + "] --connect-timeout 5"
newCurlCmd1 = "curl -I -6 -k https://" + url1 + " --resolve " + url1 + ":443:[" + ip4 + "] --connect-timeout 5"
newCurlCmd2 = "curl -I -6 -k https://" + url2 + " --resolve " + url2 + ":443:[" + ip5 + "] --connect-timeout 5"
newCurlCmd3 = "curl -I -6 -k https://" + url3 + " --resolve " + url3 + ":443:[" + ip6 + "] --connect-timeout 5"
} else if ipStackType == "dualstack" { // for dualstack, use v6 CIDR to replace v4 CIDR
curlCmd1 = "curl -I -4 -k https://" + url1 + " --resolve " + url1 + ":443:" + ip1 + " --connect-timeout 5"
curlCmd2 = "curl -I -4 -k https://" + url2 + " --resolve " + url2 + ":443:" + ip2 + " --connect-timeout 5"
curlCmd3 = "curl -I -4 -k https://" + url3 + " --resolve " + url3 + ":443:" + ip3 + " --connect-timeout 5"
newCurlCmd1 = "curl -I -6 -k https://" + url1 + " --resolve " + url1 + ":443:[" + ip4 + "] --connect-timeout 5"
newCurlCmd2 = "curl -I -6 -k https://" + url2 + " --resolve " + url2 + ":443:[" + ip5 + "] --connect-timeout 5"
newCurlCmd3 = "curl -I -6 -k https://" + url3 + " --resolve " + url3 + ":443:[" + ip6 + "] --connect-timeout 5"
}
exutil.By("\n6.1. Check deny rule of first namespace is blocked from test pod of first namespace because of the deny rule in first namespace\n")
_, err1 := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, curlCmd1)
o.Expect(err1).To(o.HaveOccurred(), "curl the deny rule of first namespace from first namespace failed")
exutil.By("\n6.2. Check deny rule of second namespce is allowed from test pod of first namespace, it is not affected by deny rile in second namespace\n")
_, err2 := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, curlCmd2)
o.Expect(err2).NotTo(o.HaveOccurred(), "curl the deny rule of second namespace from first namespace failed")
exutil.By("\n6.3. Check url3 is allowed from test pod of first namespace, it is not affected by either deny rule of two namespaces\n")
_, err3 := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, curlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from first namesapce failed")
exutil.By("\n7.1. Check deny rule of first namespace is allowed from test pod of second namespace, it is not affected by deny rule in first namespace\n")
_, err1 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, curlCmd1)
o.Expect(err1).NotTo(o.HaveOccurred(), "curl the deny rule of second namespace from first namespace failed")
exutil.By("\n7.2. Check deny rule in second namespace is blocked from test pod of second namespace because of the deny rule in second namespace\n")
_, err2 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, curlCmd2)
o.Expect(err2).To(o.HaveOccurred(), "curl the deny rule of second namespace from second namespace failed")
exutil.By("\n7.3. Check url3 is allowed from test pod of second namespace, it is not affected by either deny rule of two namespaces\n")
_, err3 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, curlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from first namesapce failed")
exutil.By("\n\n8. Replace CIDR of first rule of each egressfirewall with another CIDR \n\n")
change1 := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/cidrSelector\", \"value\":\"" + cidrValue3 + "\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
change2 := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/cidrSelector\", \"value\":\"" + cidrValue4 + "\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newCidr, cidrErr := oc.AsAdmin().Run("get").Args("-n", ns1, "egressfirewall.k8s.ovn.org/default", "-o=jsonpath={.spec.egress[0].to.cidrSelector}").Output()
o.Expect(cidrErr).NotTo(o.HaveOccurred())
o.Expect(newCidr == cidrValue3).Should(o.BeTrue())
e2e.Logf("\n\nnew CIDR for first rule in first namespace %v is %v\n\n", ns1, newCidr)
newCidr, cidrErr = oc.AsAdmin().Run("get").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "-o=jsonpath={.spec.egress[0].to.cidrSelector}").Output()
o.Expect(cidrErr).NotTo(o.HaveOccurred())
o.Expect(newCidr == cidrValue4).Should(o.BeTrue())
e2e.Logf("\n\nnew CIDR for first rule in second namespace %v is %v\n\n", ns2, newCidr)
exutil.By("\n\n Repeat curl tests with after CIDR update \n\n")
exutil.By("\n8.1 Curl deny rule of first namespace from first namespace\n")
_, err1 = e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, newCurlCmd1)
o.Expect(err1).To(o.HaveOccurred(), "curl the deny rule of first namespace from first namespace failed after CIDR update")
exutil.By("\n8.2 Curl deny rule of second namespace from first namespace\n")
_, err2 = e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, newCurlCmd2)
o.Expect(err2).NotTo(o.HaveOccurred(), "curl the deny rule of second namespace from first namespace failed after CIDR update")
exutil.By("\n8.3 Curl url with no rule from first namespace\n")
_, err3 = e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, newCurlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from first namesapce failed after CIDR update")
exutil.By("\n8.4 Curl deny rule of first namespace from second namespace\n")
_, err1 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, newCurlCmd1)
o.Expect(err1).NotTo(o.HaveOccurred(), "curl the deny rule of first namespace from second namespace failed after CIDR update")
exutil.By("\n8.5 Curl deny rule of second namespace from second namespace\n")
_, err2 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, newCurlCmd2)
o.Expect(err2).To(o.HaveOccurred(), "curl the deny rule of second namespace from second namespace failed after CIDR update")
exutil.By("\n8.6 Curl url with no rule from second namespace\n")
_, err3 = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, newCurlCmd3)
o.Expect(err3).NotTo(o.HaveOccurred(), "curl url3 from second namesapce failed after CIDR update")
exutil.By("\n9. Change the Allow rule of egressfirewall of first namespace to be denied\n")
change := "[{\"op\":\"replace\",\"path\":\"/spec/egress/1/type\", \"value\":\"Deny\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns1, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// After second rule in first namespace is changed from Allow to Deny, access to www.redhat.com should be blocked from first namespace
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -4 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in first namespace after rule change for IPv4!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -4 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in second namespace for IPv4!!")
if ipStackType == "dualstack" {
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -6 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in first namespace after rule change for IPv6 !!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -6 https://www.redhat.com --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "Deny rule did not work as expected in second namespace for IPv6!!")
}
exutil.By("\n10. Change the second Deny rule of egressfirewall of second namespace to be allowed\n")
change = "[{\"op\":\"replace\",\"path\":\"/spec/egress/1/type\", \"value\":\"Allow\"}]"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", change).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// After second rule in second namespace is changed from Deny to Allow, access to www.redhat.com should be still be blocked from first namespace but allowed from second namespace
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -4 https://www.redhat.com/en --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "After rule change, Allow rule in second namespace does not affect first namespace for IPv4!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -4 https://www.redhat.com/en --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work as expected in second namespace after rule change for IPv4!!")
if ipStackType == "dualstack" {
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod1ns1.namespace, pod1ns1.name, "curl -I -6 https://www.redhat.com/en --connect-timeout 5")
return err != nil
}, "120s", "10s").Should(o.BeTrue(), "After rule change, Allow rule in second namespace does not affect first namespace for IPv6!!")
o.Eventually(func() bool {
_, err := e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "curl -I -6 https://www.redhat.com/en --connect-timeout 5")
return err == nil
}, "120s", "10s").Should(o.BeTrue(), "Allow rule did not work as expected in second namespace after rule change for IPv6 !!")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
4c2d0742-6fd9-4b3b-9ddf-95b3b6a1def8
|
ConnectedOnly-Author:huirwang-High-65173-Misconfigured Egress Firewall can be corrected.
|
['"fmt"', '"path/filepath"', '"strings"', '"text/template"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("ConnectedOnly-Author:huirwang-High-65173-Misconfigured Egress Firewall can be corrected.", func() {
//This is from customer bug https://issues.redhat.com/browse/OCPBUGS-15182
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressFWTemplate2 = filepath.Join(buildPruningBaseDir, "egressfirewall2-template.yaml")
)
exutil.By("Obtain the namespace \n")
ns := oc.Namespace()
exutil.By("Create an EgressFirewall with missing cidr prefix\n")
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: "1.1.1.1",
template: egressFWTemplate2,
}
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify EgressFirewall was not applied correctly\n")
checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
output, efErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", "-n", ns, egressFW2.name).Output()
if efErr != nil {
e2e.Logf("Failed to get egressfirewall %v, error: %s. Trying again", egressFW2, efErr)
return false, nil
}
if !strings.Contains(output, "EgressFirewall Rules not correctly applied") {
e2e.Logf("The egressfirewall output message not expexted, trying again. \n %s", output)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("EgressFirewall with missing cidr prefix should not be applied correctly!"))
exutil.By("Apply EgressFirewall again with correct cidr\n")
egressFW2.cidr = "1.1.1.0/24"
egressFW2.createEgressFW2Object(oc)
exutil.By("Verify EgressFirewall was applied correctly\n")
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
ae842344-379b-49fb-928d-1f838e18bf5c
|
Author:jechen-NonHyperShiftHOST-ConnectedOnly-High-72054-EgressFirewall rules should include all IPs of matched node when nodeSelector is used.
|
['"context"', '"fmt"', '"path/filepath"', '"text/template"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-High-72054-EgressFirewall rules should include all IPs of matched node when nodeSelector is used.", func() {
// https://issues.redhat.com/browse/OCPBUGS-13665
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall3-template.yaml")
exutil.By("1. Label one node to match egressfirewall rule")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
// node1 is going to be labelled to be a matched node, node2 is not labelled so it is not a matched node
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, node1, "ef-dep")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, node1, "ef-dep", "qe")
// Get all host IPs of both nodes
allNode1IPsv4, allNode1IPsv6 := getAllHostCIDR(oc, node1)
allNode2IPsv4, allNode2IPRv6 := getAllHostCIDR(oc, node2)
exutil.By("2. Get new namespace")
ns := oc.Namespace()
exutil.By("3. Create a pod in the namespace")
testPod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
testPod.createPingPod(oc)
waitPodReady(oc, testPod.namespace, testPod.name)
exutil.By("4.Check the nodes can be acccessed before egressFirewall with nodeSelector is applied")
if !checkNodeAccessibilityFromAPod(oc, node1, testPod.namespace, testPod.name) || !checkNodeAccessibilityFromAPod(oc, node2, testPod.namespace, testPod.name) {
g.Skip("Pre-test check failed, test is skipped!")
}
exutil.By(" 5. Create an egressFirewall with rule nodeSelector.")
ipStackType := checkIPStackType(oc)
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0" // for Dualstack, test with v4 CIDR first, then test V6 CIDR later
}
egressFW2 := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
efErr := waitEgressFirewallApplied(oc, egressFW2.name, ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By(" 6. Verify Egress firewall rules in NBDB of all nodes.")
ovnACLCmd := fmt.Sprintf("ovn-nbctl --format=table --no-heading --columns=action,priority,match find acl external_ids:k8s.ovn.org/name=%s | grep allow", ns)
nodelist, nodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(nodeErr).NotTo(o.HaveOccurred())
o.Expect(len(nodelist)).NotTo(o.BeEquivalentTo(0))
for _, eachNode := range nodelist {
ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", eachNode)
o.Expect(podErr).NotTo(o.HaveOccurred())
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKubePod, ovnACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
// egressFirewall rules should include all the IPs of the matched node1 in NBDB, but do not include IPs for unmatched node2
if ipStackType == "dualstack" || ipStackType == "ipv4single" {
for _, nodeIPv4Addr := range allNode1IPsv4 {
o.Expect(listOutput).Should(o.ContainSubstring(nodeIPv4Addr), fmt.Sprintf("%s for node %s is not in egressfirewall rules as expected", nodeIPv4Addr, node1))
}
for _, nodeIPv4Addr := range allNode2IPsv4 {
o.Expect(listOutput).ShouldNot(o.ContainSubstring(nodeIPv4Addr), fmt.Sprintf("%s for node %s should not be in egressfirewall rules", nodeIPv4Addr, node2))
}
}
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
for _, nodeIPv6Addr := range allNode1IPsv6 {
o.Expect(listOutput).Should(o.ContainSubstring(nodeIPv6Addr), fmt.Sprintf("%s for node %s is not in egressfirewall rules as expected", nodeIPv6Addr, node1))
}
for _, nodeIPv6Addr := range allNode2IPRv6 {
o.Expect(listOutput).ShouldNot(o.ContainSubstring(nodeIPv6Addr), fmt.Sprintf("%s for node %s should not be in egressfirewall rules", nodeIPv6Addr, node2))
}
}
}
exutil.By(" 7. Verified matched node can be accessed from all its interfaces, unmatched node can not be accessed from any of its interfaces.")
result1 := checkNodeAccessibilityFromAPod(oc, node1, testPod.namespace, testPod.name)
o.Expect(result1).Should(o.BeTrue())
result2 := checkNodeAccessibilityFromAPod(oc, node2, testPod.namespace, testPod.name)
o.Expect(result2).Should(o.BeFalse())
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
// Delete original egressFirewall, recreate the egressFirewall with IPv6 CIDR, then check access to nodes through IPv6 interfaces
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
result1 := checkNodeAccessibilityFromAPod(oc, node1, testPod.namespace, testPod.name)
o.Expect(result1).Should(o.BeTrue())
result2 := checkNodeAccessibilityFromAPod(oc, node2, testPod.namespace, testPod.name)
o.Expect(result2).Should(o.BeFalse())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
4b3ae40c-6605-4ef4-9f66-7690826079c3
|
Author:huirwang-ConnectedOnly-Medium-67491-[FdpOvnOvs] EgressFirewall works with ANP, BANP and NP for egress traffic.
|
['"path/filepath"', '"strings"', '"text/template"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("Author:huirwang-ConnectedOnly-Medium-67491-[FdpOvnOvs] EgressFirewall works with ANP, BANP and NP for egress traffic.", func() {
ipStackType := checkIPStackType(oc)
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "none")
if !(ipStackType == "ipv4single" || (acceptedPlatform && ipStackType == "dualstack")) {
g.Skip("This case should be run on UPI packet dualstack cluster or IPv4 cluster, skip other platform or network stack type.")
}
var (
testID = "67491"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
pingPodTemplate = filepath.Join(testDataDir, "ping-for-pod-template.yaml")
egressFWTemplate = filepath.Join(testDataDir, "egressfirewall2-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-egress-single-CIDR-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
)
exutil.By("Get test namespace")
ns := oc.Namespace()
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("4. Create a Baseline Admin Network Policy with deny action to cidr")
banpCR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: ns,
ruleName: "default-deny-to-" + ns,
ruleAction: "Deny",
cidr: "0.0.0.0/0",
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("Get one IP address for domain name www.google.com")
ipv4, ipv6 := getIPFromDnsName("www.google.com")
o.Expect(len(ipv4) == 0).NotTo(o.BeTrue())
exutil.By("Create an EgressFirewall \n")
egressFW := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Allow",
cidr: ipv4 + "/32",
template: egressFWTemplate,
}
egressFW.createEgressFW2Object(oc)
err = waitEgressFirewallApplied(oc, egressFW.name, ns)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv4, false)
exutil.By("Remove BANP")
removeResource(oc, true, true, "banp", banpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv4, true)
exutil.By("Create ANP with deny action to cidr")
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-" + testID,
subjectKey: matchLabelKey,
subjectVal: ns,
priority: 10,
ruleName: "allow-to-" + ns,
ruleAction: "Deny",
cidr: "0.0.0.0/0",
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv4, false)
exutil.By("Remove ANP")
removeResource(oc, true, true, "anp", anpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv4, true)
exutil.By("Create Network Policy with limited access to cidr which is not same as egressfirewall")
npIPBlock := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: "1.1.1.1/32",
namespace: ns,
}
npIPBlock.createipBlockCIDRObjectSingle(oc)
output, err = oc.AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress"))
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv4, false)
exutil.By("Remove network policy")
removeResource(oc, true, true, "-n", ns, "networkpolicy", npIPBlock.name)
if ipStackType == "dualstack" {
// Retest with ipv6 address
if !checkIPv6PublicAccess(oc) {
g.Skip("Not be able to access the public website with IPv6,skip below test steps!!")
}
o.Expect(len(ipv6) == 0).NotTo(o.BeTrue())
exutil.By("Create ANP with deny action to ipv6 cidr")
banpCR.cidr = "::/0"
banpCR.createSingleRuleCIDRBANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("Update egressfirewall with ipv6 address")
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"cidrSelector\":\""+ipv6+"/128\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv6, false)
exutil.By("Remove BANP")
removeResource(oc, true, true, "banp", banpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv6, true)
exutil.By("Create ANP")
anpCR.cidr = "::/0"
anpCR.createSingleRuleCIDRANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv6, false)
exutil.By("Remove ANP")
removeResource(oc, true, true, "anp", anpCR.name)
verifyDstIPAccess(oc, pod1.name, ns, ipv6, true)
exutil.By("Create Network Policy")
npIPBlock.cidr = "2001::02/128"
npIPBlock.createipBlockCIDRObjectSingle(oc)
output, err = oc.AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress"))
exutil.By("Verify destination got blocked")
verifyDstIPAccess(oc, pod1.name, ns, ipv6, false)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d1b779c6-108c-4a7e-b9c1-ce8f3ee1930a
|
Author:huirwang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-74657-EgressFirewall nodeSelector works after some specific operations. [Disruptive]
|
['"context"', '"fmt"', '"os"', '"path/filepath"', '"strings"', '"text/template"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("Author:huirwang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-74657-EgressFirewall nodeSelector works after some specific operations. [Disruptive]", func() {
//https://issues.redhat.com/browse/OCPBUGS-34331
exutil.By("Get worker nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough worker nodes for this test, skip the case!!")
}
node1 := nodeList.Items[0].Name
node2 := nodeList.Items[1].Name
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall3-template.yaml")
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create egressfirewall from file")
type egressFirewallConfig struct {
Domains []string
}
outputEFFilePath := "/tmp/egress_firewall_8000.yaml"
domainsPerFile := 7999
yamlTemplate := `apiVersion: k8s.ovn.org/v1
kind: EgressFirewall
metadata:
name: default
spec:
egress:
{{- range .Domains }}
- type: Allow
to:
dnsName: {{ . }}
{{- end }}
- type: Deny
to:
cidrSelector: 0.0.0.0/0
`
// Parse the YAML template
tmpl, err := template.New("egressFirewall").Parse(yamlTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
// Generate the egressfirewall file
domains := make([]string, domainsPerFile)
for i := 0; i < domainsPerFile; i++ {
domains[i] = fmt.Sprintf("fake-domain-%d.com", i+1)
}
// Create the EgressFirewallConfig struct
config := egressFirewallConfig{Domains: domains}
// Open the output file
defer os.Remove(outputEFFilePath)
outputFile, err := os.Create(outputEFFilePath)
o.Expect(err).NotTo(o.HaveOccurred())
// Execute the template and write to the file
err = tmpl.Execute(outputFile, config)
o.Expect(err).NotTo(o.HaveOccurred())
outputFile.Close()
e2e.Logf("Successfully generated %s\n", outputEFFilePath)
err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f", outputEFFilePath, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
o.Eventually(func() bool {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall/default", "-n", ns).Output()
e2e.Logf("\n EgressFiewall status: %v\n", output)
return strings.Contains(output, "EgressFirewall Rules applied")
}, "1800s", "30s").Should(o.BeTrue(), "Egressfiewall Rules were not correctly applied!!")
exutil.By("Delete the egressfirewall and stop nbdb for one node")
removeResource(oc, true, true, "egressfirewall/default", "-n", ns)
killNBDBCmd := "crictl stop $(crictl ps | grep nbdb | awk '{print $1}')"
_, debugNodeErr := exutil.DebugNodeWithChroot(oc, node1, "bash", "-c", killNBDBCmd)
o.Expect(debugNodeErr).NotTo(o.HaveOccurred())
exutil.By("Create second namespace and two pods")
oc.SetupProject()
ns2 := oc.Namespace()
pod1ns2 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns2,
nodename: node1,
template: pingPodNodeTemplate,
}
pod1ns2.createPingPodNode(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
pod2ns2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: node2,
template: pingPodNodeTemplate,
}
pod2ns2.createPingPodNode(oc)
waitPodReady(oc, pod2ns2.namespace, pod2ns2.name)
exutil.By("Get one master node IP.")
master1, err := exutil.GetFirstMasterNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
master1IP1, master1IP2 := getNodeIP(oc, master1)
_, err = e2eoutput.RunHostCmd(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP2)
if err != nil {
g.Skip("Ping node IP failed without egressfirewall, skip the test in this environment.")
}
exutil.By("Create EgressFirewall object with nodeSelector.")
ipStackType := checkIPStackType(oc)
var cidrValue string
if ipStackType == "ipv6single" {
cidrValue = "::/0"
} else {
cidrValue = "0.0.0.0/0"
}
egressFW2 := egressFirewall2{
name: "default",
namespace: ns2,
ruletype: "Deny",
cidr: cidrValue,
template: egressFWTemplate,
}
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
efErr := waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the master node can NOT be accessed from both pods")
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP2)
return err
}, "60s", "10s").Should(o.HaveOccurred())
o.Eventually(func() error {
_, err = e2eoutput.RunHostCmd(pod2ns2.namespace, pod2ns2.name, "ping -c 2 "+master1IP2)
return err
}, "10s", "5s").Should(o.HaveOccurred())
exutil.By("Label the master node which would match the egressfirewall.")
defer exutil.DeleteLabelFromNode(oc, master1, "ef-dep")
exutil.AddLabelToNode(oc, master1, "ef-dep", "qe")
exutil.By("Verify the master node can be accessed from both pods")
_, err = e2eoutput.RunHostCmdWithRetries(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP2, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmdWithRetries(pod2ns2.namespace, pod2ns2.name, "ping -c 2 "+master1IP2, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
if ipStackType == "dualstack" {
// Test node ipv6 address as well
egressFW2.deleteEgressFW2Object(oc)
egressFW2.cidr = "::/0"
defer egressFW2.deleteEgressFW2Object(oc)
egressFW2.createEgressFW2Object(oc)
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the master node can be accessed from both pods with IPv6")
_, err = e2eoutput.RunHostCmdWithRetries(pod1ns2.namespace, pod1ns2.name, "ping -c 2 "+master1IP1, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmdWithRetries(pod2ns2.namespace, pod2ns2.name, "ping -c 2 "+master1IP1, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
a676855f-4877-4df5-aef1-81fa89a94d6c
|
Author:asood-ConnectedOnly-High-78162-Egress traffic works with ANP and egress firewall.
|
['"fmt"', '"path/filepath"', '"strings"', '"text/template"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("Author:asood-ConnectedOnly-High-78162-Egress traffic works with ANP and egress firewall.", func() {
ipStackType := checkIPStackType(oc)
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "none")
if !(ipStackType == "ipv4single" || (acceptedPlatform && ipStackType == "dualstack")) {
g.Skip("This case should be run on UPI packet dualstack cluster or IPv4 cluster, skip other platform or network stack type.")
}
var (
testID = "78162"
testDataDir = exutil.FixturePath("testdata", "networking")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
pingPodTemplate = filepath.Join(testDataDir, "ping-for-pod-template.yaml")
egressFWTemplate = filepath.Join(testDataDir, "egressfirewall2-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
allowedIPList = []string{}
deniedIPList = []string{}
patchEfw string
patchANP string
)
exutil.By("1. Obtain the namespace")
ns := oc.Namespace()
exutil.By("2. Create a pod ")
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("3. Get an IP address for domain name www.google.com for allow rule and www.facebook.com for deny rule validation")
allowedIPv4, allowedIPv6 := getIPFromDnsName("www.google.com")
o.Expect(len(allowedIPv4) == 0).NotTo(o.BeTrue())
ipv4CIDR := allowedIPv4 + "/32"
allowedIPList = append(allowedIPList, allowedIPv4)
deniedIPv4, deniedIPv6 := getIPFromDnsName("www.facebook.com")
o.Expect(len(deniedIPv4) == 0).NotTo(o.BeTrue())
deniedIPList = append(deniedIPList, deniedIPv4)
// patch payload for egress firewall and ANP
patchEfw = "[{\"op\": \"add\", \"path\":\"/spec/egress/1\", \"value\": {\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}}]"
patchANP = "[{\"op\": \"add\", \"path\": \"/spec/egress/1\", \"value\": {\"name\":\"deny egresss\", \"action\": \"Deny\", \"to\": [{\"networks\": [\"0.0.0.0/0\"]}]}}]"
if ipStackType == "dualstack" {
if checkIPv6PublicAccess(oc) {
o.Expect(len(allowedIPv6) == 0).NotTo(o.BeTrue())
ipv6CIDR := allowedIPv6 + "/128"
allowedIPList = append(allowedIPList, allowedIPv6)
o.Expect(len(deniedIPv6) == 0).NotTo(o.BeTrue())
deniedIPList = append(deniedIPList, deniedIPv6)
patchEfw = "[{\"op\": \"add\", \"path\":\"/spec/egress/1\", \"value\": {\"type\":\"Allow\",\"to\":{\"cidrSelector\":\"" + ipv6CIDR + "\"}}}, {\"op\": \"add\", \"path\":\"/spec/egress/2\", \"value\": {\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}}, {\"op\": \"add\", \"path\":\"/spec/egress/3\", \"value\": {\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}}]"
patchANP = "[{\"op\": \"add\", \"path\": \"/spec/egress/0/to/0/networks/1\", \"value\": \"" + ipv6CIDR + "\"}, {\"op\": \"add\", \"path\": \"/spec/egress/1\", \"value\": {\"name\":\"deny egresss\", \"action\": \"Deny\", \"to\": [{\"networks\": [\"0.0.0.0/0\", \"::/0\"]}]}}]"
} else {
e2e.Logf("Dual stack cluster does not have access to public websites")
}
}
egressFW := egressFirewall2{
name: "default",
namespace: ns,
ruletype: "Allow",
cidr: allowedIPv4 + "/32",
template: egressFWTemplate,
}
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-network-egress" + testID,
subjectKey: matchLabelKey,
subjectVal: ns,
priority: 10,
ruleName: "allow-to-" + ns,
ruleAction: "Allow",
cidr: ipv4CIDR,
template: anpCRTemplate,
}
exutil.By("5. Verify the intended denied IP is reachable before egress firewall is applied")
for i := 0; i < len(deniedIPList); i++ {
e2e.Logf("Verify %s is accessible before egress firewall is applied", deniedIPList[i])
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], true)
}
exutil.By("6. Create egress firewall")
egressFW.createEgressFW2Object(oc)
err := waitEgressFirewallApplied(oc, egressFW.name, ns)
o.Expect(err).NotTo(o.HaveOccurred())
patchReplaceResourceAsAdmin(oc, "egressfirewall/default", patchEfw, ns)
efwRules, efwRulesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ns, "egressfirewall", "default", "-o=jsonpath={.spec.egress}").Output()
o.Expect(efwRulesErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Egress Firewall Rules after update : %s", efwRules)
exutil.By("7. Validate traffic after egress firewall is applied")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible with just egress firewall", allowedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, allowedIPList[i], true)
exutil.By(fmt.Sprintf("Verify %s is not accessible with just egress firewall", deniedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], false)
}
exutil.By("8. Create ANP with Allow action to an IP and Deny action to all CIDRs")
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
patchReplaceResourceAsAdmin(oc, "anp/"+anpCR.name, patchANP)
anpRules, rulesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("adminnetworkpolicy", anpCR.name, "-o=jsonpath={.spec.egress}").Output()
o.Expect(rulesErr).NotTo(o.HaveOccurred())
e2e.Logf("\n ANP Rules after update : %s", anpRules)
exutil.By("9. Validate traffic with ANP and Egress firewall configured")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible after ANP is created", allowedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, allowedIPList[i], true)
exutil.By(fmt.Sprintf("Verify %s is not accessible after ANP is created", deniedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], false)
}
exutil.By("10. Remove Egress Firewall")
removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
exutil.By("11. Validate traffic with just ANP configured")
for i := 0; i < len(allowedIPList); i++ {
exutil.By(fmt.Sprintf("Verify %s is accessible after egress firewall is removed", allowedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, allowedIPList[i], true)
exutil.By(fmt.Sprintf("Verify %s is not accessible after egress firewall is removed", deniedIPList[i]))
verifyDstIPAccess(oc, pod.name, ns, deniedIPList[i], false)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
bf6eb2e1-28e5-45b3-852d-827d7684e292
|
NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Critical-73723-dnsName has wildcard in EgressFirewall rules.
|
['"path/filepath"', '"text/template"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Critical-73723-dnsName has wildcard in EgressFirewall rules.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard-dualstack.yaml")
exutil.By("Create egressfirewall file")
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns, efwDualstack)
} else {
createResourceFromFile(oc, ns, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Verify the allowed rules which match the wildcard take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", false)
exutil.By("Update the domain name to a litlle bit long domain name.")
updateValue := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/dnsName\", \"value\":\"*.whatever.you.like.here.followed.by.svc-1.google.com\"}]"
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", updateValue).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules which match the wildcard take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "type.whatever.you.like.here.followed.by.svc-1.google.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.google.com", false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
fac5120f-2568-4524-aea3-a251f0656047
|
NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73724-dnsName has same wildcard domain name in EgressFirewall rules in different namespaces.
|
['"path/filepath"', '"text/template"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73724-dnsName has same wildcard domain name in EgressFirewall rules in different namespaces.", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-wildcard-dualstack.yaml")
exutil.By("Create a test pod in first namespace ")
ns1 := oc.Namespace()
pod1ns1 := pingPodResource{
name: "hello-pod",
namespace: ns1,
template: pingPodTemplate,
}
pod1ns1.createPingPod(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("Create a test pod in the second namespace ")
oc.SetupProject()
ns2 := oc.Namespace()
pod1ns2 := pingPodResource{
name: "hello-pod",
namespace: ns2,
template: pingPodTemplate,
}
pod1ns2.createPingPod(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
exutil.By("Create EgressFirewall in both namespaces ")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns1, efwDualstack)
createResourceFromFile(oc, ns2, efwDualstack)
} else {
createResourceFromFile(oc, ns1, efwSingle)
createResourceFromFile(oc, ns2, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules which match the wildcard take effect for both namespace.")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.google.com", true)
exutil.By("Verify other website which doesn't match the wildcard would be blocked")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", false)
exutil.By("Update the wildcard domain name to a different one in second namespace.")
updateValue := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/dnsName\", \"value\":\"*.redhat.com\"}]"
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns2, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", updateValue).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the udpated rule taking effect in second namespace.")
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.google.com", false)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", true)
exutil.By("Verify the egressfirewall rules in first namespace still works")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
exutil.By("Remove egressfirewall in first namespace.")
removeResource(oc, true, true, "egressfirewall/default", "-n", ns1)
exutil.By("Verify no blocking for the destination domain names in first namespace")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.google.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
d770150f-9ddf-4c59-b58a-bd4660619b54
|
NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Critical-73719-Allowing access to DNS names even if the IP addresses associated with them changes. [Serial]
|
['"path/filepath"', '"strings"', '"text/template"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Critical-73719-Allowing access to DNS names even if the IP addresses associated with them changes. [Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname-dualstack.yaml")
exutil.By("Create an egressfirewall file")
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns, efwDualstack)
} else {
createResourceFromFile(oc, ns, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Verify the allowed rules take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", false)
exutil.By("Verify dnsnameresolver contains the allowed dns names.")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
4f951a94-e699-4c8a-836a-ea19eeedb528
|
NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73721-Medium-73722-Update domain name in EgressFirewall,EgressFirewall works after restart ovnkube-node pods. [Disruptive]
|
['"path/filepath"', '"strings"', '"text/template"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73721-Medium-73722-Update domain name in EgressFirewall,EgressFirewall works after restart ovnkube-node pods. [Disruptive]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname-dualstack.yaml")
exutil.By("Create egressfirewall file")
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns, efwDualstack)
} else {
createResourceFromFile(oc, ns, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Update the domain name to a different one.")
updateValue := "[{\"op\":\"replace\",\"path\":\"/spec/egress/0/to/dnsName\", \"value\":\"www.redhat.com\"}]"
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", ns, "egressfirewall.k8s.ovn.org/default", "--type=json", "-p", updateValue).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules take effect.")
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "registry-1.docker.io", false)
exutil.By("The dns names in dnsnameresolver get udpated as well.")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: www.redhat.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).NotTo(o.BeTrue())
exutil.By("Restart the ovnkube-node pod ")
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
podNode, err := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name)
o.Expect(err).NotTo(o.HaveOccurred())
delPodErr := oc.AsAdmin().Run("delete").Args("pod", "-l", "app=ovnkube-node", "-n", "openshift-ovn-kubernetes", "--field-selector", "spec.nodeName="+podNode).Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
exutil.By("Wait for ovnkube-node pods back up.")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("Verify the function still works")
efErr = waitEgressFirewallApplied(oc, "default", ns)
o.Expect(efErr).NotTo(o.HaveOccurred())
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.redhat.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1.name, pod1.namespace, "registry-1.docker.io", false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
5e8645a4-b48d-46a9-a4c2-f64e760cefe5
|
NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73720-Same domain name in different namespaces should work correctly. [Serial]
|
['"path/filepath"', '"strings"', '"text/template"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressfirewall.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:huirwang-Medium-73720-Same domain name in different namespaces should work correctly. [Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
efwSingle := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname.yaml")
efwDualstack := filepath.Join(buildPruningBaseDir, "egressfirewall/egressfirewall-specific-dnsname-dualstack.yaml")
exutil.By("Create test pod in first namespace")
ns1 := oc.Namespace()
pod1ns1 := pingPodResource{
name: "hello-pod",
namespace: ns1,
template: pingPodTemplate,
}
pod1ns1.createPingPod(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("Create test pod in second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
pod1ns2 := pingPodResource{
name: "hello-pod",
namespace: ns2,
template: pingPodTemplate,
}
pod1ns2.createPingPod(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
exutil.By("Create egressfirewall in both namespaces")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
createResourceFromFile(oc, ns1, efwDualstack)
createResourceFromFile(oc, ns2, efwDualstack)
} else {
createResourceFromFile(oc, ns1, efwSingle)
createResourceFromFile(oc, ns2, efwSingle)
}
efErr := waitEgressFirewallApplied(oc, "default", ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
efErr = waitEgressFirewallApplied(oc, "default", ns2)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify the allowed rules take effect on both namespaces")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", false)
exutil.By("Delete egressfirewall in second namespace")
removeResource(oc, true, true, "egressfirewall/default", "-n", ns2)
exutil.By("Verify the previous blocked dns name can be accessed.")
verifyDesitnationAccess(oc, pod1ns2.name, pod1ns2.namespace, "www.redhat.com", true)
exutil.By("Verify dnsnameresolver still contains the allowed dns names.")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).To(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).To(o.BeTrue())
exutil.By("Verify egressfirewall in first namespace still works")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.facebook.com", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.redhat.com", false)
exutil.By("Remove one domain name in first namespace")
if ipStackType == "dualstack" {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"dnsName\":\"registry-1.docker.io\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"::/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
} else {
errPatch := oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressfirewall.k8s.ovn.org/default", "-n", ns1, "-p", "{\"spec\":{\"egress\":[{\"type\":\"Allow\",\"to\":{\"dnsName\":\"registry-1.docker.io\"}},{\"type\":\"Deny\",\"to\":{\"cidrSelector\":\"0.0.0.0/0\"}}]}}", "--type=merge").Execute()
o.Expect(errPatch).NotTo(o.HaveOccurred())
}
efErr = waitEgressFirewallApplied(oc, "default", ns1)
o.Expect(efErr).NotTo(o.HaveOccurred())
exutil.By("Verify removed dns name will be blocked")
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "registry-1.docker.io", true)
verifyDesitnationAccess(oc, pod1ns1.name, pod1ns1.namespace, "www.facebook.com", false)
exutil.By("Verify removed dns name was removed from dnsnameresolver as well.")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("dnsnameresolver", "-n", "openshift-ovn-kubernetes", "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The dnsnameresolver output is : \n %s ", output)
o.Expect(strings.Contains(output, "dnsName: www.facebook.com")).NotTo(o.BeTrue())
o.Expect(strings.Contains(output, "dnsName: registry-1.docker.io")).To(o.BeTrue())
})
| |||||
test
|
openshift/openshift-tests-private
|
d4cfc8c2-a93b-47b5-a2ac-caf3690568ef
|
egressip_udn
|
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
netutils "k8s.io/utils/net"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
netutils "k8s.io/utils/net"
)
var _ = g.Describe("[sig-networking] SDN udn EgressIP", func() {
defer g.GinkgoRecover()
var (
egressNodeLabel = "k8s.ovn.org/egress-assignable"
oc = exutil.NewCLI("networking-"+getRandomString(), exutil.KubeConfigPath())
)
g.BeforeEach(func() {
SkipIfNoFeatureGate(oc, "NetworkSegmentation")
platform := exutil.CheckPlatform(oc)
networkType := checkNetworkType(oc)
e2e.Logf("\n\nThe platform is %v, networkType is %v\n", platform, networkType)
acceptedPlatform := strings.Contains(platform, "aws") || strings.Contains(platform, "gcp") || strings.Contains(platform, "openstack") || strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "azure") || strings.Contains(platform, "none") || strings.Contains(platform, "nutanix") || strings.Contains(platform, "powervs")
if !acceptedPlatform || !strings.Contains(networkType, "ovn") {
g.Skip("Test cases should be run on AWS/GCP/Azure/Openstack/Vsphere/BareMetal/Nutanix/Powervs cluster with ovn network plugin, skip for other platforms or other non-OVN network plugin!!")
}
ipStackType := checkIPStackType(oc)
if ipStackType == "ipv6single" {
// Not able to run on IPv6 single cluster for now due to cluster disconnect limiation.
g.Skip("Skip IPv6 Single cluster.")
}
if !(strings.Contains(platform, "none") || strings.Contains(platform, "powervs")) && (checkProxy(oc) || checkDisconnect(oc)) {
g.Skip("This is proxy/disconnect cluster, skip the test.")
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-77654-Validate egressIP with mixed of multiple non-overlapping UDNs and default network(layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allNS []string
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
theOtherNode := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create four more namespaces")
// first namespace is used for default network, second and third namespaces will be used for layer3 UDNs, last two namespaces will be used for layer2 UDN
ns := oc.Namespace()
allNS = append(allNS, ns)
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3 Create two different layer3 UDNs in 2nd and 3rd namespaces, two different layer2 UDN in last two namespaces")
cidr := []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48", "2011:100:200::0/48"}
for i := 1; i < 3; i++ {
createGeneralUDNCRD(oc, allNS[i], "udn-network-layer3-"+allNS[i], ipv4cidr[i-1], ipv6cidr[i-1], cidr[i-1], "layer3")
createGeneralUDNCRD(oc, allNS[i+2], "udn-network-layer2-"+allNS[i+2], ipv4cidr[i-1], ipv6cidr[i-1], cidr[i-1], "layer2")
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77654",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
// testpods1 are local pods that co-locate on egress node
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: egressNode,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods1 are remote pods on the other non-egress node
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: theOtherNode,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from layer3 UDN, default network or layer2 UDN")
var dstHost, primaryInf string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode,%s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-77655-Validate egressIP with mixed of multiple overlapping UDNs and default network(layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allNS []string
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
theOtherNode := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create four more namespaces")
// first namespace is for default network, 2nd and 3rd namespaces will be used for layer3 UDNs, last two namespaces will be used for layer2 UDN
ns := oc.Namespace()
allNS = append(allNS, ns)
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create two overlapping layer3 UDNs in 2rd and 3rd namesapces, create two overlapping layer2 UDN in last two namespaces")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
for i := 1; i < 3; i++ {
createGeneralUDNCRD(oc, allNS[i], "udn-network-layer3-"+allNS[i], ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, allNS[i+2], "udn-network-layer2-"+allNS[i+2], ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77655",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
// testpods1 are lcaol pods that co-locate on egress node
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: egressNode,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods1 are remote pods on the other non-egress node
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: theOtherNode,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from UDN or default network")
var dstHost, primaryInf string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-77744-Validate egressIP Failover with non-overlapping and overlapping UDNs (layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
udnNS []string
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create five more namespaces")
// first three namespaces will be used for layer3 UDNs, last three namespaces will be used for layer2 UDN
oc.CreateNamespaceUDN()
ns := oc.Namespace()
udnNS = append(udnNS, ns)
for i := 0; i < 5; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
udnNS = append(udnNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range udnNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create non overlapping & overlapping layer3 UDNs in three namesapces, create non-overlapping & overlapping layer2 UDN in last three namespaces")
cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
for i := 0; i < 3; i++ {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer3")
createGeneralUDNCRD(oc, udnNS[i+3], "udn-network-layer2-"+udnNS[i+3], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer2")
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, egressNodes[0], 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77744",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[0]))
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(udnNS))
testpods2 := make([]pingPodResourceNode, len(udnNS))
for i := 0; i < len(udnNS); i++ {
// testpods1 are pods on egressNode
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods2 are pods on nonEgressNode, egressNodes[1] is currently not a egress node as it is not labelled with egressNodeLabel
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[1],
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from overlapping or non-overlapping UDN")
var dstHost, primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNodes[0])
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(udnNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("7. Label the second node with egressNodeLabel, unlabel the first node, verify egressIP still works after failover.\n")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
exutil.By("8. Check the egress node was updated in the egressip object.\n")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 360*time.Second, false, func(cxt context.Context) (bool, error) {
egressIPMaps1 = getAssignedEIPInEIPObject(oc, egressip1.name)
if len(egressIPMaps1) != 1 || egressIPMaps1[0]["node"] == egressNodes[0] {
e2e.Logf("Wait for new egress node applied,try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to update egress node:%v", egressipErr))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[1]))
exutil.By("9. Validate egressIP again after egressIP failover \n")
exutil.By("9.1 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods after egressIP failover")
for i := 0; i < len(udnNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-Medium-78276-non-overlapping and overlapping UDN egressIP Pods will not be affected by the egressIP set on other netnamespace(layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
allNS []string
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Need at least 1 node for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create five more namespaces")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
for i := 0; i < 5; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create non-overlapping & overlapping layer3 UDNs in first three namesapces, create non-overlapping & overlapping layer2 UDN in last three namespaces")
cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
for i := 0; i < 3; i++ {
createGeneralUDNCRD(oc, allNS[i], "udn-network-layer3-"+allNS[i], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer3")
createGeneralUDNCRD(oc, allNS[i+3], "udn-network-layer2-"+allNS[i+3], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer2")
}
exutil.By("4. Get 3 unused IPs from the same subnet of the egressNode,create 3 egressIP objects with same namespaceSelector but different podSelector")
freeIPs := findFreeIPs(oc, egressNode, 3)
o.Expect(len(freeIPs)).Should(o.Equal(3))
podLabelValues := []string{"pink", "blue", "red", "pink", "blue", "red"}
egressips := make([]egressIPResource1, 3)
for i := 0; i < 3; i++ {
egressips[i] = egressIPResource1{
name: "egressip-78276-" + strconv.Itoa(i),
template: egressIP2Template,
egressIP1: freeIPs[i],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: podLabelValues[i],
}
egressips[i].createEgressIPObject2(oc)
defer egressips[i].deleteEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressips[i].name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
}
exutil.By("5. In each namespace, create a test pod, apply to test pod with label that matches podSelector definied in egressIP object")
testpods := make([]pingPodResource, len(allNS))
for i := 0; i < len(allNS); i++ {
testpods[i] = pingPodResource{
name: "hello-pod" + strconv.Itoa(i),
namespace: allNS[i],
template: pingPodTemplate,
}
testpods[i].createPingPod(oc)
waitPodReady(oc, testpods[i].namespace, testpods[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color="+podLabelValues[i])
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from each pod should use egressIP defined in the egressIP object the pod qualifies")
var dstHost, primaryInf string
var infErr error
e2e.Logf("Trying to get physical interface on the egressNode,%s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
exutil.By("Use tcpdump captured on egressNode to verify egressIP each pod")
for i := 0; i < 3; i++ {
_, cmdOnPod := getRequestURL(dstHost)
// Verify from layer3 UDN pods
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[i])).To(o.BeTrue())
// Verify from layer2 UDN pods
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i+3], testpods[i+3].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[i])).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78199-egressIP still works correctly after a UDN network gets deleted then recreated (layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, nodesToBeUsed := getTwoNodesSameSubnet(oc, nodeList)
if !ok || nodesToBeUsed == nil || len(nodesToBeUsed) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodesToBeUsed[0]
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create another namespace")
// first namespace for layer3 UDNs, second namespace will be used for layer2 UDN
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3.1 Create an UDN layer3 in ns1")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, egressNode, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-78199",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In the namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply to all pods with label that matches podSelector definied in egressIP object")
var testpods [2][2]pingPodResourceNode
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
testpods[j][i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i) + "-" + allNS[j],
namespace: ns1,
nodename: nodesToBeUsed[i],
template: pingPodNodeTemplate,
}
testpods[j][i].createPingPodNode(oc)
waitPodReady(oc, ns1, testpods[j][i].name)
defer exutil.LabelPod(oc, ns1, testpods[j][i].name, "color-")
err = exutil.LabelPod(oc, ns1, testpods[j][i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from overlapping or non-overlapping UDN")
var dstHost, primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, ns1, testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
exutil.By("7. Delete local and remote test pods that are associated with UDNs, then delete the UDNs.\n")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
removeResource(oc, true, true, "pod", testpods[j][i].name, "-n", testpods[j][i].namespace)
}
}
removeResource(oc, true, true, "UserDefinedNetwork", "udn-network-layer3-"+ns1, "-n", ns1)
removeResource(oc, true, true, "UserDefinedNetwork", "udn-network-layer2-"+ns2, "-n", ns2)
exutil.By("8. Recreate layer3 and layer2 UDNs, recreate local/remote test pods.\n")
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
testpods[j][i].createPingPodNode(oc)
waitPodReady(oc, ns1, testpods[j][i].name)
defer exutil.LabelPod(oc, ns1, testpods[j][i].name, "color-")
err = exutil.LabelPod(oc, ns1, testpods[j][i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("9. Validate egressIP again after recreating UDNs \n")
exutil.By("9.1 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods again after UDN recreation")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, ns1, testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78200-egressIP still works correctly after OVNK restarted on local and remote client host (layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, nodesToBeUsed := getTwoNodesSameSubnet(oc, nodeList)
if !ok || nodesToBeUsed == nil || len(nodesToBeUsed) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodesToBeUsed[0]
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2 Obtain a namespace, create a second one, apply a label to both namespaces that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create a layer3 UDN in ns1, and another layer2 UDN in ns2")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, egressNode, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-78200",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In the namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply to all pods with label that matches podSelector defined in egressIP object")
var testpods [2][2]pingPodResourceNode
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
testpods[j][i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i) + "-" + allNS[j],
namespace: allNS[j],
nodename: nodesToBeUsed[i],
template: pingPodNodeTemplate,
}
testpods[j][i].createPingPodNode(oc)
waitPodReady(oc, allNS[j], testpods[j][i].name)
defer exutil.LabelPod(oc, allNS[j], testpods[j][i].name, "color-")
err = exutil.LabelPod(oc, allNS[j], testpods[j][i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("6. Verify egress traffic from these local or remote egressIP pods should use egressIP as their sourceIP")
var dstHost, primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, egressNode)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[j], testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
exutil.By("7. Restart ovnkube-node pod of client host that local egressIP pod are on.\n")
// Since local egressIP pods are on egress node, restart ovnkube-pod of egress node
ovnkPod := ovnkubeNodePod(oc, egressNode)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("8. Validate egressIP again after restarting ovnkude-node pod of client host that local egressIP pods are on \n")
exutil.By("Use tcpdump captured on egressNode to verify egressIP from local pods again after OVNK restart")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[j], testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
exutil.By("9. Restart ovnkube-node pod of client host that remote egressIP pods are on.\n")
// Since remote egressIP pod is on non-egress node, restart ovnkube-pod of the non-egress node nodesToBeUsed[1]
ovnkPod = ovnkubeNodePod(oc, nodesToBeUsed[1])
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("10. Validate egressIP again after restarting ovnkude-node pod of client host that remote egressIP pods on \n")
exutil.By("Use tcpdump captured on egressNode to verify egressIP from remote pods again after OVNK restart")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[j], testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-78293-After reboot egress node EgressIP on UDN still work (layer3/2 and IPv4). [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Need at least 1 node for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2 Obtain a namespace, create a second one, apply a label to both namespaces that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create a layer3 UDN in ns1, create a layer2 UDN in ns2")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Get 1 unused IPs from the same subnet of the egressNode,create an egressIP object")
freeIPs := findFreeIPs(oc, egressNode, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip := egressIPResource1{
name: "egressip-78293",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
egressip.createEgressIPObject2(oc)
defer egressip.deleteEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
exutil.By("5. In each namespace, create a test pod, apply to test pod with label that matches podSelector definied in egressIP object")
testpods := make([]pingPodResource, len(allNS))
for i := 0; i < len(allNS); i++ {
testpods[i] = pingPodResource{
name: "hello-pod" + strconv.Itoa(i),
namespace: allNS[i],
template: pingPodTemplate,
}
testpods[i].createPingPod(oc)
waitPodReady(oc, testpods[i].namespace, testpods[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify that egress traffic from pod use egressIP as its sourceIP")
primaryInf, infErr := getSnifPhyInf(oc, egressNode)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("7.Reboot egress node.\n")
defer checkNodeStatus(oc, egressNode, "Ready")
rebootNode(oc, egressNode)
checkNodeStatus(oc, egressNode, "NotReady")
checkNodeStatus(oc, egressNode, "Ready")
for i := 0; i < len(allNS); i++ {
waitPodReady(oc, testpods[i].namespace, testpods[i].name)
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("8. Check EgressIP is assigned again after reboot.\n")
verifyExpectedEIPNumInEIPObject(oc, egressip.name, 1)
exutil.By("8. Validate egressIP after node reboot \n")
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-NonHyperShiftHOST-Longduration-NonPreRelease-High-78422-EgressIP on UDN still works on next available egress node after previous assigned egress node was deleted (layer3/2 and IPv4 only). [Disruptive]", func() {
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
g.Skip("Skip for non-supported auto scaling machineset platforms!!")
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressIP2Template := filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
exutil.By("1. Get an existing worker node to be non-egress node.")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Need at least 1 worker node, skip the test as the requirement was not fulfilled.")
}
nonEgressNode := nodeList.Items[0].Name
exutil.By("2.Create a new machineset with 2 nodes")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-78422"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
machineNames := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
nodeName0 := clusterinfra.GetNodeNameFromMachine(oc, machineNames[0])
nodeName1 := clusterinfra.GetNodeNameFromMachine(oc, machineNames[1])
exutil.By("3.1 Obtain a namespace, create a second one, apply a label to both namespaces that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3.2. Create a layer3 UDN in ns1, create a layer2 UDN in ns2")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Apply EgressLabel to the first node created by the new machineset\n")
// No need to defer unlabeling the node, as the node will be defer deleted with machineset before end of the test case
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeName0, egressNodeLabel, "true")
exutil.By("5. Get an unused IP address from the first node, create an egressip object with the IP\n")
freeIPs := findFreeIPs(oc, nodeName0, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip := egressIPResource1{
name: "egressip-78422",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
egressip.createEgressIPObject2(oc)
defer egressip.deleteEgressIPObject1(oc)
egressIPMaps := getAssignedEIPInEIPObject(oc, egressip.name)
o.Expect(len(egressIPMaps)).Should(o.Equal(1))
o.Expect(egressIPMaps[0]["node"]).Should(o.Equal(nodeName0))
exutil.By("6. Create a test pod on the non-egress node, apply to the pod with a label that matches podSelector in egressIP object \n")
testpods := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
testpods[i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: allNS[i],
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpods[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("7. Get tcpdump on first egress node, verify that egressIP works on first egress node")
primaryInf, infErr := getSnifPhyInf(oc, nodeName0)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, machineNames[0], tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("8. Apply EgressLabel to the second node created by the new machineset.\n")
// No need to defer unlabeling the node, as the node will be deleted with machineset before the end of the test case
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeName1, egressNodeLabel, "true")
exutil.By("9. Delete the first egress node, verify egressIP migrates to the second egress node.\n")
removeResource(oc, true, true, "machines.machine.openshift.io", machineNames[0], "-n", "openshift-machine-api")
o.Eventually(func() bool {
egressIPMaps := getAssignedEIPInEIPObject(oc, egressip.name)
return len(egressIPMaps) == 1 && egressIPMaps[0]["node"] == nodeName1
}, "120s", "10s").Should(o.BeTrue(), "egressIP was not migrated to next available egress node!!")
exutil.By("10. Get tcpdump on second egress node, verify that egressIP still works after migrating to second egress node")
primaryInf, infErr = getSnifPhyInf(oc, nodeName1)
o.Expect(infErr).NotTo(o.HaveOccurred())
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, nodeName1, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78453-Traffic is load balanced between egress nodes for egressIP UDN (layer3 and IPv4 only) .[Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressIPTemplate := filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
exutil.By("1. Get two worker nodes that are in same subnet, they will be used as egress-assignable nodes, get a third node as non-egress node\n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 || len(nodeList.Items) < 3 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
var nonEgressNode string
for _, node := range nodeList.Items {
if !contains(egressNodes, node.Name) {
nonEgressNode = node.Name
break
}
}
exutil.By("2. Apply EgressLabel Key to nodes.\n")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
exutil.By("3 Obtain first namespace, apply layer3 UDN CRD to it, add to the namespace with a label matching the namespaceSelector of egressIP object that will be created in step 4")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Create an egressip object\n")
freeIPs := findFreeIPs(oc, egressNodes[0], 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip1 := egressIPResource1{
name: "egressip-78453",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
egressip1.createEgressIPObject1(oc)
defer egressip1.deleteEgressIPObject1(oc)
//Replace matchLabel with matchExpressions
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchExpressions\":[{\"key\": \"name\", \"operator\": \"In\", \"values\": [\"test\"]}]}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchLabels\":null}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
verifyExpectedEIPNumInEIPObject(oc, egressip1.name, 2)
exutil.By("5. Create two pods, one pod is local to egress node, another pod is remote to egress node ")
pod1 := pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
pod2 := pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns1, pod2.name)
exutil.By("6. Check source IP is randomly one of egress ips.\n")
exutil.By("6.1 Use tcpdump to verify egressIP, create tcpdump sniffer Daemonset first.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump", "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump", "true")
primaryInf, infErr := getSnifPhyInf(oc, egressNodes[0])
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
defer deleteTcpdumpDS(oc, "tcpdump-78453", ns1)
tcpdumpDS, snifErr := createSnifferDaemonset(oc, ns1, "tcpdump-78453", "tcpdump", "true", dstHost, primaryInf, 80)
o.Expect(snifErr).NotTo(o.HaveOccurred())
exutil.By("6.2 Verify egressIP load balancing from local pod.")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod1.namespace, pod1.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for local pod %s", freeIPs[0], freeIPs[1], pod1.name))
exutil.By("6.3 Verify egressIP load balancing from remote pod.")
egressipErr = wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod2.namespace, pod2.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for remote pod %s", freeIPs[0], freeIPs[1], pod2.name))
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-79097-Traffic is load balanced between egress nodes for egressIP UDN (layer2 and IPv4 only) .[Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressIPTemplate := filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
exutil.By("1. Get two worker nodes that are in same subnet, they will be used as egress-assignable nodes, get a third node as non-egress node\n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 || len(nodeList.Items) < 3 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
var nonEgressNode string
for _, node := range nodeList.Items {
if !contains(egressNodes, node.Name) {
nonEgressNode = node.Name
break
}
}
exutil.By("2. Apply EgressLabel Key to nodes.\n")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
exutil.By("3 Obtain first namespace, apply layer2 UDN CRD to it, add to the namespace with a label matching the namespaceSelector of egressIP object that will be created in step 4")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer2-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Create an egressip object\n")
freeIPs := findFreeIPs(oc, egressNodes[0], 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip1 := egressIPResource1{
name: "egressip-78453",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
egressip1.createEgressIPObject1(oc)
defer egressip1.deleteEgressIPObject1(oc)
//Replce matchLabel with matchExpressions
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchExpressions\":[{\"key\": \"name\", \"operator\": \"In\", \"values\": [\"test\"]}]}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchLabels\":null}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
verifyExpectedEIPNumInEIPObject(oc, egressip1.name, 2)
exutil.By("5. Create two pods, one pod is local to egress node, another pod is remote to egress node ")
pod1 := pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
pod2 := pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns1, pod2.name)
exutil.By("6. Check source IP is randomly one of egress ips.\n")
exutil.By("6.1 Use tcpdump to verify egressIP, create tcpdump sniffer Daemonset first.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump", "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump", "true")
primaryInf, infErr := getSnifPhyInf(oc, egressNodes[0])
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
defer deleteTcpdumpDS(oc, "tcpdump-78453", ns1)
tcpdumpDS, snifErr := createSnifferDaemonset(oc, ns1, "tcpdump-78453", "tcpdump", "true", dstHost, primaryInf, 80)
o.Expect(snifErr).NotTo(o.HaveOccurred())
exutil.By("6.2 Verify egressIP load balancing from local pod.")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod1.namespace, pod1.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for local pod %s", freeIPs[0], freeIPs[1], pod1.name))
exutil.By("6.3 Verify egressIP load balancing from remote pod.")
egressipErr = wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod2.namespace, pod2.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for remote pod %s", freeIPs[0], freeIPs[1], pod2.name))
})
})
var _ = g.Describe("[sig-networking] SDN udn EgressIP IPv6", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-"+getRandomString(), exutil.KubeConfigPath())
egressNodeLabel = "k8s.ovn.org/egress-assignable"
dstHostv6 = "2620:52:0:800:3673:5aff:fe99:92f0"
ipStackType string
)
g.BeforeEach(func() {
SkipIfNoFeatureGate(oc, "NetworkSegmentation")
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1 or rdu2 dual stack cluster. , skip for other envrionment!!!")
}
ipStackType = checkIPStackType(oc)
if ipStackType == "ipv4single" {
g.Skip("It is not a dualsatck or singlev6 cluster, skip this test!!!")
}
if strings.Contains(msg, "offload.openshift-qe.sdn.com") {
dstHostv6 = "2620:52:0:800:3673:5aff:fe98:d2d0"
}
})
// author: [email protected]
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-77840-Validate egressIP with mixed of multiple non-overlapping UDNs and default network(layer3 and IPv6/dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace for default network, create two more namespaces for two non-overlapping UDNs")
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
oc.CreateNamespaceUDN()
ns3 := oc.Namespace()
udnNS := []string{ns2, ns3}
allNS := []string{ns1, ns2, ns3}
exutil.By("2.3 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create two different layer3 UDNs in namesapce ns1 and ns2")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48", "2011:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48", "2011:100:200::0/48"}
}
for i := 0; i < 2; i++ {
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], "", "", cidr[i], "layer3")
} else {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[i], ipv6cidr[i], "", "layer3")
}
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-77840",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
testpods3 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
// testpods1 are local pods that co-locate on assignedEIPNodev4 for dualstack
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
}
// testpods2 are local pods that co-locate on assignedEIPNodev6
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
// testpods3 are remote pods on the other non-egress node
testpods3[i] = pingPodResourceNode{
name: "hello-pod3-" + allNS[i],
namespace: allNS[i],
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpods3[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods3[i].name)
}
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
e2e.Logf("Trying to get physical interface on the node,%s", egressNode1)
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
exutil.By("6.1 Verify egressIP from IPv4 perspective")
dstHostv4 := nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 := getRequestURL(dstHostv4)
exutil.By("6.2 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods1[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.3 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods3[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("6.4 Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 := fmt.Sprintf("timeout 90s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 := getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.5 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods2[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.6 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods3[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-77841-Validate egressIP with mixed of multiple overlapping UDNs and default network(layer3 and IPv6/dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace for default network, create two more namespaces for two overlapping UDNs")
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
oc.CreateNamespaceUDN()
ns3 := oc.Namespace()
udnNS := []string{ns2, ns3}
allNS := []string{ns1, ns2, ns3}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create two overlapping layer3 UDNs between namesapce ns1 and ns2")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48"}
}
for i := 0; i < 2; i++ {
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-77841",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
testpods3 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
// testpods1 are local pods that co-locate on assignedEIPNodev4 for dualstack
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
}
// testpods2 are local pods that co-locate on assignedEIPNodev6
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
// testpods3 are remote pods on the other non-egress node
testpods3[i] = pingPodResourceNode{
name: "hello-pod3-" + allNS[i],
namespace: allNS[i],
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpods3[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods3[i].name)
}
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
e2e.Logf("Trying to get physical interface on the node,%s", egressNode1)
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
exutil.By("6.1 Verify egressIP from IPv4 perspective")
dstHostv4 := nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 := getRequestURL(dstHostv4)
exutil.By("6.2 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods1[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.3 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods3[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("6.4 Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 := fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 := getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.5 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods2[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.6 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods3[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-77842-Validate egressIP Failover with non-overlapping and overlapping UDNs (layer3 and IPv6 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
exutil.By("2.1 Create three UDN namespaces")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
oc.CreateNamespaceUDN()
ns3 := oc.Namespace()
udnNS := []string{ns1, ns2, ns3}
exutil.By("3.1. Create non-overlapping layer3 UDNs between ns1 and ns2, overlapping layer3 UDN between ns2 and ns3")
exutil.By("3.2 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
}
for i := 0; i < len(udnNS); i++ {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", udnNS[i], "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", udnNS[i], "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], "", "", cidr[i], "layer3")
} else {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[i], ipv6cidr[i], "", "layer3")
}
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPv6s(oc, egressNodes[0], 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77842",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[0]))
exutil.By("4.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("4.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(udnNS))
testpods2 := make([]pingPodResourceNode, len(udnNS))
for i := 0; i < len(udnNS); i++ {
// testpods1 are pods on egressNode
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods2 are pods on nonEgressNode, egressNodes[1] is currently not a egress node as it has not been labelled with egressNodeLabel yet
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[1],
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("5. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from overlapping or non-overlapping UDN")
var primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("5.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNodes[0])
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPod = getRequestURL("[" + dstHostv6 + "]")
exutil.By("5.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(udnNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("6. Label the second node with egressNodeLabel, unlabel the first node, verify egressIP still works after failover.\n")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
exutil.By("7. Check the egress node was updated in the egressip object.\n")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 360*time.Second, false, func(cxt context.Context) (bool, error) {
egressIPMaps1 = getAssignedEIPInEIPObject(oc, egressip1.name)
if len(egressIPMaps1) != 1 || egressIPMaps1[0]["node"] == egressNodes[0] {
e2e.Logf("Wait for new egress node applied,try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to update egress node:%v", egressipErr))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[1]))
exutil.By("8. Validate egressIP again after egressIP failover \n")
for i := 0; i < len(udnNS); i++ {
exutil.By("8.1 Use tcpdump captured on egressNode to verify egressIP from local pods after egressIP failover")
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
exutil.By("8.2 Use tcpdump captured on egressNode to verify egressIP from remote pods after egressIP failover")
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78247-egressIP still works correctly after a UDN network gets deleted then recreated (layer3 + v6 or dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode, for dualstack, need to label two nodes to be egressNodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2. Obtain a namespace ns1, apply to ns1 with label that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1 Create a layer3 UDN in ns1")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48"}
}
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-78247",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
// For dualstack, need to find out the actual nodes where v4 and v6 egressIP address are assigned
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In the namespace, create local test pod on egressNode, create remote test pod on nonEgressNode ")
var testpods []pingPodResourceNode
var testpod1 pingPodResourceNode
if ipStackType == "dualstack" {
// testpod1 is local pod on assignedEIPNodev4 for dualstack
testpod1 = pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpod1.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod1.name)
testpods = append(testpods, testpod1)
}
// testpod2 is local pod on assignedEIPNodev6 for dualstack
testpod2 := pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpod2.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod2.name)
testpods = append(testpods, testpod2)
// testpod3 is remote pod on the other non-egress node
testpod3 := pingPodResourceNode{
name: "hello-pod3-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpod3.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod3.name)
testpods = append(testpods, testpod3)
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
var dstHostv4, tcpdumpCmdv4, cmdOnPodv4, tcpdumpCmdv6, cmdOnPodv6 string
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
dstHostv4 = nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 = getRequestURL(dstHostv4)
exutil.By("6.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 = fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 = getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("7. Delete local and remote test pods that are associated with UDN in ns1 first, then delete the UDN.\n")
for i := 0; i < len(testpods); i++ {
removeResource(oc, true, true, "pod", testpods[i].name, "-n", testpods[i].namespace)
}
removeResource(oc, true, true, "UserDefinedNetwork", "udn-network-layer3-"+ns1, "-n", ns1)
exutil.By("8. Recreate the UDN and local/remote test pods in ns1.\n")
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
for i := 0; i < len(testpods); i++ {
testpods[i].createPingPodNode(oc)
waitPodReady(oc, ns1, testpods[i].name)
}
exutil.By("9. Validate egressIP again from local and remote pods after recreating UDN \n")
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
exutil.By("9.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("9.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
exutil.By("9.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("9.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78274-egressIP still works correctly after OVNK restarted on local and remote client host (layer3 + v6 or dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode, for dualstack, need to label two nodes to be egressNodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2. Obtain a namespace, apply a label to the namespace that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. Create a layer3 UDN in ns1")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48"}
}
for i := 0; i < 2; i++ {
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-78274",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
// For dualstack, need to find out the actual nodes where v4 and v6 egressIP address are assigned
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In the namespace, create local test pod on egressNode, create remote test pod on nonEgressNode ")
var testpod1, testpod2, testpod3 pingPodResourceNode
if ipStackType == "dualstack" {
// testpod1 is local pod on assignedEIPNodev4 for dualstack
testpod1 = pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpod1.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod1.name)
}
// testpod2 is local pod on assignedEIPNodev6 for dualstack
testpod2 = pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpod2.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod2.name)
// testpod3 is remote pod on the other non-egress node
testpod3 = pingPodResourceNode{
name: "hello-pod3-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpod3.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod3.name)
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
var dstHostv4, tcpdumpCmdv4, cmdOnPodv4, tcpdumpCmdv6, cmdOnPodv6 string
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
dstHostv4 = nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 = getRequestURL(dstHostv4)
exutil.By("6.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 = fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 = getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("7. Restart ovnkube-node pod of client host that local egressIP pod is on.\n")
// Since local egressIP pod is on egress node, so just to restart ovnkube-pod of egress node
ovnkPod := ovnkubeNodePod(oc, egressNode1)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
if ipStackType == "dualstack" {
ovnkPod := ovnkubeNodePod(oc, egressNode2)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
}
exutil.By("8. Validate egressIP again from local and remote pods after recreating UDN \n")
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
exutil.By("8.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("8.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
exutil.By("8.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("8.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("9. Restart ovnkube-node pod of client host that remote egressIP pod is on.\n")
// Since local egressIP pod is on egress node, so just to restart ovnkube-pod of egress node
ovnkPod = ovnkubeNodePod(oc, nonEgressNode)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("10. Validate egressIP again from local and remote pods after recreating UDN \n")
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
exutil.By("10.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("10.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
exutil.By("10.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("10.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
4a3658b2-99c9-427d-a69b-de1f48188145
|
Author:jechen-ConnectedOnly-NonPreRelease-High-77654-Validate egressIP with mixed of multiple non-overlapping UDNs and default network(layer3/2 and IPv4 only) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-77654-Validate egressIP with mixed of multiple non-overlapping UDNs and default network(layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allNS []string
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
theOtherNode := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create four more namespaces")
// first namespace is used for default network, second and third namespaces will be used for layer3 UDNs, last two namespaces will be used for layer2 UDN
ns := oc.Namespace()
allNS = append(allNS, ns)
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3 Create two different layer3 UDNs in 2nd and 3rd namespaces, two different layer2 UDN in last two namespaces")
cidr := []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48", "2011:100:200::0/48"}
for i := 1; i < 3; i++ {
createGeneralUDNCRD(oc, allNS[i], "udn-network-layer3-"+allNS[i], ipv4cidr[i-1], ipv6cidr[i-1], cidr[i-1], "layer3")
createGeneralUDNCRD(oc, allNS[i+2], "udn-network-layer2-"+allNS[i+2], ipv4cidr[i-1], ipv6cidr[i-1], cidr[i-1], "layer2")
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77654",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
// testpods1 are local pods that co-locate on egress node
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: egressNode,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods1 are remote pods on the other non-egress node
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: theOtherNode,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from layer3 UDN, default network or layer2 UDN")
var dstHost, primaryInf string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode,%s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
a2209a9b-3609-4fda-8f09-69028206f662
|
Author:jechen-ConnectedOnly-NonPreRelease-High-77655-Validate egressIP with mixed of multiple overlapping UDNs and default network(layer3/2 and IPv4 only) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-77655-Validate egressIP with mixed of multiple overlapping UDNs and default network(layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allNS []string
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
theOtherNode := nodeList.Items[1].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create four more namespaces")
// first namespace is for default network, 2nd and 3rd namespaces will be used for layer3 UDNs, last two namespaces will be used for layer2 UDN
ns := oc.Namespace()
allNS = append(allNS, ns)
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create two overlapping layer3 UDNs in 2rd and 3rd namesapces, create two overlapping layer2 UDN in last two namespaces")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
for i := 1; i < 3; i++ {
createGeneralUDNCRD(oc, allNS[i], "udn-network-layer3-"+allNS[i], ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, allNS[i+2], "udn-network-layer2-"+allNS[i+2], ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, nodeList.Items[0].Name, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77655",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
// testpods1 are lcaol pods that co-locate on egress node
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: egressNode,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods1 are remote pods on the other non-egress node
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: theOtherNode,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from UDN or default network")
var dstHost, primaryInf string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
28dd26da-b0e7-4ddd-abf4-e38163a4d987
|
Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-77744-Validate egressIP Failover with non-overlapping and overlapping UDNs (layer3/2 and IPv4 only) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-77744-Validate egressIP Failover with non-overlapping and overlapping UDNs (layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
udnNS []string
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create five more namespaces")
// first three namespaces will be used for layer3 UDNs, last three namespaces will be used for layer2 UDN
oc.CreateNamespaceUDN()
ns := oc.Namespace()
udnNS = append(udnNS, ns)
for i := 0; i < 5; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
udnNS = append(udnNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range udnNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create non overlapping & overlapping layer3 UDNs in three namesapces, create non-overlapping & overlapping layer2 UDN in last three namespaces")
cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
for i := 0; i < 3; i++ {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer3")
createGeneralUDNCRD(oc, udnNS[i+3], "udn-network-layer2-"+udnNS[i+3], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer2")
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, egressNodes[0], 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77744",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[0]))
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(udnNS))
testpods2 := make([]pingPodResourceNode, len(udnNS))
for i := 0; i < len(udnNS); i++ {
// testpods1 are pods on egressNode
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods2 are pods on nonEgressNode, egressNodes[1] is currently not a egress node as it is not labelled with egressNodeLabel
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[1],
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from overlapping or non-overlapping UDN")
var dstHost, primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNodes[0])
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(udnNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("7. Label the second node with egressNodeLabel, unlabel the first node, verify egressIP still works after failover.\n")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
exutil.By("8. Check the egress node was updated in the egressip object.\n")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 360*time.Second, false, func(cxt context.Context) (bool, error) {
egressIPMaps1 = getAssignedEIPInEIPObject(oc, egressip1.name)
if len(egressIPMaps1) != 1 || egressIPMaps1[0]["node"] == egressNodes[0] {
e2e.Logf("Wait for new egress node applied,try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to update egress node:%v", egressipErr))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[1]))
exutil.By("9. Validate egressIP again after egressIP failover \n")
exutil.By("9.1 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods after egressIP failover")
for i := 0; i < len(udnNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d027c9e2-e9dd-4199-b774-cd590ea9e7ea
|
Author:jechen-ConnectedOnly-NonPreRelease-Medium-78276-non-overlapping and overlapping UDN egressIP Pods will not be affected by the egressIP set on other netnamespace(layer3/2 and IPv4 only) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-Medium-78276-non-overlapping and overlapping UDN egressIP Pods will not be affected by the egressIP set on other netnamespace(layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
allNS []string
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Need at least 1 node for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create five more namespaces")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
for i := 0; i < 5; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector defined in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create non-overlapping & overlapping layer3 UDNs in first three namesapces, create non-overlapping & overlapping layer2 UDN in last three namespaces")
cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
for i := 0; i < 3; i++ {
createGeneralUDNCRD(oc, allNS[i], "udn-network-layer3-"+allNS[i], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer3")
createGeneralUDNCRD(oc, allNS[i+3], "udn-network-layer2-"+allNS[i+3], ipv4cidr[i], ipv6cidr[i], cidr[i], "layer2")
}
exutil.By("4. Get 3 unused IPs from the same subnet of the egressNode,create 3 egressIP objects with same namespaceSelector but different podSelector")
freeIPs := findFreeIPs(oc, egressNode, 3)
o.Expect(len(freeIPs)).Should(o.Equal(3))
podLabelValues := []string{"pink", "blue", "red", "pink", "blue", "red"}
egressips := make([]egressIPResource1, 3)
for i := 0; i < 3; i++ {
egressips[i] = egressIPResource1{
name: "egressip-78276-" + strconv.Itoa(i),
template: egressIP2Template,
egressIP1: freeIPs[i],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: podLabelValues[i],
}
egressips[i].createEgressIPObject2(oc)
defer egressips[i].deleteEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressips[i].name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
}
exutil.By("5. In each namespace, create a test pod, apply to test pod with label that matches podSelector definied in egressIP object")
testpods := make([]pingPodResource, len(allNS))
for i := 0; i < len(allNS); i++ {
testpods[i] = pingPodResource{
name: "hello-pod" + strconv.Itoa(i),
namespace: allNS[i],
template: pingPodTemplate,
}
testpods[i].createPingPod(oc)
waitPodReady(oc, testpods[i].namespace, testpods[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color="+podLabelValues[i])
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from each pod should use egressIP defined in the egressIP object the pod qualifies")
var dstHost, primaryInf string
var infErr error
e2e.Logf("Trying to get physical interface on the egressNode,%s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
exutil.By("Use tcpdump captured on egressNode to verify egressIP each pod")
for i := 0; i < 3; i++ {
_, cmdOnPod := getRequestURL(dstHost)
// Verify from layer3 UDN pods
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[i])).To(o.BeTrue())
// Verify from layer2 UDN pods
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i+3], testpods[i+3].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[i])).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
f82e3bef-dbe7-4db5-a7a4-6f07f67708a6
|
Author:jechen-ConnectedOnly-NonPreRelease-High-78199-egressIP still works correctly after a UDN network gets deleted then recreated (layer3/2 and IPv4 only) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78199-egressIP still works correctly after a UDN network gets deleted then recreated (layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, nodesToBeUsed := getTwoNodesSameSubnet(oc, nodeList)
if !ok || nodesToBeUsed == nil || len(nodesToBeUsed) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodesToBeUsed[0]
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace, create another namespace")
// first namespace for layer3 UDNs, second namespace will be used for layer2 UDN
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3.1 Create an UDN layer3 in ns1")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, egressNode, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-78199",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In the namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply to all pods with label that matches podSelector definied in egressIP object")
var testpods [2][2]pingPodResourceNode
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
testpods[j][i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i) + "-" + allNS[j],
namespace: ns1,
nodename: nodesToBeUsed[i],
template: pingPodNodeTemplate,
}
testpods[j][i].createPingPodNode(oc)
waitPodReady(oc, ns1, testpods[j][i].name)
defer exutil.LabelPod(oc, ns1, testpods[j][i].name, "color-")
err = exutil.LabelPod(oc, ns1, testpods[j][i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("6. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from overlapping or non-overlapping UDN")
var dstHost, primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, ns1, testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
exutil.By("7. Delete local and remote test pods that are associated with UDNs, then delete the UDNs.\n")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
removeResource(oc, true, true, "pod", testpods[j][i].name, "-n", testpods[j][i].namespace)
}
}
removeResource(oc, true, true, "UserDefinedNetwork", "udn-network-layer3-"+ns1, "-n", ns1)
removeResource(oc, true, true, "UserDefinedNetwork", "udn-network-layer2-"+ns2, "-n", ns2)
exutil.By("8. Recreate layer3 and layer2 UDNs, recreate local/remote test pods.\n")
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
testpods[j][i].createPingPodNode(oc)
waitPodReady(oc, ns1, testpods[j][i].name)
defer exutil.LabelPod(oc, ns1, testpods[j][i].name, "color-")
err = exutil.LabelPod(oc, ns1, testpods[j][i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("9. Validate egressIP again after recreating UDNs \n")
exutil.By("9.1 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods again after UDN recreation")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, ns1, testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
f659f67c-4326-4aa8-8dd1-6d83b3a4af78
|
Author:jechen-ConnectedOnly-NonPreRelease-High-78200-egressIP still works correctly after OVNK restarted on local and remote client host (layer3/2 and IPv4 only) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78200-egressIP still works correctly after OVNK restarted on local and remote client host (layer3/2 and IPv4 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, nodesToBeUsed := getTwoNodesSameSubnet(oc, nodeList)
if !ok || nodesToBeUsed == nil || len(nodesToBeUsed) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodesToBeUsed[0]
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2 Obtain a namespace, create a second one, apply a label to both namespaces that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create a layer3 UDN in ns1, and another layer2 UDN in ns2")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPs(oc, egressNode, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-78200",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNode))
exutil.By("5.1 In the namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply to all pods with label that matches podSelector defined in egressIP object")
var testpods [2][2]pingPodResourceNode
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
testpods[j][i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i) + "-" + allNS[j],
namespace: allNS[j],
nodename: nodesToBeUsed[i],
template: pingPodNodeTemplate,
}
testpods[j][i].createPingPodNode(oc)
waitPodReady(oc, allNS[j], testpods[j][i].name)
defer exutil.LabelPod(oc, allNS[j], testpods[j][i].name, "color-")
err = exutil.LabelPod(oc, allNS[j], testpods[j][i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("6. Verify egress traffic from these local or remote egressIP pods should use egressIP as their sourceIP")
var dstHost, primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("6.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNode)
primaryInf, infErr = getSnifPhyInf(oc, egressNode)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost = nslookDomainName("ifconfig.me")
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
exutil.By("6.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[j], testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
exutil.By("7. Restart ovnkube-node pod of client host that local egressIP pod are on.\n")
// Since local egressIP pods are on egress node, restart ovnkube-pod of egress node
ovnkPod := ovnkubeNodePod(oc, egressNode)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("8. Validate egressIP again after restarting ovnkude-node pod of client host that local egressIP pods are on \n")
exutil.By("Use tcpdump captured on egressNode to verify egressIP from local pods again after OVNK restart")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[j], testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
exutil.By("9. Restart ovnkube-node pod of client host that remote egressIP pods are on.\n")
// Since remote egressIP pod is on non-egress node, restart ovnkube-pod of the non-egress node nodesToBeUsed[1]
ovnkPod = ovnkubeNodePod(oc, nodesToBeUsed[1])
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("10. Validate egressIP again after restarting ovnkude-node pod of client host that remote egressIP pods on \n")
exutil.By("Use tcpdump captured on egressNode to verify egressIP from remote pods again after OVNK restart")
for j := 0; j < len(allNS); j++ {
for i := 0; i < 2; i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[j], testpods[j][i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
454c4d6d-4456-4328-ac2b-a91590d7f1a7
|
Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-78293-After reboot egress node EgressIP on UDN still work (layer3/2 and IPv4). [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-78293-After reboot egress node EgressIP on UDN still work (layer3/2 and IPv4). [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Need at least 1 node for the test, the prerequirement was not fullfilled, skip the case!!")
}
egressNode := nodeList.Items[0].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("2 Obtain a namespace, create a second one, apply a label to both namespaces that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create a layer3 UDN in ns1, create a layer2 UDN in ns2")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Get 1 unused IPs from the same subnet of the egressNode,create an egressIP object")
freeIPs := findFreeIPs(oc, egressNode, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip := egressIPResource1{
name: "egressip-78293",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
egressip.createEgressIPObject2(oc)
defer egressip.deleteEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
exutil.By("5. In each namespace, create a test pod, apply to test pod with label that matches podSelector definied in egressIP object")
testpods := make([]pingPodResource, len(allNS))
for i := 0; i < len(allNS); i++ {
testpods[i] = pingPodResource{
name: "hello-pod" + strconv.Itoa(i),
namespace: allNS[i],
template: pingPodTemplate,
}
testpods[i].createPingPod(oc)
waitPodReady(oc, testpods[i].namespace, testpods[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("6. Verify that egress traffic from pod use egressIP as its sourceIP")
primaryInf, infErr := getSnifPhyInf(oc, egressNode)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("7.Reboot egress node.\n")
defer checkNodeStatus(oc, egressNode, "Ready")
rebootNode(oc, egressNode)
checkNodeStatus(oc, egressNode, "NotReady")
checkNodeStatus(oc, egressNode, "Ready")
for i := 0; i < len(allNS); i++ {
waitPodReady(oc, testpods[i].namespace, testpods[i].name)
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("8. Check EgressIP is assigned again after reboot.\n")
verifyExpectedEIPNumInEIPObject(oc, egressip.name, 1)
exutil.By("8. Validate egressIP after node reboot \n")
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNode, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
f1360fba-b7ba-4160-8a14-b7fd2d4337c0
|
Author:jechen-NonHyperShiftHOST-Longduration-NonPreRelease-High-78422-EgressIP on UDN still works on next available egress node after previous assigned egress node was deleted (layer3/2 and IPv4 only). [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-NonHyperShiftHOST-Longduration-NonPreRelease-High-78422-EgressIP on UDN still works on next available egress node after previous assigned egress node was deleted (layer3/2 and IPv4 only). [Disruptive]", func() {
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") {
g.Skip("Skip for non-supported auto scaling machineset platforms!!")
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressIP2Template := filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
exutil.By("1. Get an existing worker node to be non-egress node.")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Need at least 1 worker node, skip the test as the requirement was not fulfilled.")
}
nonEgressNode := nodeList.Items[0].Name
exutil.By("2.Create a new machineset with 2 nodes")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-78422"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
machineNames := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
nodeName0 := clusterinfra.GetNodeNameFromMachine(oc, machineNames[0])
nodeName1 := clusterinfra.GetNodeNameFromMachine(oc, machineNames[1])
exutil.By("3.1 Obtain a namespace, create a second one, apply a label to both namespaces that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
allNS := []string{ns1, ns2}
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3.2. Create a layer3 UDN in ns1, create a layer2 UDN in ns2")
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
createGeneralUDNCRD(oc, ns2, "udn-network-layer2-"+ns2, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
exutil.By("4. Apply EgressLabel to the first node created by the new machineset\n")
// No need to defer unlabeling the node, as the node will be defer deleted with machineset before end of the test case
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeName0, egressNodeLabel, "true")
exutil.By("5. Get an unused IP address from the first node, create an egressip object with the IP\n")
freeIPs := findFreeIPs(oc, nodeName0, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip := egressIPResource1{
name: "egressip-78422",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
egressip.createEgressIPObject2(oc)
defer egressip.deleteEgressIPObject1(oc)
egressIPMaps := getAssignedEIPInEIPObject(oc, egressip.name)
o.Expect(len(egressIPMaps)).Should(o.Equal(1))
o.Expect(egressIPMaps[0]["node"]).Should(o.Equal(nodeName0))
exutil.By("6. Create a test pod on the non-egress node, apply to the pod with a label that matches podSelector in egressIP object \n")
testpods := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
testpods[i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: allNS[i],
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpods[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods[i].name)
defer exutil.LabelPod(oc, allNS[i], testpods[i].name, "color-")
err = exutil.LabelPod(oc, allNS[i], testpods[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("7. Get tcpdump on first egress node, verify that egressIP works on first egress node")
primaryInf, infErr := getSnifPhyInf(oc, nodeName0)
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod := getRequestURL(dstHost)
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, machineNames[0], tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("8. Apply EgressLabel to the second node created by the new machineset.\n")
// No need to defer unlabeling the node, as the node will be deleted with machineset before the end of the test case
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, nodeName1, egressNodeLabel, "true")
exutil.By("9. Delete the first egress node, verify egressIP migrates to the second egress node.\n")
removeResource(oc, true, true, "machines.machine.openshift.io", machineNames[0], "-n", "openshift-machine-api")
o.Eventually(func() bool {
egressIPMaps := getAssignedEIPInEIPObject(oc, egressip.name)
return len(egressIPMaps) == 1 && egressIPMaps[0]["node"] == nodeName1
}, "120s", "10s").Should(o.BeTrue(), "egressIP was not migrated to next available egress node!!")
exutil.By("10. Get tcpdump on second egress node, verify that egressIP still works after migrating to second egress node")
primaryInf, infErr = getSnifPhyInf(oc, nodeName1)
o.Expect(infErr).NotTo(o.HaveOccurred())
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHost)
_, cmdOnPod = getRequestURL(dstHost)
for i := 0; i < len(allNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, nodeName1, tcpdumpCmd, allNS[i], testpods[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
fdd79fac-ae56-4b36-9819-d0267d477c88
|
Author:jechen-ConnectedOnly-NonPreRelease-High-78453-Traffic is load balanced between egress nodes for egressIP UDN (layer3 and IPv4 only) .[Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78453-Traffic is load balanced between egress nodes for egressIP UDN (layer3 and IPv4 only) .[Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressIPTemplate := filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
exutil.By("1. Get two worker nodes that are in same subnet, they will be used as egress-assignable nodes, get a third node as non-egress node\n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 || len(nodeList.Items) < 3 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
var nonEgressNode string
for _, node := range nodeList.Items {
if !contains(egressNodes, node.Name) {
nonEgressNode = node.Name
break
}
}
exutil.By("2. Apply EgressLabel Key to nodes.\n")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
exutil.By("3 Obtain first namespace, apply layer3 UDN CRD to it, add to the namespace with a label matching the namespaceSelector of egressIP object that will be created in step 4")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer3")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Create an egressip object\n")
freeIPs := findFreeIPs(oc, egressNodes[0], 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip1 := egressIPResource1{
name: "egressip-78453",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
egressip1.createEgressIPObject1(oc)
defer egressip1.deleteEgressIPObject1(oc)
//Replace matchLabel with matchExpressions
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchExpressions\":[{\"key\": \"name\", \"operator\": \"In\", \"values\": [\"test\"]}]}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchLabels\":null}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
verifyExpectedEIPNumInEIPObject(oc, egressip1.name, 2)
exutil.By("5. Create two pods, one pod is local to egress node, another pod is remote to egress node ")
pod1 := pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
pod2 := pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns1, pod2.name)
exutil.By("6. Check source IP is randomly one of egress ips.\n")
exutil.By("6.1 Use tcpdump to verify egressIP, create tcpdump sniffer Daemonset first.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump", "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump", "true")
primaryInf, infErr := getSnifPhyInf(oc, egressNodes[0])
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
defer deleteTcpdumpDS(oc, "tcpdump-78453", ns1)
tcpdumpDS, snifErr := createSnifferDaemonset(oc, ns1, "tcpdump-78453", "tcpdump", "true", dstHost, primaryInf, 80)
o.Expect(snifErr).NotTo(o.HaveOccurred())
exutil.By("6.2 Verify egressIP load balancing from local pod.")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod1.namespace, pod1.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for local pod %s", freeIPs[0], freeIPs[1], pod1.name))
exutil.By("6.3 Verify egressIP load balancing from remote pod.")
egressipErr = wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod2.namespace, pod2.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for remote pod %s", freeIPs[0], freeIPs[1], pod2.name))
})
| |||||
test case
|
openshift/openshift-tests-private
|
739b25c6-98e2-446d-9cb0-81934431ff70
|
Author:jechen-ConnectedOnly-NonPreRelease-High-79097-Traffic is load balanced between egress nodes for egressIP UDN (layer2 and IPv4 only) .[Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-79097-Traffic is load balanced between egress nodes for egressIP UDN (layer2 and IPv4 only) .[Serial]", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressIPTemplate := filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
exutil.By("1. Get two worker nodes that are in same subnet, they will be used as egress-assignable nodes, get a third node as non-egress node\n")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 || len(nodeList.Items) < 3 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
var nonEgressNode string
for _, node := range nodeList.Items {
if !contains(egressNodes, node.Name) {
nonEgressNode = node.Name
break
}
}
exutil.By("2. Apply EgressLabel Key to nodes.\n")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
exutil.By("3 Obtain first namespace, apply layer2 UDN CRD to it, add to the namespace with a label matching the namespaceSelector of egressIP object that will be created in step 4")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
cidr := []string{"10.150.0.0/16"}
ipv4cidr := []string{"10.150.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/48"}
createGeneralUDNCRD(oc, ns1, "udn-network-layer2-"+ns1, ipv4cidr[0], ipv6cidr[0], cidr[0], "layer2")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Create an egressip object\n")
freeIPs := findFreeIPs(oc, egressNodes[0], 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip1 := egressIPResource1{
name: "egressip-78453",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
egressip1.createEgressIPObject1(oc)
defer egressip1.deleteEgressIPObject1(oc)
//Replce matchLabel with matchExpressions
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchExpressions\":[{\"key\": \"name\", \"operator\": \"In\", \"values\": [\"test\"]}]}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("egressip/egressip-78453", "-p", "{\"spec\":{\"namespaceSelector\":{\"matchLabels\":null}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
verifyExpectedEIPNumInEIPObject(oc, egressip1.name, 2)
exutil.By("5. Create two pods, one pod is local to egress node, another pod is remote to egress node ")
pod1 := pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
pod2 := pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns1, pod2.name)
exutil.By("6. Check source IP is randomly one of egress ips.\n")
exutil.By("6.1 Use tcpdump to verify egressIP, create tcpdump sniffer Daemonset first.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], "tcpdump", "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], "tcpdump", "true")
primaryInf, infErr := getSnifPhyInf(oc, egressNodes[0])
o.Expect(infErr).NotTo(o.HaveOccurred())
dstHost := nslookDomainName("ifconfig.me")
defer deleteTcpdumpDS(oc, "tcpdump-78453", ns1)
tcpdumpDS, snifErr := createSnifferDaemonset(oc, ns1, "tcpdump-78453", "tcpdump", "true", dstHost, primaryInf, 80)
o.Expect(snifErr).NotTo(o.HaveOccurred())
exutil.By("6.2 Verify egressIP load balancing from local pod.")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod1.namespace, pod1.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for local pod %s", freeIPs[0], freeIPs[1], pod1.name))
exutil.By("6.3 Verify egressIP load balancing from remote pod.")
egressipErr = wait.PollUntilContextTimeout(context.Background(), 100*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := execCommandInSpecificPod(oc, pod2.namespace, pod2.name, "for i in {1..10}; do curl -s "+url+" --connect-timeout 5 ; sleep 2;echo ;done")
o.Expect(err).NotTo(o.HaveOccurred())
if checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[0], true) != nil || checkMatchedIPs(oc, ns1, tcpdumpDS.name, randomStr, freeIPs[1], true) != nil || err != nil {
e2e.Logf("No matched egressIPs in tcpdump log, try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to get both EgressIPs %s,%s in tcpdump for remote pod %s", freeIPs[0], freeIPs[1], pod2.name))
})
| |||||
test case
|
openshift/openshift-tests-private
|
00236774-e110-4cab-9ffd-b1bd4938c5ae
|
Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-77840-Validate egressIP with mixed of multiple non-overlapping UDNs and default network(layer3 and IPv6/dualstack) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-77840-Validate egressIP with mixed of multiple non-overlapping UDNs and default network(layer3 and IPv6/dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace for default network, create two more namespaces for two non-overlapping UDNs")
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
oc.CreateNamespaceUDN()
ns3 := oc.Namespace()
udnNS := []string{ns2, ns3}
allNS := []string{ns1, ns2, ns3}
exutil.By("2.3 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create two different layer3 UDNs in namesapce ns1 and ns2")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48", "2011:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48", "2011:100:200::0/48"}
}
for i := 0; i < 2; i++ {
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], "", "", cidr[i], "layer3")
} else {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[i], ipv6cidr[i], "", "layer3")
}
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-77840",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
testpods3 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
// testpods1 are local pods that co-locate on assignedEIPNodev4 for dualstack
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
}
// testpods2 are local pods that co-locate on assignedEIPNodev6
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
// testpods3 are remote pods on the other non-egress node
testpods3[i] = pingPodResourceNode{
name: "hello-pod3-" + allNS[i],
namespace: allNS[i],
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpods3[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods3[i].name)
}
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
e2e.Logf("Trying to get physical interface on the node,%s", egressNode1)
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
exutil.By("6.1 Verify egressIP from IPv4 perspective")
dstHostv4 := nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 := getRequestURL(dstHostv4)
exutil.By("6.2 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods1[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.3 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods3[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("6.4 Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 := fmt.Sprintf("timeout 90s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 := getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.5 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods2[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.6 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods3[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
74706d73-d481-455a-b50a-5dd0e901208c
|
Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-77841-Validate egressIP with mixed of multiple overlapping UDNs and default network(layer3 and IPv6/dualstack) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-High-77841-Validate egressIP with mixed of multiple overlapping UDNs and default network(layer3 and IPv6/dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2.1 Obtain first namespace for default network, create two more namespaces for two overlapping UDNs")
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
oc.CreateNamespaceUDN()
ns3 := oc.Namespace()
udnNS := []string{ns2, ns3}
allNS := []string{ns1, ns2, ns3}
exutil.By("2.2 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create two overlapping layer3 UDNs between namesapce ns1 and ns2")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48"}
}
for i := 0; i < 2; i++ {
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-77841",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("5.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(allNS))
testpods2 := make([]pingPodResourceNode, len(allNS))
testpods3 := make([]pingPodResourceNode, len(allNS))
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
// testpods1 are local pods that co-locate on assignedEIPNodev4 for dualstack
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods1[i].name)
}
// testpods2 are local pods that co-locate on assignedEIPNodev6
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + allNS[i],
namespace: allNS[i],
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods2[i].name)
// testpods3 are remote pods on the other non-egress node
testpods3[i] = pingPodResourceNode{
name: "hello-pod3-" + allNS[i],
namespace: allNS[i],
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpods3[i].createPingPodNode(oc)
waitPodReady(oc, allNS[i], testpods3[i].name)
}
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
e2e.Logf("Trying to get physical interface on the node,%s", egressNode1)
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
for i := 0; i < len(allNS); i++ {
if ipStackType == "dualstack" {
exutil.By("6.1 Verify egressIP from IPv4 perspective")
dstHostv4 := nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 := getRequestURL(dstHostv4)
exutil.By("6.2 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods1[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.3 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, allNS[i], testpods3[i].name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("6.4 Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 := fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 := getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.5 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods2[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.6 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, allNS[i], testpods3[i].name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
95805d99-26ae-4b7f-982f-73b90ec8ca4b
|
Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-77842-Validate egressIP Failover with non-overlapping and overlapping UDNs (layer3 and IPv6 only) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-Longduration-NonPreRelease-High-77842-Validate egressIP Failover with non-overlapping and overlapping UDNs (layer3 and IPv6 only) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP2Template = filepath.Join(buildPruningBaseDir, "egressip-config2-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1 Get node list, apply EgressLabel Key to one node to make it egressNode")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
ok, egressNodes := getTwoNodesSameSubnet(oc, nodeList)
if !ok || egressNodes == nil || len(egressNodes) < 2 {
g.Skip("The prerequirement was not fullfilled, skip the case!!")
}
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel, "true")
exutil.By("2.1 Create three UDN namespaces")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
oc.CreateNamespaceUDN()
ns3 := oc.Namespace()
udnNS := []string{ns1, ns2, ns3}
exutil.By("3.1. Create non-overlapping layer3 UDNs between ns1 and ns2, overlapping layer3 UDN between ns2 and ns3")
exutil.By("3.2 Apply a label to all namespaces that matches namespaceSelector definied in egressIP object")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
}
for i := 0; i < len(udnNS); i++ {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", udnNS[i], "org-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", udnNS[i], "org=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], "", "", cidr[i], "layer3")
} else {
createGeneralUDNCRD(oc, udnNS[i], "udn-network-layer3-"+udnNS[i], ipv4cidr[i], ipv6cidr[i], "", "layer3")
}
}
exutil.By("4. Create an egressip object, verify egressIP is assigned to egress node")
freeIPs := findFreeIPv6s(oc, egressNodes[0], 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip1 := egressIPResource1{
name: "egressip-77842",
template: egressIP2Template,
egressIP1: freeIPs[0],
nsLabelKey: "org",
nsLabelValue: "qe",
podLabelKey: "color",
podLabelValue: "pink",
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject2(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
o.Expect(len(egressIPMaps1)).Should(o.Equal(1))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[0]))
exutil.By("4.1 In each namespace, create two test pods, the first one on egressNode, the second one on nonEgressNode ")
exutil.By("4.2 Apply label to all pods that matches podSelector definied in egressIP object")
testpods1 := make([]pingPodResourceNode, len(udnNS))
testpods2 := make([]pingPodResourceNode, len(udnNS))
for i := 0; i < len(udnNS); i++ {
// testpods1 are pods on egressNode
testpods1[i] = pingPodResourceNode{
name: "hello-pod1-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[0],
template: pingPodNodeTemplate,
}
testpods1[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods1[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods1[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
// testpods2 are pods on nonEgressNode, egressNodes[1] is currently not a egress node as it has not been labelled with egressNodeLabel yet
testpods2[i] = pingPodResourceNode{
name: "hello-pod2-" + udnNS[i],
namespace: udnNS[i],
nodename: egressNodes[1],
template: pingPodNodeTemplate,
}
testpods2[i].createPingPodNode(oc)
waitPodReady(oc, udnNS[i], testpods2[i].name)
defer exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color-")
err = exutil.LabelPod(oc, udnNS[i], testpods2[i].name, "color=pink")
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("5. Verify egressIP from each namespace, egress traffic from these pods should use egressIP as their sourceIP regardless it is from overlapping or non-overlapping UDN")
var primaryInf, tcpdumpCmd, cmdOnPod string
var infErr error
exutil.By("5.1 Use tcpdump to verify egressIP.")
e2e.Logf("Trying to get physical interface on the egressNode %s", egressNodes[0])
primaryInf, infErr = getSnifPhyInf(oc, nodeList.Items[0].Name)
o.Expect(infErr).NotTo(o.HaveOccurred())
tcpdumpCmd = fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPod = getRequestURL("[" + dstHostv6 + "]")
exutil.By("5.2 Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
for i := 0; i < len(udnNS); i++ {
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[0], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
exutil.By("6. Label the second node with egressNodeLabel, unlabel the first node, verify egressIP still works after failover.\n")
e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[0], egressNodeLabel)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNodes[1], egressNodeLabel, "true")
exutil.By("7. Check the egress node was updated in the egressip object.\n")
egressipErr := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 360*time.Second, false, func(cxt context.Context) (bool, error) {
egressIPMaps1 = getAssignedEIPInEIPObject(oc, egressip1.name)
if len(egressIPMaps1) != 1 || egressIPMaps1[0]["node"] == egressNodes[0] {
e2e.Logf("Wait for new egress node applied,try next round.")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to update egress node:%v", egressipErr))
o.Expect(egressIPMaps1[0]["node"]).Should(o.ContainSubstring(egressNodes[1]))
exutil.By("8. Validate egressIP again after egressIP failover \n")
for i := 0; i < len(udnNS); i++ {
exutil.By("8.1 Use tcpdump captured on egressNode to verify egressIP from local pods after egressIP failover")
tcpdumOutput := getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods1[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
exutil.By("8.2 Use tcpdump captured on egressNode to verify egressIP from remote pods after egressIP failover")
tcpdumOutput = getTcpdumpOnNodeCmdFromPod(oc, egressNodes[1], tcpdumpCmd, udnNS[i], testpods2[i].name, cmdOnPod)
o.Expect(strings.Contains(tcpdumOutput, freeIPs[0])).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
887f0bd4-e71b-4a26-a2b4-f019441c52ea
|
Author:jechen-ConnectedOnly-NonPreRelease-High-78247-egressIP still works correctly after a UDN network gets deleted then recreated (layer3 + v6 or dualstack) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78247-egressIP still works correctly after a UDN network gets deleted then recreated (layer3 + v6 or dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode, for dualstack, need to label two nodes to be egressNodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2. Obtain a namespace ns1, apply to ns1 with label that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1 Create a layer3 UDN in ns1")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48"}
}
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-78247",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
// For dualstack, need to find out the actual nodes where v4 and v6 egressIP address are assigned
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In the namespace, create local test pod on egressNode, create remote test pod on nonEgressNode ")
var testpods []pingPodResourceNode
var testpod1 pingPodResourceNode
if ipStackType == "dualstack" {
// testpod1 is local pod on assignedEIPNodev4 for dualstack
testpod1 = pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpod1.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod1.name)
testpods = append(testpods, testpod1)
}
// testpod2 is local pod on assignedEIPNodev6 for dualstack
testpod2 := pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpod2.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod2.name)
testpods = append(testpods, testpod2)
// testpod3 is remote pod on the other non-egress node
testpod3 := pingPodResourceNode{
name: "hello-pod3-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpod3.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod3.name)
testpods = append(testpods, testpod3)
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
var dstHostv4, tcpdumpCmdv4, cmdOnPodv4, tcpdumpCmdv6, cmdOnPodv6 string
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
dstHostv4 = nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 = getRequestURL(dstHostv4)
exutil.By("6.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 = fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 = getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("7. Delete local and remote test pods that are associated with UDN in ns1 first, then delete the UDN.\n")
for i := 0; i < len(testpods); i++ {
removeResource(oc, true, true, "pod", testpods[i].name, "-n", testpods[i].namespace)
}
removeResource(oc, true, true, "UserDefinedNetwork", "udn-network-layer3-"+ns1, "-n", ns1)
exutil.By("8. Recreate the UDN and local/remote test pods in ns1.\n")
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
for i := 0; i < len(testpods); i++ {
testpods[i].createPingPodNode(oc)
waitPodReady(oc, ns1, testpods[i].name)
}
exutil.By("9. Validate egressIP again from local and remote pods after recreating UDN \n")
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
exutil.By("9.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("9.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
exutil.By("9.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("9.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
ca1fa1f3-5f43-4ad9-a3fd-1ccd8b8a1a24
|
Author:jechen-ConnectedOnly-NonPreRelease-High-78274-egressIP still works correctly after OVNK restarted on local and remote client host (layer3 + v6 or dualstack) [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressip_udn.go
|
g.It("Author:jechen-ConnectedOnly-NonPreRelease-High-78274-egressIP still works correctly after OVNK restarted on local and remote client host (layer3 + v6 or dualstack) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIP1Template = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1. Get node list, apply EgressLabel Key to one node to make it egressNode, for dualstack, need to label two nodes to be egressNodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
var egressNode1, egressNode2, nonEgressNode string
var freeIPs []string
if ipStackType == "dualstack" && len(nodeList.Items) < 3 {
g.Skip("Need 3 nodes for the test on dualstack cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "ipv6single" && len(nodeList.Items) < 2 {
g.Skip("Need 2 nodes for the test on singlev6 cluster, the prerequirement was not fullfilled, skip the case!!")
}
if ipStackType == "dualstack" {
egressNode1 = nodeList.Items[0].Name
egressNode2 = nodeList.Items[1].Name
nonEgressNode = nodeList.Items[2].Name
freeIPs = findFreeIPs(oc, egressNode1, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPv6s := findFreeIPv6s(oc, egressNode2, 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
freeIPs = append(freeIPs, freeIPv6s[0])
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode2, egressNodeLabel, "true")
} else if ipStackType == "ipv6single" {
egressNode1 = nodeList.Items[0].Name
nonEgressNode = nodeList.Items[1].Name
freeIPs = findFreeIPv6s(oc, egressNode1, 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
}
e2e.Logf("egressIPs to use: %s", freeIPs)
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode1, egressNodeLabel, "true")
exutil.By("2. Obtain a namespace, apply a label to the namespace that matches namespaceSelector definied in egressIP object")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. Create a layer3 UDN in ns1")
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/48"}
} else {
ipv4cidr = []string{"10.150.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/48"}
}
for i := 0; i < 2; i++ {
if ipStackType == "ipv6single" {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, "", "", cidr[0], "layer3")
} else {
createGeneralUDNCRD(oc, ns1, "udn-network-layer3-"+ns1, ipv4cidr[0], ipv6cidr[0], "", "layer3")
}
}
exutil.By("4. Create an egressip object")
egressip1 := egressIPResource1{
name: "egressip-78274",
template: egressIP1Template,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip1.deleteEgressIPObject1(oc)
egressip1.createEgressIPObject1(oc)
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressip1.name)
// For dualstack, need to find out the actual nodes where v4 and v6 egressIP address are assigned
var assignedEIPNodev4, assignedEIPNodev6, assignedEIPv6Addr string
if ipStackType == "dualstack" {
o.Expect(len(egressIPMaps1) == 2).Should(o.BeTrue())
for _, eipMap := range egressIPMaps1 {
if netutils.IsIPv4String(eipMap["egressIP"]) {
assignedEIPNodev4 = eipMap["node"]
}
if netutils.IsIPv6String(eipMap["egressIP"]) {
assignedEIPNodev6 = eipMap["node"]
assignedEIPv6Addr = eipMap["egressIP"]
}
}
o.Expect(assignedEIPNodev4).NotTo(o.Equal(""))
o.Expect(assignedEIPNodev6).NotTo(o.Equal(""))
e2e.Logf("For the dualstack EIP, v4 EIP is currently assigned to node: %s, v6 EIP is currently assigned to node: %s", assignedEIPNodev4, assignedEIPNodev6)
} else {
o.Expect(len(egressIPMaps1) == 1).Should(o.BeTrue())
assignedEIPNodev6 = egressNode1
assignedEIPv6Addr = egressIPMaps1[0]["egressIP"]
}
exutil.By("5.1 In the namespace, create local test pod on egressNode, create remote test pod on nonEgressNode ")
var testpod1, testpod2, testpod3 pingPodResourceNode
if ipStackType == "dualstack" {
// testpod1 is local pod on assignedEIPNodev4 for dualstack
testpod1 = pingPodResourceNode{
name: "hello-pod1-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev4,
template: pingPodNodeTemplate,
}
testpod1.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod1.name)
}
// testpod2 is local pod on assignedEIPNodev6 for dualstack
testpod2 = pingPodResourceNode{
name: "hello-pod2-" + ns1,
namespace: ns1,
nodename: assignedEIPNodev6,
template: pingPodNodeTemplate,
}
testpod2.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod2.name)
// testpod3 is remote pod on the other non-egress node
testpod3 = pingPodResourceNode{
name: "hello-pod3-" + ns1,
namespace: ns1,
nodename: nonEgressNode,
template: pingPodNodeTemplate,
}
testpod3.createPingPodNode(oc)
waitPodReady(oc, ns1, testpod3.name)
exutil.By("6. Use tcpdump captured on egressNode to verify egressIP from local pods and remote pods")
primaryInf, infErr := getSnifPhyInf(oc, egressNode1)
o.Expect(infErr).NotTo(o.HaveOccurred())
var dstHostv4, tcpdumpCmdv4, cmdOnPodv4, tcpdumpCmdv6, cmdOnPodv6 string
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
dstHostv4 = nslookDomainName("ifconfig.me")
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmdv4 = fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s host %s", primaryInf, dstHostv4)
_, cmdOnPodv4 = getRequestURL(dstHostv4)
exutil.By("6.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("6.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
tcpdumpCmdv6 = fmt.Sprintf("timeout 60s tcpdump -c 3 -nni %s ip6 and host %s", primaryInf, dstHostv6)
_, cmdOnPodv6 = getRequestURL("[" + dstHostv6 + "]")
exutil.By("6.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("6.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("7. Restart ovnkube-node pod of client host that local egressIP pod is on.\n")
// Since local egressIP pod is on egress node, so just to restart ovnkube-pod of egress node
ovnkPod := ovnkubeNodePod(oc, egressNode1)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
if ipStackType == "dualstack" {
ovnkPod := ovnkubeNodePod(oc, egressNode2)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
}
exutil.By("8. Validate egressIP again from local and remote pods after recreating UDN \n")
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
exutil.By("8.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("8.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
exutil.By("8.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("8.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("9. Restart ovnkube-node pod of client host that remote egressIP pod is on.\n")
// Since local egressIP pod is on egress node, so just to restart ovnkube-pod of egress node
ovnkPod = ovnkubeNodePod(oc, nonEgressNode)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnkPod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.By("10. Validate egressIP again from local and remote pods after recreating UDN \n")
if ipStackType == "dualstack" {
exutil.By("Verify egressIP from IPv4 perspective")
exutil.By("10.1 Verify v4 egressIP from test pods local to egress node")
tcpdumOutputv4 := getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod1.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
exutil.By("10.2 Verify v4 egressIP from test pods remote to egress node")
tcpdumOutputv4 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev4, tcpdumpCmdv4, ns1, testpod3.name, cmdOnPodv4)
o.Expect(strings.Contains(tcpdumOutputv4, freeIPs[0])).To(o.BeTrue())
}
exutil.By("Verify egressIP from IPv6 perspective")
exutil.By("10.3 Verify v6 egressIP from test pods local to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod2.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
exutil.By("10.4 Verify v6 egressIP from test pods remote to egress node")
tcpdumOutputv6 = getTcpdumpOnNodeCmdFromPod(oc, assignedEIPNodev6, tcpdumpCmdv6, ns1, testpod3.name, cmdOnPodv6)
o.Expect(strings.Contains(tcpdumOutputv6, assignedEIPv6Addr)).To(o.BeTrue())
})
| |||||
test
|
openshift/openshift-tests-private
|
edd56e64-6566-40be-921d-8673638e71a4
|
egressqos
|
import (
"fmt"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
package networking
import (
"fmt"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = g.Describe("[sig-networking] SDN egressqos", func() {
defer g.GinkgoRecover()
var (
dscpSvcIP string
externalPrivateIP string
dscpSvcPort = "9096"
a *exutil.AwsClient
oc = exutil.NewCLI("networking-"+getRandomString(), exutil.KubeConfigPath())
)
g.BeforeEach(func() {
platform := exutil.CheckPlatform(oc)
networkType := checkNetworkType(oc)
e2e.Logf("\n\nThe platform is %v, networkType is %v\n", platform, networkType)
acceptedPlatform := strings.Contains(platform, "aws")
if !acceptedPlatform || !strings.Contains(networkType, "ovn") {
g.Skip("Test cases should be run on AWS cluster with ovn network plugin, skip for other platforms or other non-OVN network plugin!!")
}
switch platform {
case "aws":
e2e.Logf("\n AWS is detected, running the case on AWS\n")
if dscpSvcIP == "" {
getAwsCredentialFromCluster(oc)
a = exutil.InitAwsSession()
_, err := getAwsIntSvcInstanceID(a, oc)
if err != nil {
e2e.Logf("There is no int svc instance in this cluster, %v", err)
g.Skip("There is no int svc instance in this cluster, skip the cases!!")
}
ips := getAwsIntSvcIPs(a, oc)
publicIP, ok := ips["publicIP"]
if !ok {
e2e.Logf("no public IP found for Int Svc instance")
}
privateIP, ok1 := ips["privateIP"]
if !ok1 {
e2e.Logf("no private IP found for Int Svc instance")
}
dscpSvcIP = publicIP
externalPrivateIP = privateIP
err = installDscpServiceOnAWS(a, oc, publicIP)
if err != nil {
e2e.Logf("No dscp-echo service installed on the bastion host, %v", err)
g.Skip("No dscp-echo service installed on the bastion host, skip the cases!!")
}
}
default:
e2e.Logf("cloud provider %v is not supported for auto egressqos cases for now", platform)
g.Skip("cloud provider %v is not supported for auto egressqos cases for now, skip the cases!")
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51732-EgressQoS resource applies only to its namespace.", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
dscpValue1 = 40
dscpValue2 = 30
dstCIDR = dscpSvcIP + "/" + "32"
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
)
exutil.By("1) ####### Create egressqos and testpod in one namespace ##########")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
e2e.Logf("create namespace %s", ns1)
egressQos1 := egressQosResource{
name: "default",
namespace: ns1,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod1 := egressQosResource{
name: "test-pod",
namespace: ns1,
kind: "pod",
tempfile: testPodTmpFile,
}
defer egressQos1.delete(oc)
egressQos1.create(oc, "NAME="+egressQos1.name, "NAMESPACE="+egressQos1.namespace, "CIDR1="+dstCIDR, "CIDR2="+"1.1.1.1/32")
defer testPod1.delete(oc)
testPod1.create(oc, "NAME="+testPod1.name, "NAMESPACE="+testPod1.namespace)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("testpod isn't ready"))
exutil.By("2) ####### Create egressqos and testpod in a new namespace ##########")
oc.SetupProject()
ns2 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns2)
e2e.Logf("create namespace %s", ns2)
egressQos2 := egressQosResource{
name: "default",
namespace: ns2,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod2 := egressQosResource{
name: "test-pod",
namespace: ns2,
kind: "pod",
tempfile: testPodTmpFile,
}
defer egressQos2.delete(oc)
egressQos2.create(oc, "NAME="+egressQos2.name, "NAMESPACE="+egressQos2.namespace, "CIDR1="+"1.1.1.1/32", "CIDR2="+dstCIDR)
defer testPod2.delete(oc)
testPod2.create(oc, "NAME="+testPod2.name, "NAMESPACE="+testPod2.namespace)
errPodRdy2 := waitForPodWithLabelReady(oc, ns2, "name="+testPod2.name)
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("testpod isn't ready"))
exutil.By("3) ####### Try to create a new egressqos in ns2 ##########")
egressQos3 := egressQosResource{
name: "newegressqos",
namespace: ns2,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
output, _ := egressQos3.createWithOutput(oc, "NAME="+egressQos3.name, "NAMESPACE="+egressQos3.namespace, "CIDR1="+"1.1.1.1/32", "CIDR2="+dstCIDR)
//Only one egressqos is permitted for one namespace
o.Expect(output).Should(o.ContainSubstring("Invalid value"))
exutil.By("4) ####### Check dscp value of egress traffic of ns1 ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod1.namespace, testPod1.name, dscpSvcIP, dscpSvcPort)
chkRes1 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue1)
o.Expect(chkRes1).Should(o.BeTrue())
exutil.By("5 ####### Check dscp value of egress traffic of ns2 ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes2 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile2, dscpValue2)
o.Expect(chkRes2).Should(o.BeTrue())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51749-if ipv4 egress traffic matches multiple egressqos rules, the first one will take effect.", func() {
exutil.By("1) ############## create egressqos and testpod #################")
var (
dscpValue = 40
dstCIDR = dscpSvcIP + "/" + "32"
pktFile = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := egressQosResource{
name: "test-pod",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
//egressqos has two rules which can match egress traffic
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "CIDR2="+dstCIDR)
defer testPod.delete(oc)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("2) ####### Check dscp value of egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
// the first matched egressqos rule can take effect
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51751-if egress traffic doesn't match egressqos rules, dscp value will not change.", func() {
var (
dscpValue1 = 40
dscpValue2 = 30
dscpValue = 0
pktFile = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := egressQosResource{
name: "test-pod",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
exutil.By("1) ############## create egressqos and testpod #################")
//egressqos has two rules which neither matches egress traffic
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"1.1.1.1/32", "CIDR2="+"2.2.2.2/32")
defer testPod.delete(oc)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("2) ####### Check dscp value of egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
// dscp value of egress traffic doesn't change
chkRes1 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue1)
o.Expect(chkRes1).Should(o.Equal(false))
chkRes2 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue2)
o.Expect(chkRes2).Should(o.Equal(false))
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51839-egressqos can work fine when new/update/delete matching pods.", func() {
var (
dscpValue1 = 40
dscpValue2 = 30
priorityValue = "Critical"
dstCIDR = dscpSvcIP + "/" + "32"
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
pktFile3 = getRandomString() + "pcap.txt"
pktFile4 = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod1 := egressQosResource{
name: "testpod1",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
testPod2 := egressQosResource{
name: "testpod2",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+testPod1.name)
exutil.By("2) ####### Create testpod1 which match the second podselector ##########")
defer testPod1.delete(oc)
testPod1.create(oc, "NAME="+testPod1.name, "NAMESPACE="+testPod1.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("3) ####### Check dscp value in egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod1.namespace, testPod1.name, dscpSvcIP, dscpSvcPort)
chkRes1 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue2)
o.Expect(chkRes1).Should(o.BeTrue())
exutil.By("4) ####### Create testpod2 which match the second podselector ##########")
defer testPod2.delete(oc)
testPod2.create(oc, "NAME="+testPod2.name, "NAMESPACE="+testPod2.namespace)
errPodRdy = waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod2.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("5) ####### Check dscp value in egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes2 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue2)
o.Expect(chkRes2).Should(o.BeTrue())
exutil.By("6) ####### Update testpod2 label to match the first egressqos rule ##########")
defer exutil.LabelPod(oc, testPod2.namespace, testPod2.name, "priority-")
err := exutil.LabelPod(oc, testPod2.namespace, testPod2.name, "priority="+priorityValue)
o.Expect(err).NotTo(o.HaveOccurred())
defer rmPktsFile(a, oc, dscpSvcIP, pktFile3)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile3)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes3 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile3, dscpValue1)
o.Expect(chkRes3).Should(o.BeTrue())
exutil.By("7) ####### Remove testpod1 and check egress traffic ##########")
testPod1.delete(oc)
defer rmPktsFile(a, oc, dscpSvcIP, pktFile4)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile4)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes4 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile4, dscpValue1)
o.Expect(chkRes4).Should(o.BeTrue())
})
// author: [email protected]
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51840-egressqos can work fine when new/update/delete egressqos rules", func() {
var (
dscpValue1 = 40
dscpValue2 = 30
dscpValue3 = 0
dscpValue4 = 20
priorityValue = "Critical"
dstCIDR = dscpSvcIP + "/" + "32"
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
pktFile3 = getRandomString() + "pcap.txt"
pktFile4 = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := egressQosResource{
name: "testpod",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+testPod.name)
exutil.By("2) ####### Create testpod1 which match the second podselector ##########")
defer testPod.delete(oc)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
//label testpod with priority Critical
err := exutil.LabelPod(oc, testPod.namespace, testPod.name, "priority="+priorityValue)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) ####### Check dscp value in egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue1)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("4) ####### Change egressqos rule and send traffic again ##########")
patchYamlToRestore := `[{"op":"replace","path":"/spec/egress/0/podSelector/matchLabels/priority","value":"Low"}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "-p", patchYamlToRestore).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("egressqos.k8s.ovn.org/default patched"))
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile2, dscpValue2)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("5) ####### delete egressqos rule and send traffic again ##########")
patchYamlToRestore = `[{"op":"remove","path":"/spec/egress/1"}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "-p", patchYamlToRestore).Output()
//output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "--patch-file", patchFile2).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("egressqos.k8s.ovn.org/default patched"))
defer rmPktsFile(a, oc, dscpSvcIP, pktFile3)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile3)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile3, dscpValue3)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("6) ####### add new egressqos rule and send traffic again ##########")
patchYamlToRestore = `[{"op": "add", "path": "/spec/egress/1", "value":{"dscp":20,"dstCIDR": "0.0.0.0/0"}}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "-p", patchYamlToRestore).Output()
//output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "--patch-file", patchFile3).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("egressqos.k8s.ovn.org/default patched"))
defer rmPktsFile(a, oc, dscpSvcIP, pktFile4)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile4)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile4, dscpValue4)
o.Expect(chkRes).Should(o.BeTrue())
})
// author: [email protected]
g.It("Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-74098-egressqos status is correct", func() {
var (
priorityValue = "Critical"
dstCIDR = dscpSvcIP + "/" + "32"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
egressQos := egressQosResource{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+"testPod")
exutil.By("2) ####### check egressqos status info is correct ##########")
statusInfo, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressqos", "default", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(statusInfo, "STATUS")).To(o.BeTrue())
o.Expect(strings.Contains(statusInfo, "EgressQoS Rules applied")).To(o.BeTrue())
exutil.By("3) ####### check egressqos status detail info is correct ##########")
chkEgressQosStatus(oc, ns)
})
g.It("Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-74204-egressqos addressset updated correctly", func() {
var (
priorityValue = "Minor"
dstCIDR = dscpSvcIP + "/" + "32"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
testPodTmpFile = filepath.Join(networkBaseDir, "ping-for-pod-specific-node-template.yaml")
podLable = "egress-qos-pod"
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
workerNodeList := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker")
if len(workerNodeList) < 2 {
g.Skip("These cases can only be run for cluster that has at least two worker nodes")
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
egressQos := egressQosResource{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+podLable)
exutil.By("2) ####### Create 2 testpods on different nodes which don't match the any egressqos rule ##########")
// create 2 testpods which located on different nodes
testPod1 := egressQosResource{
name: "testpod1",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
defer testPod1.delete(oc)
testPod1.create(oc, "NAME="+testPod1.name, "NAMESPACE="+testPod1.namespace, "NODENAME="+workerNodeList[0])
testPod2 := egressQosResource{
name: "testpod2",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
defer testPod2.delete(oc)
testPod2.create(oc, "NAME="+testPod2.name, "NAMESPACE="+testPod2.namespace, "NODENAME="+workerNodeList[1])
errPodRdy := waitForPodWithLabelReady(oc, ns, "name=hello-pod")
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod1 isn't ready"))
exutil.By("4) ####### Check egressqos addresset. ##########")
addSet1 := getEgressQosAddSet(oc, workerNodeList[0], ns)
addSet2 := getEgressQosAddSet(oc, workerNodeList[1], ns)
chkAddSet(oc, testPod1.name, ns, addSet1, false)
chkAddSet(oc, testPod2.name, ns, addSet1, false)
chkAddSet(oc, testPod1.name, ns, addSet2, false)
chkAddSet(oc, testPod2.name, ns, addSet2, false)
exutil.By("5) ####### update testpod1 to match egressqos rule. Only addresset on worker0 updated ##########")
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", testPod1.name, "name="+podLable, "-n", testPod1.namespace, "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
addSet1 = getEgressQosAddSet(oc, workerNodeList[0], ns)
addSet2 = getEgressQosAddSet(oc, workerNodeList[1], ns)
chkAddSet(oc, testPod1.name, ns, addSet1, true)
chkAddSet(oc, testPod2.name, ns, addSet1, false)
chkAddSet(oc, testPod1.name, ns, addSet2, false)
chkAddSet(oc, testPod2.name, ns, addSet2, false)
exutil.By("6) ####### update testpod2 to match egressqos rule. Only addresset on worker1 updated ##########")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", testPod2.name, "priority="+priorityValue, "-n", testPod1.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
addSet1 = getEgressQosAddSet(oc, workerNodeList[0], ns)
addSet2 = getEgressQosAddSet(oc, workerNodeList[1], ns)
chkAddSet(oc, testPod1.name, ns, addSet1, true)
chkAddSet(oc, testPod2.name, ns, addSet1, false)
chkAddSet(oc, testPod1.name, ns, addSet2, false)
chkAddSet(oc, testPod2.name, ns, addSet2, true)
})
// author: [email protected]
g.It("Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-73642-Egress traffic with EgressIP and EgressQos applied can work fine.[Disruptive]", func() {
exutil.By("1) ############## create egressqos and egressip #################")
var (
dstCIDR = externalPrivateIP + "/" + "32"
dscpValue = 40
pktFile = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
egressIPTemplate = filepath.Join(networkBaseDir, "egressip-config2-template.yaml")
egressNodeLabel = "k8s.ovn.org/egress-assignable"
podLabelKey = "color"
podLabelValue = "blue"
nodeLabelKey = "name"
nodeLabelValue = "test"
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
workers := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker")
exutil.By("Apply label to namespace\n")
nsLabel := nodeLabelKey + "=" + nodeLabelValue
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, nsLabel).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create an egressip object\n")
exutil.By("Apply EgressLabel Key to one node.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[0], egressNodeLabel, "true")
freeIPs := findFreeIPs(oc, workers[0], 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip := networkingRes{
name: "egressip-" + getRandomString(),
namespace: ns,
kind: "egressip",
tempfile: egressIPTemplate,
}
defer removeResource(oc, true, true, "egressip", egressip.name)
egressip.create(oc, "NAME="+egressip.name, "EGRESSIP1="+freeIPs[0], "NSLABELKEY="+nodeLabelKey, "NSLABELVALUE="+nodeLabelValue,
"PODLABELKEY="+podLabelKey, "PODLABELVALUE="+podLabelValue)
verifyExpectedEIPNumInEIPObject(oc, egressip.name, 1)
egressQos := networkingRes{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := networkingRes{
name: "test-pod",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
//create egressqos
defer removeResource(oc, true, true, "egressqos", egressQos.name, "-n", egressQos.namespace)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "CIDR2="+dstCIDR)
defer removeResource(oc, true, true, "pod", testPod.name, "-n", testPod.namespace)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, ns, "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
podLable := podLabelKey + "=" + podLabelValue
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", testPod.name, "-n", testPod.namespace, podLable).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) ####### Check dscp value of egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile)
startCurlTraffic(oc, testPod.namespace, testPod.name, externalPrivateIP, dscpSvcPort)
// the first matched egressqos rule and egressip can take effect
chkRes := chkDSCPandEIPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue, freeIPs[0])
o.Expect(chkRes).Should(o.BeTrue())
})
g.It("Author:yingwang-High-74054-Egress traffic works with ANP, BANP and NP with EgressQos. [Disruptive]", func() {
var (
dstCIDR = externalPrivateIP + "/" + "32"
dscpValue = 40
testDataDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(testDataDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
banpRuleName = "banp-rule"
anpRuleName = "anp-rule"
)
ns := oc.Namespace()
exutil.By("####### 1. Create pod and egressqos #############")
egressQos := networkingRes{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := networkingRes{
name: "test-pod",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
//create egressqos
defer removeResource(oc, true, true, "egressqos", egressQos.name, "-n", egressQos.namespace)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "CIDR2="+dstCIDR)
defer removeResource(oc, true, true, "pod", testPod.name, "-n", testPod.namespace)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, ns, "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("########### 2. Create a Admin Network Policy with deny action ############")
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-74054",
subjectKey: matchLabelKey,
subjectVal: ns,
priority: 10,
ruleName: anpRuleName,
ruleAction: "Deny",
cidr: dstCIDR,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("############ 3. Verify ANP blocks matching egress traffic #############")
CurlPod2HostFail(oc, ns, testPod.name, externalPrivateIP, dscpSvcPort)
exutil.By("############## 4. edit ANP rule to allow egress traffic #############")
patchYamlToRestore := `[{"op":"replace","path":"/spec/egress/0/action","value":"Allow"}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "adminnetworkpolicy.policy.networking.k8s.io/anp-74054 patched")).To(o.BeTrue())
exutil.By("############# 5. check egress traffic can pass and dscp value is correct ###########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("############## 6. edit ANP rule to action pass #############")
patchYamlToRestore = `[{"op":"replace","path":"/spec/egress/0/action","value":"Pass"}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "adminnetworkpolicy.policy.networking.k8s.io/anp-74054 patched")).To(o.BeTrue())
exutil.By("############ 7. Create a Baseline Admin Network Policy with deny action ############")
banpCR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: ns,
ruleName: banpRuleName,
ruleAction: "Deny",
cidr: dstCIDR,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleCIDRBANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("############# 8. Verify BANP blocks matching egress traffic #########")
CurlPod2HostFail(oc, ns, testPod.name, externalPrivateIP, dscpSvcPort)
exutil.By("############ 9. edit BANP rule to allow egress traffic ###############")
patchYamlToRestore = `[{"op":"replace","path":"/spec/egress/0/action","value":"Allow"}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchYamlToRestore).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "baselineadminnetworkpolicy.policy.networking.k8s.io/default patched")).To(o.BeTrue())
exutil.By("############# 10. check egress traffic can pass and dscp value is correct #############")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile2, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
})
})
var _ = g.Describe("[sig-networking] SDN egressqos negative test", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-egressqos", exutil.KubeConfigPath())
)
g.It("Author:qiowang-NonHyperShiftHOST-Medium-52365-negative validation for egressqos.", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
invalideDstCIDR = []string{"abc/24", "$@#/132", "asd::/64", "1.2.3.4/58", "abc::/158"}
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
egressQos := egressQosResource{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
for _, cidr := range invalideDstCIDR {
exutil.By("####### Create egressqos with wrong syntax/value CIDR rules " + cidr + " ##########")
output, _ := egressQos.createWithOutput(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1=1.1.1.1/32", "CIDR2="+cidr)
o.Expect(output).Should(o.ContainSubstring("Invalid value"))
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
43498131-3435-4970-8d63-5fd67b538667
|
NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51732-EgressQoS resource applies only to its namespace.
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51732-EgressQoS resource applies only to its namespace.", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
dscpValue1 = 40
dscpValue2 = 30
dstCIDR = dscpSvcIP + "/" + "32"
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
)
exutil.By("1) ####### Create egressqos and testpod in one namespace ##########")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
e2e.Logf("create namespace %s", ns1)
egressQos1 := egressQosResource{
name: "default",
namespace: ns1,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod1 := egressQosResource{
name: "test-pod",
namespace: ns1,
kind: "pod",
tempfile: testPodTmpFile,
}
defer egressQos1.delete(oc)
egressQos1.create(oc, "NAME="+egressQos1.name, "NAMESPACE="+egressQos1.namespace, "CIDR1="+dstCIDR, "CIDR2="+"1.1.1.1/32")
defer testPod1.delete(oc)
testPod1.create(oc, "NAME="+testPod1.name, "NAMESPACE="+testPod1.namespace)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("testpod isn't ready"))
exutil.By("2) ####### Create egressqos and testpod in a new namespace ##########")
oc.SetupProject()
ns2 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns2)
e2e.Logf("create namespace %s", ns2)
egressQos2 := egressQosResource{
name: "default",
namespace: ns2,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod2 := egressQosResource{
name: "test-pod",
namespace: ns2,
kind: "pod",
tempfile: testPodTmpFile,
}
defer egressQos2.delete(oc)
egressQos2.create(oc, "NAME="+egressQos2.name, "NAMESPACE="+egressQos2.namespace, "CIDR1="+"1.1.1.1/32", "CIDR2="+dstCIDR)
defer testPod2.delete(oc)
testPod2.create(oc, "NAME="+testPod2.name, "NAMESPACE="+testPod2.namespace)
errPodRdy2 := waitForPodWithLabelReady(oc, ns2, "name="+testPod2.name)
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("testpod isn't ready"))
exutil.By("3) ####### Try to create a new egressqos in ns2 ##########")
egressQos3 := egressQosResource{
name: "newegressqos",
namespace: ns2,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
output, _ := egressQos3.createWithOutput(oc, "NAME="+egressQos3.name, "NAMESPACE="+egressQos3.namespace, "CIDR1="+"1.1.1.1/32", "CIDR2="+dstCIDR)
//Only one egressqos is permitted for one namespace
o.Expect(output).Should(o.ContainSubstring("Invalid value"))
exutil.By("4) ####### Check dscp value of egress traffic of ns1 ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod1.namespace, testPod1.name, dscpSvcIP, dscpSvcPort)
chkRes1 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue1)
o.Expect(chkRes1).Should(o.BeTrue())
exutil.By("5 ####### Check dscp value of egress traffic of ns2 ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes2 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile2, dscpValue2)
o.Expect(chkRes2).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
c006431a-dd0f-4628-a02d-6f1f39d0b431
|
NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51749-if ipv4 egress traffic matches multiple egressqos rules, the first one will take effect.
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51749-if ipv4 egress traffic matches multiple egressqos rules, the first one will take effect.", func() {
exutil.By("1) ############## create egressqos and testpod #################")
var (
dscpValue = 40
dstCIDR = dscpSvcIP + "/" + "32"
pktFile = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := egressQosResource{
name: "test-pod",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
//egressqos has two rules which can match egress traffic
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "CIDR2="+dstCIDR)
defer testPod.delete(oc)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("2) ####### Check dscp value of egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
// the first matched egressqos rule can take effect
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
158fb4be-0ad3-4411-93fc-34c88134369a
|
NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51751-if egress traffic doesn't match egressqos rules, dscp value will not change.
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51751-if egress traffic doesn't match egressqos rules, dscp value will not change.", func() {
var (
dscpValue1 = 40
dscpValue2 = 30
dscpValue = 0
pktFile = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := egressQosResource{
name: "test-pod",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
exutil.By("1) ############## create egressqos and testpod #################")
//egressqos has two rules which neither matches egress traffic
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"1.1.1.1/32", "CIDR2="+"2.2.2.2/32")
defer testPod.delete(oc)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("2) ####### Check dscp value of egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
// dscp value of egress traffic doesn't change
chkRes1 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue1)
o.Expect(chkRes1).Should(o.Equal(false))
chkRes2 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue2)
o.Expect(chkRes2).Should(o.Equal(false))
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
131f1ed6-6bb5-4b70-9d6d-a44967ed0a5a
|
NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51839-egressqos can work fine when new/update/delete matching pods.
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51839-egressqos can work fine when new/update/delete matching pods.", func() {
var (
dscpValue1 = 40
dscpValue2 = 30
priorityValue = "Critical"
dstCIDR = dscpSvcIP + "/" + "32"
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
pktFile3 = getRandomString() + "pcap.txt"
pktFile4 = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod1 := egressQosResource{
name: "testpod1",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
testPod2 := egressQosResource{
name: "testpod2",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+testPod1.name)
exutil.By("2) ####### Create testpod1 which match the second podselector ##########")
defer testPod1.delete(oc)
testPod1.create(oc, "NAME="+testPod1.name, "NAMESPACE="+testPod1.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("3) ####### Check dscp value in egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod1.namespace, testPod1.name, dscpSvcIP, dscpSvcPort)
chkRes1 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue2)
o.Expect(chkRes1).Should(o.BeTrue())
exutil.By("4) ####### Create testpod2 which match the second podselector ##########")
defer testPod2.delete(oc)
testPod2.create(oc, "NAME="+testPod2.name, "NAMESPACE="+testPod2.namespace)
errPodRdy = waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod2.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("5) ####### Check dscp value in egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes2 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue2)
o.Expect(chkRes2).Should(o.BeTrue())
exutil.By("6) ####### Update testpod2 label to match the first egressqos rule ##########")
defer exutil.LabelPod(oc, testPod2.namespace, testPod2.name, "priority-")
err := exutil.LabelPod(oc, testPod2.namespace, testPod2.name, "priority="+priorityValue)
o.Expect(err).NotTo(o.HaveOccurred())
defer rmPktsFile(a, oc, dscpSvcIP, pktFile3)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile3)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes3 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile3, dscpValue1)
o.Expect(chkRes3).Should(o.BeTrue())
exutil.By("7) ####### Remove testpod1 and check egress traffic ##########")
testPod1.delete(oc)
defer rmPktsFile(a, oc, dscpSvcIP, pktFile4)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile4)
startCurlTraffic(oc, testPod2.namespace, testPod2.name, dscpSvcIP, dscpSvcPort)
chkRes4 := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile4, dscpValue1)
o.Expect(chkRes4).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
4268fb06-c4ef-4aa3-ad86-d9cbd2cc65d9
|
NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51840-egressqos can work fine when new/update/delete egressqos rules
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-Author:yingwang-Medium-51840-egressqos can work fine when new/update/delete egressqos rules", func() {
var (
dscpValue1 = 40
dscpValue2 = 30
dscpValue3 = 0
dscpValue4 = 20
priorityValue = "Critical"
dstCIDR = dscpSvcIP + "/" + "32"
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
pktFile3 = getRandomString() + "pcap.txt"
pktFile4 = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
)
exutil.SetNamespacePrivileged(oc, oc.Namespace())
egressQos := egressQosResource{
name: "default",
namespace: oc.Namespace(),
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := egressQosResource{
name: "testpod",
namespace: oc.Namespace(),
kind: "pod",
tempfile: testPodTmpFile,
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+testPod.name)
exutil.By("2) ####### Create testpod1 which match the second podselector ##########")
defer testPod.delete(oc)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
//label testpod with priority Critical
err := exutil.LabelPod(oc, testPod.namespace, testPod.name, "priority="+priorityValue)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) ####### Check dscp value in egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue1)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("4) ####### Change egressqos rule and send traffic again ##########")
patchYamlToRestore := `[{"op":"replace","path":"/spec/egress/0/podSelector/matchLabels/priority","value":"Low"}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "-p", patchYamlToRestore).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("egressqos.k8s.ovn.org/default patched"))
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile2, dscpValue2)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("5) ####### delete egressqos rule and send traffic again ##########")
patchYamlToRestore = `[{"op":"remove","path":"/spec/egress/1"}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "-p", patchYamlToRestore).Output()
//output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "--patch-file", patchFile2).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("egressqos.k8s.ovn.org/default patched"))
defer rmPktsFile(a, oc, dscpSvcIP, pktFile3)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile3)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile3, dscpValue3)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("6) ####### add new egressqos rule and send traffic again ##########")
patchYamlToRestore = `[{"op": "add", "path": "/spec/egress/1", "value":{"dscp":20,"dstCIDR": "0.0.0.0/0"}}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "-p", patchYamlToRestore).Output()
//output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args(egressQos.kind, egressQos.name, "-n", egressQos.namespace, "--type=json", "--patch-file", patchFile3).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("egressqos.k8s.ovn.org/default patched"))
defer rmPktsFile(a, oc, dscpSvcIP, pktFile4)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile4)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile4, dscpValue4)
o.Expect(chkRes).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
1367fa81-63f5-4435-ad7b-d679680ce453
|
Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-74098-egressqos status is correct
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-74098-egressqos status is correct", func() {
var (
priorityValue = "Critical"
dstCIDR = dscpSvcIP + "/" + "32"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
egressQos := egressQosResource{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+"testPod")
exutil.By("2) ####### check egressqos status info is correct ##########")
statusInfo, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressqos", "default", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(statusInfo, "STATUS")).To(o.BeTrue())
o.Expect(strings.Contains(statusInfo, "EgressQoS Rules applied")).To(o.BeTrue())
exutil.By("3) ####### check egressqos status detail info is correct ##########")
chkEgressQosStatus(oc, ns)
})
| |||||
test case
|
openshift/openshift-tests-private
|
79e0cc0d-1022-4f06-b126-e925f7794ad8
|
Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-74204-egressqos addressset updated correctly
|
['"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-74204-egressqos addressset updated correctly", func() {
var (
priorityValue = "Minor"
dstCIDR = dscpSvcIP + "/" + "32"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-podselector-template.yaml")
testPodTmpFile = filepath.Join(networkBaseDir, "ping-for-pod-specific-node-template.yaml")
podLable = "egress-qos-pod"
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
workerNodeList := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker")
if len(workerNodeList) < 2 {
g.Skip("These cases can only be run for cluster that has at least two worker nodes")
}
exutil.By("1) ####### Create egressqos with podselector rules ##########")
egressQos := egressQosResource{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
defer egressQos.delete(oc)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "PRIORITY="+priorityValue, "CIDR2="+dstCIDR, "LABELNAME="+podLable)
exutil.By("2) ####### Create 2 testpods on different nodes which don't match the any egressqos rule ##########")
// create 2 testpods which located on different nodes
testPod1 := egressQosResource{
name: "testpod1",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
defer testPod1.delete(oc)
testPod1.create(oc, "NAME="+testPod1.name, "NAMESPACE="+testPod1.namespace, "NODENAME="+workerNodeList[0])
testPod2 := egressQosResource{
name: "testpod2",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
defer testPod2.delete(oc)
testPod2.create(oc, "NAME="+testPod2.name, "NAMESPACE="+testPod2.namespace, "NODENAME="+workerNodeList[1])
errPodRdy := waitForPodWithLabelReady(oc, ns, "name=hello-pod")
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod1 isn't ready"))
exutil.By("4) ####### Check egressqos addresset. ##########")
addSet1 := getEgressQosAddSet(oc, workerNodeList[0], ns)
addSet2 := getEgressQosAddSet(oc, workerNodeList[1], ns)
chkAddSet(oc, testPod1.name, ns, addSet1, false)
chkAddSet(oc, testPod2.name, ns, addSet1, false)
chkAddSet(oc, testPod1.name, ns, addSet2, false)
chkAddSet(oc, testPod2.name, ns, addSet2, false)
exutil.By("5) ####### update testpod1 to match egressqos rule. Only addresset on worker0 updated ##########")
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", testPod1.name, "name="+podLable, "-n", testPod1.namespace, "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
addSet1 = getEgressQosAddSet(oc, workerNodeList[0], ns)
addSet2 = getEgressQosAddSet(oc, workerNodeList[1], ns)
chkAddSet(oc, testPod1.name, ns, addSet1, true)
chkAddSet(oc, testPod2.name, ns, addSet1, false)
chkAddSet(oc, testPod1.name, ns, addSet2, false)
chkAddSet(oc, testPod2.name, ns, addSet2, false)
exutil.By("6) ####### update testpod2 to match egressqos rule. Only addresset on worker1 updated ##########")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", testPod2.name, "priority="+priorityValue, "-n", testPod1.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
addSet1 = getEgressQosAddSet(oc, workerNodeList[0], ns)
addSet2 = getEgressQosAddSet(oc, workerNodeList[1], ns)
chkAddSet(oc, testPod1.name, ns, addSet1, true)
chkAddSet(oc, testPod2.name, ns, addSet1, false)
chkAddSet(oc, testPod1.name, ns, addSet2, false)
chkAddSet(oc, testPod2.name, ns, addSet2, true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
7d3a9f43-4444-4faf-a355-64b4f48278c6
|
Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-73642-Egress traffic with EgressIP and EgressQos applied can work fine.[Disruptive]
|
['"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("Author:yingwang-NonHyperShiftHOST-ConnectedOnly-Medium-73642-Egress traffic with EgressIP and EgressQos applied can work fine.[Disruptive]", func() {
exutil.By("1) ############## create egressqos and egressip #################")
var (
dstCIDR = externalPrivateIP + "/" + "32"
dscpValue = 40
pktFile = getRandomString() + "pcap.txt"
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
egressIPTemplate = filepath.Join(networkBaseDir, "egressip-config2-template.yaml")
egressNodeLabel = "k8s.ovn.org/egress-assignable"
podLabelKey = "color"
podLabelValue = "blue"
nodeLabelKey = "name"
nodeLabelValue = "test"
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
workers := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker")
exutil.By("Apply label to namespace\n")
nsLabel := nodeLabelKey + "=" + nodeLabelValue
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, nsLabel).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create an egressip object\n")
exutil.By("Apply EgressLabel Key to one node.")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[0], egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[0], egressNodeLabel, "true")
freeIPs := findFreeIPs(oc, workers[0], 1)
o.Expect(len(freeIPs)).Should(o.Equal(1))
egressip := networkingRes{
name: "egressip-" + getRandomString(),
namespace: ns,
kind: "egressip",
tempfile: egressIPTemplate,
}
defer removeResource(oc, true, true, "egressip", egressip.name)
egressip.create(oc, "NAME="+egressip.name, "EGRESSIP1="+freeIPs[0], "NSLABELKEY="+nodeLabelKey, "NSLABELVALUE="+nodeLabelValue,
"PODLABELKEY="+podLabelKey, "PODLABELVALUE="+podLabelValue)
verifyExpectedEIPNumInEIPObject(oc, egressip.name, 1)
egressQos := networkingRes{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := networkingRes{
name: "test-pod",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
//create egressqos
defer removeResource(oc, true, true, "egressqos", egressQos.name, "-n", egressQos.namespace)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "CIDR2="+dstCIDR)
defer removeResource(oc, true, true, "pod", testPod.name, "-n", testPod.namespace)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, ns, "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
podLable := podLabelKey + "=" + podLabelValue
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", testPod.name, "-n", testPod.namespace, podLable).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) ####### Check dscp value of egress traffic ##########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile)
startCurlTraffic(oc, testPod.namespace, testPod.name, externalPrivateIP, dscpSvcPort)
// the first matched egressqos rule and egressip can take effect
chkRes := chkDSCPandEIPinPkts(a, oc, dscpSvcIP, pktFile, dscpValue, freeIPs[0])
o.Expect(chkRes).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
f4521029-50ca-49f7-8ea7-e2e4de1a7957
|
Author:yingwang-High-74054-Egress traffic works with ANP, BANP and NP with EgressQos. [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("Author:yingwang-High-74054-Egress traffic works with ANP, BANP and NP with EgressQos. [Disruptive]", func() {
var (
dstCIDR = externalPrivateIP + "/" + "32"
dscpValue = 40
testDataDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(testDataDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
testPodTmpFile = filepath.Join(egressBaseDir, "testpod-template.yaml")
pktFile1 = getRandomString() + "pcap.txt"
pktFile2 = getRandomString() + "pcap.txt"
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-cidr-template.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-cidr-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
banpRuleName = "banp-rule"
anpRuleName = "anp-rule"
)
ns := oc.Namespace()
exutil.By("####### 1. Create pod and egressqos #############")
egressQos := networkingRes{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
testPod := networkingRes{
name: "test-pod",
namespace: ns,
kind: "pod",
tempfile: testPodTmpFile,
}
//create egressqos
defer removeResource(oc, true, true, "egressqos", egressQos.name, "-n", egressQos.namespace)
egressQos.create(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1="+"0.0.0.0/0", "CIDR2="+dstCIDR)
defer removeResource(oc, true, true, "pod", testPod.name, "-n", testPod.namespace)
testPod.create(oc, "NAME="+testPod.name, "NAMESPACE="+testPod.namespace)
errPodRdy := waitForPodWithLabelReady(oc, ns, "name="+testPod.name)
exutil.AssertWaitPollNoErr(errPodRdy, fmt.Sprintf("testpod isn't ready"))
exutil.By("########### 2. Create a Admin Network Policy with deny action ############")
anpCR := singleRuleCIDRANPPolicyResource{
name: "anp-74054",
subjectKey: matchLabelKey,
subjectVal: ns,
priority: 10,
ruleName: anpRuleName,
ruleAction: "Deny",
cidr: dstCIDR,
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpCR.name)
anpCR.createSingleRuleCIDRANP(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpCR.name)).To(o.BeTrue())
exutil.By("############ 3. Verify ANP blocks matching egress traffic #############")
CurlPod2HostFail(oc, ns, testPod.name, externalPrivateIP, dscpSvcPort)
exutil.By("############## 4. edit ANP rule to allow egress traffic #############")
patchYamlToRestore := `[{"op":"replace","path":"/spec/egress/0/action","value":"Allow"}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "adminnetworkpolicy.policy.networking.k8s.io/anp-74054 patched")).To(o.BeTrue())
exutil.By("############# 5. check egress traffic can pass and dscp value is correct ###########")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile1)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile1)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes := chkDSCPinPkts(a, oc, dscpSvcIP, pktFile1, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
exutil.By("############## 6. edit ANP rule to action pass #############")
patchYamlToRestore = `[{"op":"replace","path":"/spec/egress/0/action","value":"Pass"}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("adminnetworkpolicy", anpCR.name, "--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "adminnetworkpolicy.policy.networking.k8s.io/anp-74054 patched")).To(o.BeTrue())
exutil.By("############ 7. Create a Baseline Admin Network Policy with deny action ############")
banpCR := singleRuleCIDRBANPPolicyResource{
name: "default",
subjectKey: matchLabelKey,
subjectVal: ns,
ruleName: banpRuleName,
ruleAction: "Deny",
cidr: dstCIDR,
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleCIDRBANP(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("############# 8. Verify BANP blocks matching egress traffic #########")
CurlPod2HostFail(oc, ns, testPod.name, externalPrivateIP, dscpSvcPort)
exutil.By("############ 9. edit BANP rule to allow egress traffic ###############")
patchYamlToRestore = `[{"op":"replace","path":"/spec/egress/0/action","value":"Allow"}]`
output, err1 = oc.AsAdmin().WithoutNamespace().Run("patch").Args("baselineadminnetworkpolicy", banpCR.name, "--type=json", "-p", patchYamlToRestore).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "baselineadminnetworkpolicy.policy.networking.k8s.io/default patched")).To(o.BeTrue())
exutil.By("############# 10. check egress traffic can pass and dscp value is correct #############")
defer rmPktsFile(a, oc, dscpSvcIP, pktFile2)
startTcpdumpOnDscpService(a, oc, dscpSvcIP, pktFile2)
startCurlTraffic(oc, testPod.namespace, testPod.name, dscpSvcIP, dscpSvcPort)
chkRes = chkDSCPinPkts(a, oc, dscpSvcIP, pktFile2, dscpValue)
o.Expect(chkRes).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
d2807882-3071-4c60-a4c7-67c39e12cabe
|
Author:qiowang-NonHyperShiftHOST-Medium-52365-negative validation for egressqos.
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos.go
|
g.It("Author:qiowang-NonHyperShiftHOST-Medium-52365-negative validation for egressqos.", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
egressBaseDir = filepath.Join(networkBaseDir, "egressqos")
egressQosTmpFile = filepath.Join(egressBaseDir, "egressqos-template.yaml")
invalideDstCIDR = []string{"abc/24", "$@#/132", "asd::/64", "1.2.3.4/58", "abc::/158"}
)
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
egressQos := egressQosResource{
name: "default",
namespace: ns,
kind: "egressqos",
tempfile: egressQosTmpFile,
}
for _, cidr := range invalideDstCIDR {
exutil.By("####### Create egressqos with wrong syntax/value CIDR rules " + cidr + " ##########")
output, _ := egressQos.createWithOutput(oc, "NAME="+egressQos.name, "NAMESPACE="+egressQos.namespace, "CIDR1=1.1.1.1/32", "CIDR2="+cidr)
o.Expect(output).Should(o.ContainSubstring("Invalid value"))
}
})
| |||||
test
|
openshift/openshift-tests-private
|
d7aa0e37-801a-46c4-bc1e-75cbaca7859e
|
ipsec
|
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN IPSEC", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-ipsec", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := exutil.CheckNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip case on cluster that has non-OVN network plugin!!")
}
ipsecState := checkIPsec(oc)
if ipsecState != "{}" && ipsecState != "Full" {
g.Skip("IPsec not enabled, skiping test!")
}
})
// author: [email protected]
g.It("Author:rbrattai-High-66652-Verify IPsec encapsulation is enabled for NAT-T", func() {
// Epic https://issues.redhat.com/browse/SDN-2629
platform := checkPlatform(oc)
if !strings.Contains(platform, "ibmcloud") {
g.Skip("Test requires IBMCloud, skip for other platforms!")
}
ns := "openshift-ovn-kubernetes"
exutil.By("Checking ipsec_encapsulation in ovnkube-node pods")
podList, podListErr := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
LabelSelector: "app=ovnkube-node",
})
o.Expect(podListErr).NotTo(o.HaveOccurred())
for _, pod := range podList.Items {
cmd := "ovn-nbctl --no-leader-only get NB_Global . options"
e2e.Logf("The command is: %v", cmd)
command1 := []string{"-n", ns, "-c", "nbdb", pod.Name, "--", "bash", "-c", cmd}
out, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(command1...).Output()
if err != nil {
e2e.Logf("Execute command failed with err:%v and output is %v.", err, out)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring(`ipsec_encapsulation="true"`))
}
})
// author: [email protected]
g.It("Author:huirwang-High-38846-Should be able to send node to node ESP traffic on IPsec clusters", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
hostnwPodTmp = filepath.Join(buildPruningBaseDir, "net-admin-cap-pod-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Obtain a namespace.")
ns1 := oc.Namespace()
//Required for hostnetwork pod
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create 1st hello pod in ns1")
//create hostnetwork pod on worker0 and worker1, reuse sriov functions for hostnetwork creation which is actually not related to sriov.
pod1 := sriovNetResource{
name: "host-pod1",
namespace: ns1,
tempfile: hostnwPodTmp,
kind: "pod",
}
pod2 := sriovNetResource{
name: "host-pod2",
namespace: ns1,
tempfile: hostnwPodTmp,
kind: "pod",
}
pod1.create(oc, "PODNAME="+pod1.name, "NODENAME="+nodeList.Items[0].Name)
defer pod1.delete(oc)
pod2.create(oc, "PODNAME="+pod2.name, "NODENAME="+nodeList.Items[1].Name)
defer pod2.delete(oc)
errPodRdy5 := waitForPodWithLabelReady(oc, ns1, "name="+pod1.name)
exutil.AssertWaitPollNoErr(errPodRdy5, "hostnetwork pod isn't ready")
errPodRdy6 := waitForPodWithLabelReady(oc, ns1, "name="+pod2.name)
exutil.AssertWaitPollNoErr(errPodRdy6, "hostnetwork pod isn't ready")
exutil.By("Send ESP traffic from pod1")
nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[1].Name)
socatCmd := fmt.Sprintf("nohup socat /dev/random ip-sendto:%s:50", nodeIP2)
e2e.Logf("The socat command is %s", socatCmd)
cmdSocat, _, _, _ := oc.Run("exec").Args("-n", ns1, pod2.name, "--", "bash", "-c", socatCmd).Background()
defer cmdSocat.Process.Kill()
exutil.By("Start tcpdump from pod2.")
tcpdumpCmd := "timeout --preserve-status 60 tcpdump -c 2 -i br-ex \"esp and less 1500\" "
e2e.Logf("The tcpdump command is %s", tcpdumpCmd)
outputTcpdump, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, tcpdumpCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify ESP packets can be captured on pod2.")
o.Expect(outputTcpdump).NotTo(o.ContainSubstring("0 packets captured"))
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
exutil.By("Retest with IPv6 address")
exutil.By("Send ESP traffic from pod1")
socatCmd := fmt.Sprintf("nohup socat /dev/random ip-sendto:%s:50", nodeIP1)
e2e.Logf("The socat command is %s", socatCmd)
cmdSocat, _, _, _ := oc.Run("exec").Args("-n", ns1, pod2.name, "--", "bash", "-c", socatCmd).Background()
defer cmdSocat.Process.Kill()
exutil.By("Start tcpdump from pod2.")
tcpdumpCmd := "timeout --preserve-status 60 tcpdump -c 2 -i br-ex \"esp and less 1500\" "
e2e.Logf("The tcpdump command is %s", tcpdumpCmd)
outputTcpdump, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, tcpdumpCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify ESP packets can be captured on pod2.")
o.Expect(outputTcpdump).NotTo(o.ContainSubstring("0 packets captured"))
}
})
// author: [email protected]
g.It("Author:huirwang-High-38845-High-37590-Restarting pluto daemon, restarting ovn-ipsec pods, pods connection should not be broken. [Disruptive]", func() {
exutil.By("Get one worker node.")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) > 0).Should(o.BeTrue())
exutil.By("kill pluto on one node.")
pkillCmd := "pkill -SEGV pluto"
_, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", pkillCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the ipsec pods ")
//Need to give it some hard coded time for ovn-ipsec pod to notice segfault
ovnNS := "openshift-ovn-kubernetes"
time.Sleep(90 * time.Second)
err = waitForPodWithLabelReady(oc, ovnNS, "app=ovn-ipsec")
exutil.AssertWaitPollNoErr(err, "ipsec pods are not ready after killing pluto")
exutil.By("Restart ipsec pods")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", ovnNS, "-l", "app=ovn-ipsec").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ovnNS, "app=ovn-ipsec")
exutil.AssertWaitPollNoErr(err, "ipsec pods are not ready after killing pluto")
exutil.By("Verify pods connection cross nodes after restarting ipsec pods")
pass := verifyPodConnCrossNodes(oc)
if !pass {
g.Fail("Pods connection checking cross nodes failed!!")
}
})
// author: [email protected]
g.It("Author:huirwang-Critical-79184-pod2pod cross nodes traffic should work and not broken.", func() {
exutil.By("Verify pods to pods connection cross nodes.")
pass := verifyPodConnCrossNodes(oc)
if !pass {
g.Fail("Pods connection checking cross nodes failed!!")
}
})
})
var _ = g.Describe("[sig-networking] SDN IPSEC NS", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-ipsec-ns", exutil.KubeConfigPath())
leftPublicIP string
rightIP string
rightIP2 string
leftIP string
nodeCert string
nodeCert2 string
rightNode string
rightNode2 string
ipsecTunnel string
platformvar string
)
g.BeforeEach(func() {
platform := exutil.CheckPlatform(oc)
if !(strings.Contains(platform, "gcp") || strings.Contains(platform, "baremetal")) {
g.Skip("Test cases should be run on GCP/RDU2 cluster with ovn network plugin, skip for other platforms !!")
}
ipsecState := checkIPsec(oc)
if ipsecState == "Disabled" {
g.Skip("IPsec not enabled, skiping test!")
}
switch platform {
case "gcp":
infraID, err := exutil.GetInfraID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
leftPublicIP, err = getIntSvcExternalIPFromGcp(oc, infraID)
if leftPublicIP == "" || err != nil {
g.Skip("There is no int-svc bastion host in the cluster, skip the ipsec NS test cases.")
} else {
ipsecTunnel = "VM-128-2"
rightIP = "10.0.128.2"
rightIP2 = "10.0.128.3"
leftIP = "10.0.0.2"
nodeCert = "10_0_128_2"
nodeCert2 = "10_0_128_3"
}
case "baremetal":
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case needs to be run on GCP or RDU2 cluster, skip other platforms!!!")
}
ipsecTunnel = "pluto-rdu2-VM"
rightIP = "192.168.111.23"
rightIP2 = "192.168.111.24"
leftIP = "10.0.185.155"
nodeCert = "proxy_cert" //on RDU2 setup, since nodes are NAT'd and not accessible from ext VM, IPsec tunnels terminates at proxies and proxy reinitiate tunnels with worker nodes
nodeCert2 = "proxy_cert" //so both nodes will have same proxy_cert with extSAN of proxy IP
leftPublicIP = leftIP
platformvar = "rdu2"
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
// As not the all gcp with int-svc have the ipsec NS enabled, still need to filter the ipsec NS enabled or not
rightNode = getNodeNameByIPv4(oc, rightIP)
rightNode2 = getNodeNameByIPv4(oc, rightIP2)
if rightNode == "" {
g.Skip(fmt.Sprintf("There is no worker node with IPSEC rightIP %v, skip the testing.", rightIP))
}
//With 4.15+, filter the cluster by checking if existing ipsec config on external host.
err = sshRunCmd(leftPublicIP, "core", "sudo ls -l /etc/ipsec.d/nstest.conf && sudo systemctl restart ipsec")
if err != nil {
g.Skip("No IPSEC configurations on external host, skip the test!!")
}
//check if IPsec packages are present on the cluster
rpm_output, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "rpm -qa | grep -i libreswan")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Confirm if required libreswan and NetworkManager-libreswan packagaes are present on node before validating IPsec usecases")
o.Expect(strings.Contains(rpm_output, "libreswan-")).To(o.BeTrue())
o.Expect(strings.Contains(rpm_output, "NetworkManager-libreswan")).To(o.BeTrue())
//With 4.15+, use nmstate to config ipsec
installNMstateOperator(oc)
})
// author: [email protected]
g.It("Author:anusaxen-High-74222-[rdu2cluster] Transport tunnel can be setup for IPSEC NS in NAT env, [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to RDU2 cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
err := applyConfigTypeExtHost(leftPublicIP, "host2hostTransportRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
policyName := "ipsec-policy-transport-74222"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
g.It("Author:anusaxen-High-74223-[rdu2cluster] Tunnel mode can be setup for IPSEC NS in NAT env, [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to RDU2 cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
err := applyConfigTypeExtHost(leftPublicIP, "host2hostTunnelRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
policyName := "ipsec-policy-transport-74223"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
configIPSecNMSatePolicy(oc, policyName, rightIP2, rightNode2, ipsecTunnel, leftIP, nodeCert2, "tunnel")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode2)
phyInf, nicError := getSnifPhyInf(oc, rightNode2)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode2, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode2, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
// author: [email protected]
g.It("Author:huirwang-High-67472-Transport tunnel can be setup for IPSEC NS, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-transport-67472"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
cmdTcpdump.Process.Kill()
})
// author: [email protected]
g.It("Author:huirwang-High-67473-Service nodeport can be accessed with ESP encrypted, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
)
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-67473"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
g.By("Create a namespace")
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: rightNode,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
g.By("Create a test service which is in front of the above pods")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns1,
protocol: "TCP",
selector: "hello-pod",
serviceType: "NodePort",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
svc.ipFamilyPolicy = "SingleStack"
svc.createServiceFromParams(oc)
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
exutil.By("Checking the traffic is encrypted by ESP when curl NodePort service from external host")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, "test-service", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
curlCmd := fmt.Sprintf("curl %s:%s &", rightIP, nodePort)
time.Sleep(5 * time.Second)
err = sshRunCmd(leftPublicIP, "core", curlCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for http is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
})
// author: [email protected]
g.It("Author:huirwang-Longduration-NonPreRelease-Medium-67474-Medium-69176-IPSec tunnel can be up after restart IPSec service or restart node, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-transport-69176"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
//Due to bug https://issues.redhat.com/browse/OCPBUGS-27839,skip below step for now"
/*exutil.By("Restart ipsec service on right node")
ns := oc.Namespace()
cmd2 := "systemctl restart ipsec.service"
_, ipsecErr = exutil.DebugNodeWithChroot(oc, rightNode, "/bin/bash", "-c", cmd2)
o.Expect(ipsecErr).NotTo(o.HaveOccurred())*/
exutil.By("Reboot node which is configured IPSec NS")
defer checkNodeStatus(oc, rightNode, "Ready")
rebootNode(oc, rightNode)
checkNodeStatus(oc, rightNode, "NotReady")
checkNodeStatus(oc, rightNode, "Ready")
exutil.By("Verify ipsec session was established between worker node and external host again!")
o.Eventually(func() bool {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/32 dir out ; ip xfrm policy get src %s/32 dst %s/32 dir in ", rightIP, leftIP, leftIP, rightIP)
ipXfrmPolicy, ipsecErr := exutil.DebugNodeWithChroot(oc, rightNode, "/bin/bash", "-c", cmd)
return ipsecErr == nil && strings.Contains(ipXfrmPolicy, "transport")
}, "300s", "30s").Should(o.BeTrue(), "IPSec tunnel connection was not restored.")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
cmdTcpdump.Process.Kill()
})
// author: [email protected]
g.It("Author:huirwang-High-67475-Be able to access hostnetwork pod with traffic encrypted, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
hostPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml")
)
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-67475"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
g.By("Create a namespace")
ns1 := oc.Namespace()
//Required for hostnetwork pod
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create a hostnetwork pod in ns1")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: rightNode,
template: hostPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
exutil.By("Checking the traffic is encrypted by ESP when curl hostpod from external host")
time.Sleep(5 * time.Second)
curlCmd := fmt.Sprintf("curl %s:%s &", rightIP, "8080")
err = sshRunCmd(leftPublicIP, "core", curlCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump output for curl to hostpod is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
})
// author: [email protected]
g.It("Author:huirwang-High-69178-High-38873-Tunnel mode can be setup for IPSec NS,IPSec NS tunnel can be teared down by nmstate config. [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-tunnel-69178"
ipsecTunnel = "plutoTunnelVM"
)
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
configIPSecNMSatePolicy(oc, policyName, rightIP2, rightNode2, ipsecTunnel, leftIP, nodeCert2, "tunnel")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode2)
phyInf, nicError := getSnifPhyInf(oc, rightNode2)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode2, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode2, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
cmdTcpdump.Process.Kill()
exutil.By("Remove IPSec interface")
removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
exutil.By("Verify IPSec interface was removed from node")
ifaceList, ifaceErr := exutil.DebugNodeWithChroot(oc, rightNode2, "nmcli", "con", "show")
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
e2e.Logf(ifaceList)
o.Expect(ifaceList).NotTo(o.ContainSubstring(ipsecTunnel))
exutil.By("Verify the tunnel was teared down")
verifyIPSecTunnelDown(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Verify connection to exteranl host was not broken")
// workaorund for bug https://issues.redhat.com/browse/RHEL-24802
cmd := fmt.Sprintf("ip x p flush;ip x s flush; sleep 2; ping -c4 %s &", rightIP2)
err = sshRunCmd(leftPublicIP, "core", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
})
//author: [email protected]
g.It("Author:anusaxen-Longduration-NonPreRelease-High-71465-Multiplexing Tunnel and Transport type IPsec should work with external host. [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policies for both Transport and Tunnel Type")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-transport-71465"
ipsecTunnel = "plutoTransportVM"
)
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session for transport mode was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
var (
policyName2 = "ipsec-policy-tunnel-71465"
ipsecTunnel2 = "plutoTunnelVM"
)
defer removeIPSecConfig(oc, policyName2, ipsecTunnel2, rightNode2)
configIPSecNMSatePolicy(oc, policyName2, rightIP2, rightNode2, ipsecTunnel2, leftIP, nodeCert2, "tunnel")
exutil.By("Checking ipsec session for tunnel mode was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
//we just need to check traffic on any of rightIP/rightNode to make sure tunnel multiplexing didn't break the whole functionality as tunnel multiplexing has been already verified in above steps
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", rightIP)
err = sshRunCmd(leftPublicIP, "core", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
})
//author: [email protected]
g.It("Author:anusaxen-High-74221-[rdu2cluster] Tunnel mode can be setup for IPSec NS in NAT env - Host2Net [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to local RDU2 BareMetal cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy for host2net Tunnel Type")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-tunnel-host2net-74221"
ipsecTunnel = "plutoTunnelVM_host2net"
rightNetworkAddress = "10.0.184.0" //OSP VM has network address of 10.0.184.0 with eth0 IP 10.0.185.155/22
rightNetworkCidr = "/22"
)
err := applyConfigTypeExtHost(leftPublicIP, "host2netTunnelRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
configIPSecNMSatePolicyHost2net(oc, policyName, rightIP2, rightNode2, ipsecTunnel, leftIP, rightNetworkAddress, rightNetworkCidr, nodeCert2, "tunnel")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUphost2netTunnel(oc, rightNode2, rightIP2, rightNetworkAddress, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode2)
phyInf, nicError := getSnifPhyInf(oc, rightNode2)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode2, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode2, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
//author: [email protected]
g.It("Author:anusaxen-High-74220-[rdu2cluster] Transport mode can be setup for IPSec NS in NAT env - Host2Net [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to local RDU2 BareMetal cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy for host2net Transport Type")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-transport-host2net-74220"
ipsecTunnel = "plutoTransportVM_host2net"
rightNetworkAddress = "10.0.184.0" //OSP VM has network address of 10.0.184.0 with mgmt IP 10.0.185.155/22
rightNetworkCidr = "/22"
)
err := applyConfigTypeExtHost(leftPublicIP, "host2netTransportRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicyHost2net(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, rightNetworkAddress, rightNetworkCidr, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
// author: [email protected]
g.It("Author:ansaxen-Medium-73554-External Traffic should still be IPsec encrypted in presense of Admin Network Policy application at egress node [Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
var (
testID = "73554"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template-node.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
)
g.By("Add label to OCP egress node")
defer exutil.DeleteLabelFromNode(oc, rightNode, "team-")
exutil.AddLabelToNode(oc, rightNode, "team", "qe")
exutil.By("Create a Baseline Admin Network Policy with allow action")
banpCR := singleRuleBANPPolicyResourceNode{
name: "default",
subjectKey: matchLabelKey,
subjectVal: "openshift-nmstate",
policyType: "egress",
direction: "to",
ruleName: "default-allow-egress",
ruleAction: "Allow",
ruleKey: "node-role.kubernetes.io/worker",
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleBANPNode(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("Verify ANP with different actions and priorities")
anpIngressRuleCR := singleRuleANPPolicyResourceNode{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: "openshift-nmstate",
priority: 1,
policyType: "egress",
direction: "to",
ruleName: "node-as-egress-peer-" + testID,
ruleAction: "Allow",
ruleKey: "team",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: "qe",
actionname: "egress",
actiontype: "Allow",
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-transport-" + testID
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", rightIP)
err = sshRunCmd(leftPublicIP, "core", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "ESP")).Should(o.BeTrue())
cmdTcpdump.Process.Kill()
exutil.By("Start tcpdump on ipsec right node again")
tcpdumpCmd2 := fmt.Sprintf("timeout 60s tcpdump -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump2, cmdOutput2, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd2).Background()
defer cmdTcpdump2.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Checking ssh between worker node and external host encrypted by ESP")
time.Sleep(5 * time.Second)
result, timeoutTestErr := accessEgressNodeFromIntSvcInstanceOnGCP(leftPublicIP, rightIP)
o.Expect(timeoutTestErr).NotTo(o.HaveOccurred())
o.Expect(result).To(o.Equal("0"))
cmdTcpdump2.Wait()
e2e.Logf("tcpdump for ssh is \n%s", cmdOutput2.String())
o.Expect(strings.Contains(cmdOutput.String(), "ESP")).Should(o.BeTrue())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
013255a0-c62a-4ee9-8c74-11cd417b6649
|
Author:rbrattai-High-66652-Verify IPsec encapsulation is enabled for NAT-T
|
['"context"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:rbrattai-High-66652-Verify IPsec encapsulation is enabled for NAT-T", func() {
// Epic https://issues.redhat.com/browse/SDN-2629
platform := checkPlatform(oc)
if !strings.Contains(platform, "ibmcloud") {
g.Skip("Test requires IBMCloud, skip for other platforms!")
}
ns := "openshift-ovn-kubernetes"
exutil.By("Checking ipsec_encapsulation in ovnkube-node pods")
podList, podListErr := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
LabelSelector: "app=ovnkube-node",
})
o.Expect(podListErr).NotTo(o.HaveOccurred())
for _, pod := range podList.Items {
cmd := "ovn-nbctl --no-leader-only get NB_Global . options"
e2e.Logf("The command is: %v", cmd)
command1 := []string{"-n", ns, "-c", "nbdb", pod.Name, "--", "bash", "-c", cmd}
out, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(command1...).Output()
if err != nil {
e2e.Logf("Execute command failed with err:%v and output is %v.", err, out)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring(`ipsec_encapsulation="true"`))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
46c2a216-bc60-445a-95cd-2f1bc5758746
|
Author:huirwang-High-38846-Should be able to send node to node ESP traffic on IPsec clusters
|
['"context"', '"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-High-38846-Should be able to send node to node ESP traffic on IPsec clusters", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
hostnwPodTmp = filepath.Join(buildPruningBaseDir, "net-admin-cap-pod-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("Obtain a namespace.")
ns1 := oc.Namespace()
//Required for hostnetwork pod
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create 1st hello pod in ns1")
//create hostnetwork pod on worker0 and worker1, reuse sriov functions for hostnetwork creation which is actually not related to sriov.
pod1 := sriovNetResource{
name: "host-pod1",
namespace: ns1,
tempfile: hostnwPodTmp,
kind: "pod",
}
pod2 := sriovNetResource{
name: "host-pod2",
namespace: ns1,
tempfile: hostnwPodTmp,
kind: "pod",
}
pod1.create(oc, "PODNAME="+pod1.name, "NODENAME="+nodeList.Items[0].Name)
defer pod1.delete(oc)
pod2.create(oc, "PODNAME="+pod2.name, "NODENAME="+nodeList.Items[1].Name)
defer pod2.delete(oc)
errPodRdy5 := waitForPodWithLabelReady(oc, ns1, "name="+pod1.name)
exutil.AssertWaitPollNoErr(errPodRdy5, "hostnetwork pod isn't ready")
errPodRdy6 := waitForPodWithLabelReady(oc, ns1, "name="+pod2.name)
exutil.AssertWaitPollNoErr(errPodRdy6, "hostnetwork pod isn't ready")
exutil.By("Send ESP traffic from pod1")
nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[1].Name)
socatCmd := fmt.Sprintf("nohup socat /dev/random ip-sendto:%s:50", nodeIP2)
e2e.Logf("The socat command is %s", socatCmd)
cmdSocat, _, _, _ := oc.Run("exec").Args("-n", ns1, pod2.name, "--", "bash", "-c", socatCmd).Background()
defer cmdSocat.Process.Kill()
exutil.By("Start tcpdump from pod2.")
tcpdumpCmd := "timeout --preserve-status 60 tcpdump -c 2 -i br-ex \"esp and less 1500\" "
e2e.Logf("The tcpdump command is %s", tcpdumpCmd)
outputTcpdump, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, tcpdumpCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify ESP packets can be captured on pod2.")
o.Expect(outputTcpdump).NotTo(o.ContainSubstring("0 packets captured"))
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
exutil.By("Retest with IPv6 address")
exutil.By("Send ESP traffic from pod1")
socatCmd := fmt.Sprintf("nohup socat /dev/random ip-sendto:%s:50", nodeIP1)
e2e.Logf("The socat command is %s", socatCmd)
cmdSocat, _, _, _ := oc.Run("exec").Args("-n", ns1, pod2.name, "--", "bash", "-c", socatCmd).Background()
defer cmdSocat.Process.Kill()
exutil.By("Start tcpdump from pod2.")
tcpdumpCmd := "timeout --preserve-status 60 tcpdump -c 2 -i br-ex \"esp and less 1500\" "
e2e.Logf("The tcpdump command is %s", tcpdumpCmd)
outputTcpdump, err := e2eoutput.RunHostCmd(pod1.namespace, pod1.name, tcpdumpCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify ESP packets can be captured on pod2.")
o.Expect(outputTcpdump).NotTo(o.ContainSubstring("0 packets captured"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
8fb10c08-7da7-4a55-b8eb-669e48d37540
|
Author:huirwang-High-38845-High-37590-Restarting pluto daemon, restarting ovn-ipsec pods, pods connection should not be broken. [Disruptive]
|
['"context"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-High-38845-High-37590-Restarting pluto daemon, restarting ovn-ipsec pods, pods connection should not be broken. [Disruptive]", func() {
exutil.By("Get one worker node.")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) > 0).Should(o.BeTrue())
exutil.By("kill pluto on one node.")
pkillCmd := "pkill -SEGV pluto"
_, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", pkillCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the ipsec pods ")
//Need to give it some hard coded time for ovn-ipsec pod to notice segfault
ovnNS := "openshift-ovn-kubernetes"
time.Sleep(90 * time.Second)
err = waitForPodWithLabelReady(oc, ovnNS, "app=ovn-ipsec")
exutil.AssertWaitPollNoErr(err, "ipsec pods are not ready after killing pluto")
exutil.By("Restart ipsec pods")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", ovnNS, "-l", "app=ovn-ipsec").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ovnNS, "app=ovn-ipsec")
exutil.AssertWaitPollNoErr(err, "ipsec pods are not ready after killing pluto")
exutil.By("Verify pods connection cross nodes after restarting ipsec pods")
pass := verifyPodConnCrossNodes(oc)
if !pass {
g.Fail("Pods connection checking cross nodes failed!!")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
e9572eaa-ec26-40c9-bb47-48e07cea5cea
|
Author:huirwang-Critical-79184-pod2pod cross nodes traffic should work and not broken.
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-Critical-79184-pod2pod cross nodes traffic should work and not broken.", func() {
exutil.By("Verify pods to pods connection cross nodes.")
pass := verifyPodConnCrossNodes(oc)
if !pass {
g.Fail("Pods connection checking cross nodes failed!!")
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
b0be498a-072e-45fa-925f-236621326acc
|
Author:anusaxen-High-74222-[rdu2cluster] Transport tunnel can be setup for IPSEC NS in NAT env, [Serial][Disruptive]
|
['"fmt"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:anusaxen-High-74222-[rdu2cluster] Transport tunnel can be setup for IPSEC NS in NAT env, [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to RDU2 cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
err := applyConfigTypeExtHost(leftPublicIP, "host2hostTransportRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
policyName := "ipsec-policy-transport-74222"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
fcb167d6-8115-411c-97d3-fa049dcd6369
|
Author:anusaxen-High-74223-[rdu2cluster] Tunnel mode can be setup for IPSEC NS in NAT env, [Serial][Disruptive]
|
['"fmt"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:anusaxen-High-74223-[rdu2cluster] Tunnel mode can be setup for IPSEC NS in NAT env, [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to RDU2 cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
err := applyConfigTypeExtHost(leftPublicIP, "host2hostTunnelRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
policyName := "ipsec-policy-transport-74223"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
configIPSecNMSatePolicy(oc, policyName, rightIP2, rightNode2, ipsecTunnel, leftIP, nodeCert2, "tunnel")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode2)
phyInf, nicError := getSnifPhyInf(oc, rightNode2)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode2, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode2, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
8375b8aa-36c7-43a9-9102-135639f4a0cf
|
Author:huirwang-High-67472-Transport tunnel can be setup for IPSEC NS, [Serial][Disruptive]
|
['"fmt"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-High-67472-Transport tunnel can be setup for IPSEC NS, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-transport-67472"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
cmdTcpdump.Process.Kill()
})
| |||||
test case
|
openshift/openshift-tests-private
|
36696b85-1f2d-49c9-8acb-f1eb9b3bec12
|
Author:huirwang-High-67473-Service nodeport can be accessed with ESP encrypted, [Serial][Disruptive]
|
['"fmt"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-High-67473-Service nodeport can be accessed with ESP encrypted, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
)
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-67473"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
g.By("Create a namespace")
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: rightNode,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
g.By("Create a test service which is in front of the above pods")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns1,
protocol: "TCP",
selector: "hello-pod",
serviceType: "NodePort",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
svc.ipFamilyPolicy = "SingleStack"
svc.createServiceFromParams(oc)
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
exutil.By("Checking the traffic is encrypted by ESP when curl NodePort service from external host")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, "test-service", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
curlCmd := fmt.Sprintf("curl %s:%s &", rightIP, nodePort)
time.Sleep(5 * time.Second)
err = sshRunCmd(leftPublicIP, "core", curlCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for http is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
f3318442-c74a-45ab-b73c-441cd74f0cda
|
Author:huirwang-Longduration-NonPreRelease-Medium-67474-Medium-69176-IPSec tunnel can be up after restart IPSec service or restart node, [Serial][Disruptive]
|
['"fmt"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-Longduration-NonPreRelease-Medium-67474-Medium-69176-IPSec tunnel can be up after restart IPSec service or restart node, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-transport-69176"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
//Due to bug https://issues.redhat.com/browse/OCPBUGS-27839,skip below step for now"
/*exutil.By("Restart ipsec service on right node")
ns := oc.Namespace()
cmd2 := "systemctl restart ipsec.service"
_, ipsecErr = exutil.DebugNodeWithChroot(oc, rightNode, "/bin/bash", "-c", cmd2)
o.Expect(ipsecErr).NotTo(o.HaveOccurred())*/
exutil.By("Reboot node which is configured IPSec NS")
defer checkNodeStatus(oc, rightNode, "Ready")
rebootNode(oc, rightNode)
checkNodeStatus(oc, rightNode, "NotReady")
checkNodeStatus(oc, rightNode, "Ready")
exutil.By("Verify ipsec session was established between worker node and external host again!")
o.Eventually(func() bool {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/32 dir out ; ip xfrm policy get src %s/32 dst %s/32 dir in ", rightIP, leftIP, leftIP, rightIP)
ipXfrmPolicy, ipsecErr := exutil.DebugNodeWithChroot(oc, rightNode, "/bin/bash", "-c", cmd)
return ipsecErr == nil && strings.Contains(ipXfrmPolicy, "transport")
}, "300s", "30s").Should(o.BeTrue(), "IPSec tunnel connection was not restored.")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
cmdTcpdump.Process.Kill()
})
| |||||
test case
|
openshift/openshift-tests-private
|
b0b7a957-4192-4c81-9e23-69df55b918a8
|
Author:huirwang-High-67475-Be able to access hostnetwork pod with traffic encrypted, [Serial][Disruptive]
|
['"fmt"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-High-67475-Be able to access hostnetwork pod with traffic encrypted, [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
hostPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml")
)
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-67475"
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
g.By("Create a namespace")
ns1 := oc.Namespace()
//Required for hostnetwork pod
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create a hostnetwork pod in ns1")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: rightNode,
template: hostPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns1, pod1.name)
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
exutil.By("Checking the traffic is encrypted by ESP when curl hostpod from external host")
time.Sleep(5 * time.Second)
curlCmd := fmt.Sprintf("curl %s:%s &", rightIP, "8080")
err = sshRunCmd(leftPublicIP, "core", curlCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump output for curl to hostpod is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
efa8f822-747f-4725-888e-f6c9064fd027
|
Author:huirwang-High-69178-High-38873-Tunnel mode can be setup for IPSec NS,IPSec NS tunnel can be teared down by nmstate config. [Serial][Disruptive]
|
['"fmt"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:huirwang-High-69178-High-38873-Tunnel mode can be setup for IPSec NS,IPSec NS tunnel can be teared down by nmstate config. [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-tunnel-69178"
ipsecTunnel = "plutoTunnelVM"
)
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
configIPSecNMSatePolicy(oc, policyName, rightIP2, rightNode2, ipsecTunnel, leftIP, nodeCert2, "tunnel")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode2)
phyInf, nicError := getSnifPhyInf(oc, rightNode2)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode2, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode2, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
cmdTcpdump.Process.Kill()
exutil.By("Remove IPSec interface")
removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
exutil.By("Verify IPSec interface was removed from node")
ifaceList, ifaceErr := exutil.DebugNodeWithChroot(oc, rightNode2, "nmcli", "con", "show")
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
e2e.Logf(ifaceList)
o.Expect(ifaceList).NotTo(o.ContainSubstring(ipsecTunnel))
exutil.By("Verify the tunnel was teared down")
verifyIPSecTunnelDown(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Verify connection to exteranl host was not broken")
// workaorund for bug https://issues.redhat.com/browse/RHEL-24802
cmd := fmt.Sprintf("ip x p flush;ip x s flush; sleep 2; ping -c4 %s &", rightIP2)
err = sshRunCmd(leftPublicIP, "core", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
9a8f96e4-9837-4ace-a542-dbf106de2425
|
Author:anusaxen-Longduration-NonPreRelease-High-71465-Multiplexing Tunnel and Transport type IPsec should work with external host. [Serial][Disruptive]
|
['"fmt"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:anusaxen-Longduration-NonPreRelease-High-71465-Multiplexing Tunnel and Transport type IPsec should work with external host. [Serial][Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policies for both Transport and Tunnel Type")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-transport-71465"
ipsecTunnel = "plutoTransportVM"
)
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session for transport mode was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
var (
policyName2 = "ipsec-policy-tunnel-71465"
ipsecTunnel2 = "plutoTunnelVM"
)
defer removeIPSecConfig(oc, policyName2, ipsecTunnel2, rightNode2)
configIPSecNMSatePolicy(oc, policyName2, rightIP2, rightNode2, ipsecTunnel2, leftIP, nodeCert2, "tunnel")
exutil.By("Checking ipsec session for tunnel mode was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode2, rightIP2, leftIP, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
//we just need to check traffic on any of rightIP/rightNode to make sure tunnel multiplexing didn't break the whole functionality as tunnel multiplexing has been already verified in above steps
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", rightIP)
err = sshRunCmd(leftPublicIP, "core", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(cmdOutput.String()).To(o.ContainSubstring("ESP"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
2ce79216-ec68-4507-b89f-966d3b201e00
|
Author:anusaxen-High-74221-[rdu2cluster] Tunnel mode can be setup for IPSec NS in NAT env - Host2Net [Serial][Disruptive]
|
['"fmt"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:anusaxen-High-74221-[rdu2cluster] Tunnel mode can be setup for IPSec NS in NAT env - Host2Net [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to local RDU2 BareMetal cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy for host2net Tunnel Type")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-tunnel-host2net-74221"
ipsecTunnel = "plutoTunnelVM_host2net"
rightNetworkAddress = "10.0.184.0" //OSP VM has network address of 10.0.184.0 with eth0 IP 10.0.185.155/22
rightNetworkCidr = "/22"
)
err := applyConfigTypeExtHost(leftPublicIP, "host2netTunnelRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode2)
configIPSecNMSatePolicyHost2net(oc, policyName, rightIP2, rightNode2, ipsecTunnel, leftIP, rightNetworkAddress, rightNetworkCidr, nodeCert2, "tunnel")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUphost2netTunnel(oc, rightNode2, rightIP2, rightNetworkAddress, "tunnel")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode2)
phyInf, nicError := getSnifPhyInf(oc, rightNode2)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode2, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode2, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
67b76529-d0ca-4bd1-94c3-20c926b44793
|
Author:anusaxen-High-74220-[rdu2cluster] Transport mode can be setup for IPSec NS in NAT env - Host2Net [Serial][Disruptive]
|
['"fmt"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:anusaxen-High-74220-[rdu2cluster] Transport mode can be setup for IPSec NS in NAT env - Host2Net [Serial][Disruptive]", func() {
if platformvar != "rdu2" {
g.Skip("This case is only applicable to local RDU2 BareMetal cluster, skipping this testcase.")
}
exutil.By("Configure nmstate ipsec policy for host2net Transport Type")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
var (
policyName = "ipsec-policy-transport-host2net-74220"
ipsecTunnel = "plutoTransportVM_host2net"
rightNetworkAddress = "10.0.184.0" //OSP VM has network address of 10.0.184.0 with mgmt IP 10.0.185.155/22
rightNetworkCidr = "/22"
)
err := applyConfigTypeExtHost(leftPublicIP, "host2netTransportRDU2")
o.Expect(err).NotTo(o.HaveOccurred())
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicyHost2net(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, rightNetworkAddress, rightNetworkCidr, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s udp port 4500 and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
// As above tcpdump command will be executed in background, add sleep time to let the ping action happen later after that.
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by UDP-encap")
pingCmd := fmt.Sprintf("ping -c4 %s &", leftIP)
_, err = exutil.DebugNodeWithChroot(oc, rightNode, "bash", "-c", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "UDP-encap")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
35cf7a03-3585-4545-aa2a-bcbc4efdb9ba
|
Author:ansaxen-Medium-73554-External Traffic should still be IPsec encrypted in presense of Admin Network Policy application at egress node [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ipsec.go
|
g.It("Author:ansaxen-Medium-73554-External Traffic should still be IPsec encrypted in presense of Admin Network Policy application at egress node [Disruptive]", func() {
if platformvar == "rdu2" {
g.Skip("This case is only applicable to GCP, skipping this testcase.")
}
var (
testID = "73554"
testDataDir = exutil.FixturePath("testdata", "networking")
banpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "banp-single-rule-template-node.yaml")
anpCRTemplate = filepath.Join(testDataDir, "adminnetworkpolicy", "anp-single-rule-template-node.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
)
g.By("Add label to OCP egress node")
defer exutil.DeleteLabelFromNode(oc, rightNode, "team-")
exutil.AddLabelToNode(oc, rightNode, "team", "qe")
exutil.By("Create a Baseline Admin Network Policy with allow action")
banpCR := singleRuleBANPPolicyResourceNode{
name: "default",
subjectKey: matchLabelKey,
subjectVal: "openshift-nmstate",
policyType: "egress",
direction: "to",
ruleName: "default-allow-egress",
ruleAction: "Allow",
ruleKey: "node-role.kubernetes.io/worker",
template: banpCRTemplate,
}
defer removeResource(oc, true, true, "banp", banpCR.name)
banpCR.createSingleRuleBANPNode(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("banp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, banpCR.name)).To(o.BeTrue())
exutil.By("Verify ANP with different actions and priorities")
anpIngressRuleCR := singleRuleANPPolicyResourceNode{
name: "anp-" + testID + "-1",
subjectKey: matchLabelKey,
subjectVal: "openshift-nmstate",
priority: 1,
policyType: "egress",
direction: "to",
ruleName: "node-as-egress-peer-" + testID,
ruleAction: "Allow",
ruleKey: "team",
nodeKey: "node-role.kubernetes.io/worker",
ruleVal: "qe",
actionname: "egress",
actiontype: "Allow",
template: anpCRTemplate,
}
defer removeResource(oc, true, true, "anp", anpIngressRuleCR.name)
anpIngressRuleCR.createSingleRuleANPNode(oc)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("anp").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, anpIngressRuleCR.name)).To(o.BeTrue())
exutil.By("Configure nmstate ipsec policy")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
createNMstateCR(oc, nmstateCR)
policyName := "ipsec-policy-transport-" + testID
defer removeIPSecConfig(oc, policyName, ipsecTunnel, rightNode)
configIPSecNMSatePolicy(oc, policyName, rightIP, rightNode, ipsecTunnel, leftIP, nodeCert, "transport")
exutil.By("Checking ipsec session was established between worker node and external host")
verifyIPSecTunnelUp(oc, rightNode, rightIP, leftIP, "transport")
exutil.By("Start tcpdump on ipsec right node")
e2e.Logf("Trying to get physical interface on the node,%s", rightNode)
phyInf, nicError := getSnifPhyInf(oc, rightNode)
o.Expect(nicError).NotTo(o.HaveOccurred())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
tcpdumpCmd := fmt.Sprintf("timeout 60s tcpdump -c 4 -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
exutil.By("Checking icmp between worker node and external host encrypted by ESP")
pingCmd := fmt.Sprintf("ping -c4 %s &", rightIP)
err = sshRunCmd(leftPublicIP, "core", pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cmdTcpdump.Wait()
e2e.Logf("tcpdump for ping is \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "ESP")).Should(o.BeTrue())
cmdTcpdump.Process.Kill()
exutil.By("Start tcpdump on ipsec right node again")
tcpdumpCmd2 := fmt.Sprintf("timeout 60s tcpdump -nni %s esp and dst %s", phyInf, leftIP)
cmdTcpdump2, cmdOutput2, _, err := oc.AsAdmin().Run("debug").Args("node/"+rightNode, "--", "bash", "-c", tcpdumpCmd2).Background()
defer cmdTcpdump2.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Checking ssh between worker node and external host encrypted by ESP")
time.Sleep(5 * time.Second)
result, timeoutTestErr := accessEgressNodeFromIntSvcInstanceOnGCP(leftPublicIP, rightIP)
o.Expect(timeoutTestErr).NotTo(o.HaveOccurred())
o.Expect(result).To(o.Equal("0"))
cmdTcpdump2.Wait()
e2e.Logf("tcpdump for ssh is \n%s", cmdOutput2.String())
o.Expect(strings.Contains(cmdOutput.String(), "ESP")).Should(o.BeTrue())
})
| |||||
test
|
openshift/openshift-tests-private
|
bdf2d978-a3e8-4acc-86d1-813ea64242a1
|
metallb
|
import (
"context"
"encoding/json"
"fmt"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
package networking
import (
"context"
"encoding/json"
"fmt"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// Test for staging pipeline
var _ = g.Describe("[sig-networking] SDN metallb", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-metallb", exutil.KubeConfigPath())
opNamespace = "metallb-system"
opName = "metallb-operator"
testDataDir = exutil.FixturePath("testdata", "networking/metallb")
)
g.BeforeEach(func() {
networkType := exutil.CheckNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("This case requires OVNKubernetes as network plugin, skip the test as the cluster does not have OVN network plugin")
}
namespaceTemplate := filepath.Join(testDataDir, "namespace-template.yaml")
operatorGroupTemplate := filepath.Join(testDataDir, "operatorgroup-template.yaml")
subscriptionTemplate := filepath.Join(testDataDir, "subscription-template.yaml")
sub := subscriptionResource{
name: "metallb-operator-sub",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
g.By("Making sure CRDs are successfully installed")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "bfdprofiles.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgpadvertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgppeers.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "communities.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "ipaddresspools.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "l2advertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "metallbs.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrconfigurations.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrnodestates.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "servicel2statuses.metallb.io")).To(o.BeTrue())
})
g.It("Author:asood-NonHyperShiftHOST-LEVEL0-StagerunBoth-High-43074-MetalLB-Operator installation ", func() {
g.By("Checking metalLB operator installation")
e2e.Logf("Operator install check successfull as part of setup !!!!!")
g.By("SUCCESS - MetalLB operator installed")
})
g.It("Author:asood-NonHyperShiftHOST-Medium-50950-Verify community creation and webhook validation.", func() {
communityTemplate := filepath.Join(testDataDir, "community-template.yaml")
communityCR := communityResource{
name: "community-50950",
namespace: opNamespace,
communityName: "NO_ADVERTISE",
value1: "65535",
value2: "65282",
template: communityTemplate,
}
defer removeResource(oc, true, true, "community", communityCR.name, "-n", communityCR.namespace)
result := createCommunityCR(oc, communityCR)
o.Expect(result).To(o.BeTrue())
patchCommunity := `[{"op": "add", "path": "/spec/communities/1", "value": {"name": "NO_ADVERTISE", "value":"65535:65282"}}]`
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("community", communityCR.name, "-n", communityCR.namespace, "--type=json", "-p", patchCommunity).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of community")).To(o.BeTrue())
})
g.It("Author:asood-NonHyperShiftHOST-Medium-50947-Medium-50948-Verify BGP and L2 Advertisement webhook validation.", func() {
workers := []string{"worker-1", "worker-2", "worker-3"}
bgpCommunties := []string{"65001:65500"}
ipaddrpools := []string{"ipaddresspool-0", "ipaddresspool-1"}
bgpPeers := []string{"peer-64500", "peer-65000"}
interfaces := []string{"br-ex", "eno1", "eno2"}
crMap := make(map[string]string)
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-50948",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
l2advertisement := l2AdvertisementResource{
name: "l2-adv-50947",
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
exutil.By("Create BGP and L2 Advertisement")
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
crMap["bgpadvertisements"] = bgpAdvertisement.name
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
crMap["l2advertisements"] = l2advertisement.name
for crType, crName := range crMap {
exutil.By(fmt.Sprintf("Validate duplicate ip address pool is rejected for %s", crType))
ipaddrpools = append(ipaddrpools, "ipaddresspool-1")
addrPoolList, err := json.Marshal(ipaddrpools)
o.Expect(err).NotTo(o.HaveOccurred())
patchAdvertisement := fmt.Sprintf("{\"spec\":{\"ipAddressPools\": %s}}", string(addrPoolList))
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(crType, crName, "-n", opNamespace, "--type=merge", "-p", patchAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of ipAddressPools")).To(o.BeTrue())
exutil.By(fmt.Sprintf("Validate duplicate node is rejected for %s", crType))
workers = append(workers, "worker-1")
workerList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
patchAdvertisement = fmt.Sprintf("{\"spec\":{\"nodeSelectors\":[{\"matchExpressions\":[{\"key\":\"kubernetes.io/hostname\",\"operator\":\"In\",\"values\":%s}]}]}}", string(workerList))
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args(crType, crName, "-n", opNamespace, "--type=merge", "-p", patchAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of match expression value in label selector")).To(o.BeTrue())
}
exutil.By("Validate community strings is updated with community object for BGP Advertisements")
bgpCommunties = []string{"65001:65500", "community1"}
bgpCommStrList, err := json.Marshal(bgpCommunties)
o.Expect(err).NotTo(o.HaveOccurred())
patchBgpAdvertisement := fmt.Sprintf("{\"spec\":{\"communities\": %s}}", string(bgpCommStrList))
_, patchErr1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr1).NotTo(o.HaveOccurred())
exutil.By("Validate duplicate community strings is rejected for BGP Advertisements")
bgpCommunties = append(bgpCommunties, "65001:65500")
bgpCommStrList, err = json.Marshal(bgpCommunties)
o.Expect(err).NotTo(o.HaveOccurred())
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"communities\": %s}}", string(bgpCommStrList))
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of community")).To(o.BeTrue())
exutil.By("Validate duplicate BGP Peer is rejected for BGP Advertisements")
bgpPeers = append(bgpPeers, "peer-64500")
bgpPeersList, err := json.Marshal(bgpPeers)
o.Expect(err).NotTo(o.HaveOccurred())
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"peers\": %s}}", string(bgpPeersList))
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of peers")).To(o.BeTrue())
exutil.By("Validate invalid IPv4 aggregation length is rejected for BGP Advertisements")
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"aggregationLength\": %d}}", 33)
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "invalid aggregation length")).To(o.BeTrue())
exutil.By("Validate invalid IPv6 aggregation length is rejected for BGP Advertisements")
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"aggregationLengthV6\": %d}}", 129)
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "invalid aggregation length")).To(o.BeTrue())
})
g.It("Author:qiowang-NonHyperShiftHOST-High-46124-Verify webhook validation for BGP peer", func() {
exutil.By("1. Create two BGPPeer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
for i := 1; i < 3; i++ {
BGPPeerCR := bgpPeerResource{
name: "peer-46124-" + strconv.Itoa(i),
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: "",
myASN: 65501,
peerASN: 65500 + i,
peerAddress: "10.10.10." + strconv.Itoa(i),
peerPort: 6000,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
}
exutil.By("2. Validate two BGPPeer with same peerASN and peerAddress is invalid")
patchBGPPeer := `{"spec":{"peerASN":65501,"peerAddress": "10.10.10.1"}}`
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate BGPPeers")).To(o.BeTrue())
exutil.By("3. Validate two BGPPeer with different peerASN but same peerAddress is invalid")
patchBGPPeer = `{"spec":{"peerAddress": "10.10.10.1"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "already exists")).To(o.BeTrue())
exutil.By("4. Validate two BGPPeer with different myASN is invalid")
patchBGPPeer = `{"spec":{"myASN": 65502}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "all myAsn must be equal for the same VRF")).To(o.BeTrue())
exutil.By("5. Validate BGPPeer with one of the ASN number more than 4294967296 is invalid")
patchBGPPeer = `{"spec":{"myASN": 4294967297}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "spec.myASN in body should be less than or equal to 4294967295")).To(o.BeTrue())
exutil.By("6. Validate BGPPeer with invalid source address is invalid")
patchBGPPeer = `{"spec":{"peerAddress": "10.10.10"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "invalid BGPPeer address")).To(o.BeTrue())
exutil.By("7. Validate BGPPeer with port number greater than 16384 or less than 0 is invalid")
patchBGPPeer = `{"spec":{"peerPort": 16385}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "spec.peerPort in body should be less than or equal to 16384")).To(o.BeTrue())
patchBGPPeer = `{"spec":{"peerPort": -1}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "spec.peerPort in body should be greater than or equal to 0")).To(o.BeTrue())
exutil.By("8. Validate hold timer and keepalive timer without unit is invalid")
patchBGPPeer = `{"spec":{"holdTime": "30", "keepaliveTime": "10"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "missing unit")).To(o.BeTrue())
exutil.By("9. Validate BGPPeer with keepalive timer greater than holdtime is invalid")
patchBGPPeer = `{"spec":{"keepaliveTime": "40s"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "must be lower than holdTime")).To(o.BeTrue())
})
})
// Tests related to metallb install and CR creation that can be executed more frequently
var _ = g.Describe("[sig-networking] SDN metallb install", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-metallb", exutil.KubeConfigPath())
opNamespace = "metallb-system"
opName = "metallb-operator"
testDataDir = exutil.FixturePath("testdata", "networking/metallb")
metalLBNodeSelKey = "node-role.kubernetes.io/worker"
metalLBNodeSelVal = ""
metalLBControllerSelKey = "node-role.kubernetes.io/worker"
metalLBControllerSelVal = ""
)
g.BeforeEach(func() {
// Install metallb on vSphere and baremetal but skip on all platforms
exutil.By("Check the platform if it is suitable for running the test")
platform := exutil.CheckPlatform(oc)
networkType := exutil.CheckNetworkType(oc)
if !(strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "none")) || !strings.Contains(networkType, "ovn") {
g.Skip("These cases can only be run on networking team's private RDU BM cluster, vSphere and IPI/UPI BM, skip for other platforms or other non-OVN network plugin!!!")
}
namespaceTemplate := filepath.Join(testDataDir, "namespace-template.yaml")
operatorGroupTemplate := filepath.Join(testDataDir, "operatorgroup-template.yaml")
subscriptionTemplate := filepath.Join(testDataDir, "subscription-template.yaml")
sub := subscriptionResource{
name: "metallb-operator-sub",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
g.By("Making sure CRDs are successfully installed")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "bfdprofiles.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgpadvertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgppeers.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "communities.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "ipaddresspools.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "l2advertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "metallbs.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrconfigurations.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrnodestates.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "servicel2statuses.metallb.io")).To(o.BeTrue())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("metallb", "-n", opNamespace).Output()
if err == nil && strings.Contains(output, "metallb") {
e2e.Logf("Deleting the existing metallb CR")
removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
}
})
g.It("Author:asood-NonHyperShiftHOST-High-46560-High-50944-MetalLB-CR All Workers Creation and Verify the logging level of MetalLB can be changed for debugging [Serial]", func() {
exutil.By("Creating metalLB CR on all the worker nodes in cluster")
metallbCRTemplate := filepath.Join(testDataDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: opNamespace,
nodeSelectorKey: metalLBNodeSelKey,
nodeSelectorVal: metalLBNodeSelVal,
controllerSelectorKey: metalLBControllerSelKey,
controllerSelectorVal: metalLBControllerSelVal,
template: metallbCRTemplate,
}
defer removeResource(oc, true, true, "metallb", metallbCR.name, "-n", metallbCR.namespace)
result := createMetalLBCR(oc, metallbCR, metallbCRTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - MetalLB CR Created")
exutil.By("Validate speaker and frr-k8s pods scheduled on worker nodes")
result = validateAllWorkerNodeMCR(oc, opNamespace)
o.Expect(result).To(o.BeTrue())
exutil.By("50944-Verify the logging level of MetalLB can be changed for debugging")
exutil.By("Validate log level is info")
level := "info"
components := [3]string{"controller", "speaker", "frr-k8s"}
var err string
for _, component := range components {
result, err = checkLogLevelPod(oc, component, opNamespace, level)
o.Expect(result).To(o.BeTrue())
o.Expect(err).Should(o.BeEmpty())
e2e.Logf("%s pod log level is %s", component, level)
}
exutil.By("Change the log level")
//defer not needed because metallb CR is deleted at the end of the test
patchResourceAsAdmin(oc, "metallb/"+metallbCR.name, "{\"spec\":{\"logLevel\": \"debug\"}}", opNamespace)
exutil.By("Verify the deployment and daemon set have rolled out")
dpStatus, dpStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "deployment", "controller", "--timeout", "5m").Output()
o.Expect(dpStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dpStatus, "successfully rolled out")).To(o.BeTrue())
dsSets := [2]string{"speaker", "frr-k8s"}
for _, dsSet := range dsSets {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", dsSet, "--timeout", "5m").Output()
o.Expect(dsStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dsStatus, "successfully rolled out")).To(o.BeTrue())
}
level = "debug"
for _, component := range components {
result, err = checkLogLevelPod(oc, component, opNamespace, level)
o.Expect(result).To(o.BeTrue())
o.Expect(err).Should(o.BeEmpty())
e2e.Logf("%s pod log level is %s", component, level)
}
})
g.It("Author:asood-NonHyperShiftHOST-High-54857-Validate controller and pod can be scheduled based on node selectors.[Serial]", func() {
var nodeSelKey = "kubernetes.io/hostname"
exutil.By("Obtain the worker nodes in cluster")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("This test can only be run for cluster that has atleast two worker nodes.")
}
exutil.By("Creating metalLB CR on specific worker nodes in cluster")
metallbCRTemplate := filepath.Join(testDataDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: opNamespace,
nodeSelectorKey: nodeSelKey,
nodeSelectorVal: workerList.Items[0].Name,
controllerSelectorKey: nodeSelKey,
controllerSelectorVal: workerList.Items[1].Name,
template: metallbCRTemplate,
}
defer removeResource(oc, true, true, "metallb", metallbCR.name, "-n", metallbCR.namespace)
result := createMetalLBCR(oc, metallbCR, metallbCRTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("Get the pod names for speaker and controller respectively scheduled on %s and %s", workerList.Items[0].Name, workerList.Items[1].Name))
components := []string{"speaker", "controller"}
for i, component := range components {
podName, err := exutil.GetPodName(oc, opNamespace, "component="+component, workerList.Items[i].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
}
})
g.It("Author:asood-NonHyperShiftHOST-High-54822-Validate controller and speaker pods can be scheduled based on affinity - node affinity, pod affinity and pod anti affinity.[Serial]", func() {
var (
testDataBaseDir = exutil.FixturePath("testdata", "networking")
nodeLabels = []string{"east", "west"}
nodeAffinityFile = filepath.Join(testDataDir, "metallb-cr-node-affinity.yaml")
nodeAffinityTemplate = filepath.Join(testDataDir, "metallb-cr-node-affinity-template.yaml")
podAffinityTemplate = filepath.Join(testDataDir, "metallb-cr-pod-affinity-template.yaml")
podAntiAffinityTemplate = filepath.Join(testDataDir, "metallb-cr-pod-antiaffinity-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataBaseDir, "ping-for-pod-specific-node-template.yaml")
components = []string{"controller", "speaker", "frr-k8s"}
)
exutil.By("Obtain the worker nodes in cluster")
workersList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workersList.Items) < 2 {
g.Skip("This test can only be run for cluster that has atleast two worker nodes.")
}
exutil.By("Label two nodes of the cluster")
for i := 0; i < 2; i++ {
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workersList.Items[i].Name, "zone")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workersList.Items[i].Name, "zone", nodeLabels[i])
}
defer removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
metallbCR := metalLBAffinityCRResource{
name: "metallb",
namespace: opNamespace,
param1: "",
param2: "",
template: "",
}
// Node afinity
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("Create meatllb CR with Node Affinity using node selector term - matchExpressions")
createResourceFromFile(oc, opNamespace, nodeAffinityFile)
} else {
exutil.By("Create meatllb CR with Node Affinity using node selector term - matchFields")
metallbCR.param1 = workersList.Items[0].Name
metallbCR.param2 = workersList.Items[1].Name
metallbCR.template = nodeAffinityTemplate
o.Expect(createMetalLBAffinityCR(oc, metallbCR)).To(o.BeTrue())
}
exutil.By(fmt.Sprintf("Get the pod names for controller and speaker & frr-k8s respectively scheduled on %s and %s", workersList.Items[0].Name, workersList.Items[1].Name))
expectedPodNodeList := []string{workersList.Items[0].Name, workersList.Items[1].Name, workersList.Items[1].Name}
for j, component := range components {
if j == 0 {
err := waitForPodWithLabelReady(oc, opNamespace, "component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", component, "--timeout", "5m").Output()
o.Expect(dsStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dsStatus, "successfully rolled out")).To(o.BeTrue())
}
podName, err := exutil.GetPodName(oc, opNamespace, "component="+component, expectedPodNodeList[j])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
}
removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
}
// Pod affinity and anti affinity
exutil.By("Create a pod on one of the nodes")
pod := pingPodResourceNode{
name: "hello-pod",
namespace: oc.Namespace(),
nodename: workersList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
metallbCR.param1 = pod.namespace
metallbCR.param2 = pod.namespace
metallbCRTemplateList := []string{podAffinityTemplate, podAntiAffinityTemplate}
dsSearchStrList := []string{fmt.Sprintf("1 of %v updated pods are available", len(workersList.Items)), fmt.Sprintf("%v of %v updated pods are available", len(workersList.Items)-1, len(workersList.Items))}
scenarioStrList := []string{"affinity", "anti affinity"}
for index, scenario := range scenarioStrList {
exutil.By(fmt.Sprintf("Create meatllb CR with pod %s", scenario))
metallbCR.template = metallbCRTemplateList[index]
o.Expect(createMetalLBAffinityCR(oc, metallbCR)).To(o.BeTrue())
exutil.By(fmt.Sprintf("Validate roll out status of speaker and frr-k8s for pod %s", scenario))
for i := 1; i < len(components); i++ {
o.Eventually(func() bool {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", components[i], "--timeout", "10s").Output()
o.Expect(dsStatusErr).To(o.HaveOccurred())
return strings.Contains(dsStatus, dsSearchStrList[index])
}, "60s", "10s").Should(o.BeTrue(), "Pods did not reach running status")
}
if index == 0 {
exutil.By(fmt.Sprintf("Validate metallb pods are running only on %s", workersList.Items[0].Name))
for i := 0; i < len(components); i++ {
podName, err := exutil.GetPodName(oc, opNamespace, "component="+components[i], workersList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
}
} else {
exutil.By(fmt.Sprintf("Validate metallb pods are not running on %s", workersList.Items[0].Name))
for i := 0; i < len(components); i++ {
podName, err := exutil.GetPodName(oc, opNamespace, "component="+components[i], workersList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).To(o.BeEmpty())
}
}
removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
}
})
g.It("Author:asood-NonHyperShiftHOST-High-54823-Validate controller and speaker pods are scheduled on nodes based priority class. [Serial]", func() {
var (
metallbCRPriorityClassFile = filepath.Join(testDataDir, "metallb-cr-priority-class.yaml")
metallbPriorityClassFile = filepath.Join(testDataDir, "metallb-priority-class.yaml")
components = []string{"controller", "speaker", "frr-k8s"}
)
exutil.By("Create meatllb CR with priority class")
createResourceFromFile(oc, opNamespace, metallbCRPriorityClassFile)
defer removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
exutil.By("Validate metallb CR not created as priority class is not yet created")
// just check the daemon sets as pods are not expected to be scheduled
for i := 1; i < len(components); i++ {
o.Eventually(func() bool {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", components[i], "--timeout", "10s").Output()
o.Expect(dsStatusErr).To(o.HaveOccurred())
return strings.Contains(dsStatus, "0 out of")
}, "60s", "10s").Should(o.BeTrue(), "Pods did not reach running status")
}
createResourceFromFile(oc, opNamespace, metallbPriorityClassFile)
defer removeResource(oc, true, true, "priorityclass", "metallb-high-priority")
exutil.By("Validate metallb CR is created after priority class is created")
for j, component := range components {
if j == 0 {
err := waitForPodWithLabelReady(oc, opNamespace, "component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", component, "--timeout", "60s").Output()
o.Expect(dsStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dsStatus, "successfully rolled out")).To(o.BeTrue())
}
}
})
})
// L2 tests
var _ = g.Describe("[sig-networking] SDN metallb l2", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-metallb", exutil.KubeConfigPath())
opNamespace = "metallb-system"
opName = "metallb-operator"
serviceLabelKey = "environ"
serviceLabelValue = "Test"
serviceNodePortAllocation = true
testDataDir = exutil.FixturePath("testdata", "networking/metallb")
l2Addresses = [2][2]string{{"192.168.111.65-192.168.111.69", "192.168.111.70-192.168.111.74"}, {"192.168.111.75-192.168.111.79", "192.168.111.80-192.168.111.85"}}
proxyHost = "10.8.1.181"
metalLBNodeSelKey = "node-role.kubernetes.io/worker"
metalLBNodeSelVal = ""
metalLBControllerSelKey = "node-role.kubernetes.io/worker"
metalLBControllerSelVal = ""
ipAddressPoolLabelKey = "zone"
ipAddressPoolLabelVal = "east"
)
g.BeforeEach(func() {
exutil.By("Check the platform if it is suitable for running the test")
networkType := exutil.CheckNetworkType(oc)
if !(isPlatformSuitable(oc)) || !strings.Contains(networkType, "ovn") {
g.Skip("These cases can only be run on networking team's private RDU cluster , skip for other platforms or non-OVN network plugin!!!")
}
namespaceTemplate := filepath.Join(testDataDir, "namespace-template.yaml")
operatorGroupTemplate := filepath.Join(testDataDir, "operatorgroup-template.yaml")
subscriptionTemplate := filepath.Join(testDataDir, "subscription-template.yaml")
sub := subscriptionResource{
name: "metallb-operator-sub",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
exutil.By("Check the catalog source")
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
exutil.By("Making sure CRDs are successfully installed")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "bfdprofiles.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgpadvertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgppeers.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "communities.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "ipaddresspools.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "l2advertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "metallbs.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrconfigurations.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrnodestates.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "servicel2statuses.metallb.io")).To(o.BeTrue())
exutil.By("Create MetalLB CR")
metallbCRTemplate := filepath.Join(testDataDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: opNamespace,
nodeSelectorKey: metalLBNodeSelKey,
nodeSelectorVal: metalLBNodeSelVal,
controllerSelectorKey: metalLBControllerSelKey,
controllerSelectorVal: metalLBControllerSelVal,
template: metallbCRTemplate,
}
result := createMetalLBCR(oc, metallbCR, metallbCRTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - MetalLB CR Created")
})
g.It("Author:asood-High-43075-Create L2 LoadBalancer Service [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "43075"
)
exutil.By("1. Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("2. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("SUCCESS - IP Addresspool")
exutil.By("3. Create L2Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
g.By("4. Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
g.By("4.1 Create a service with ExtenalTrafficPolicy Local")
svc1 := loadBalancerServiceResource{
name: "hello-world-local",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Local",
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc1, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
g.By("4.2 Create a service with ExtenalTrafficPolicy Cluster")
svc2 := loadBalancerServiceResource{
name: "hello-world-cluster",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc2, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - Services created successfully")
exutil.By("4.3 Validate LoadBalancer services")
err = checkLoadBalancerSvcStatus(oc, svc1.namespace, svc1.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc1.namespace, svc1.name)
e2e.Logf("The service %s External IP is %q", svc1.name, svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc2.namespace, svc2.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc2.namespace, svc2.name)
e2e.Logf("The service %s External IP is %q", svc2.name, svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
})
g.It("Author:asood-High-53333-High-49622-Verify for the service IP address of NodePort or LoadBalancer service ARP requests gets response from one interface only and prometheus metrics are updated when service is removed. [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "53333"
)
exutil.By("Test case for bug ID 2054225")
exutil.By("1.0 Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By(fmt.Sprintf("1.1 Add label to operator namespace %s to enable monitoring", opNamespace))
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", opNamespace, "openshift.io/cluster-monitoring-").Execute()
labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", opNamespace, "openshift.io/cluster-monitoring=true").Execute()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("2. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("SUCCESS - IP Addresspool")
exutil.By("3. Create L2Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("4. Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
exutil.By("4.1 Create a service with ExtenalTrafficPolicy Cluster")
svc := loadBalancerServiceResource{
name: "hello-world-cluster",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - Services created successfully")
exutil.By("4.2 Validate LoadBalancer services")
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("5. Validate MAC Address assigned to service")
exutil.By("5.1 Get the node announcing the service IP")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node announcing the service IP %s ", nodeName)
g.By("5.2 Obtain MAC address for Load Balancer Service IP")
macAddress, result := obtainMACAddressForIP(oc, masterNodeList[0], svcIP, 5)
o.Expect(result).To(o.BeTrue())
o.Expect(macAddress).NotTo(o.BeEmpty())
e2e.Logf("MAC address by ARP Lookup %s ", macAddress)
exutil.By("5.3 Get MAC address configured on the node interface announcing the service IP Address")
macAddress1 := getNodeMacAddress(oc, nodeName)
o.Expect(macAddress1).NotTo(o.BeEmpty())
e2e.Logf("MAC address of announcing node %s ", macAddress1)
o.Expect(strings.ToLower(macAddress)).Should(o.Equal(macAddress1))
exutil.By("OCP-49622 LoadBalancer service prometheus metrics are updated when service is removed")
l2Metrics := "metallb_speaker_announced"
exutil.By(fmt.Sprintf("6.1 Get %s metrics for the service %s at %s IP Address", l2Metrics, svc.name, svcIP))
o.Expect(checkPrometheusMetrics(oc, 10*time.Second, 200*time.Second, false, l2Metrics, true)).To(o.BeTrue())
exutil.By("6.2 Delete the service and check meterics are removed")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
o.Expect(checkPrometheusMetrics(oc, 5*time.Second, 30*time.Second, true, l2Metrics, false)).To(o.BeTrue())
})
g.It("Author:asood-High-60182-Verify the nodeport is not allocated to VIP based LoadBalancer service type [Disruptive]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
svc_names = [2]string{"hello-world-cluster", "hello-world-local"}
svc_etp = [2]string{"Cluster", "Local"}
)
exutil.By("1. Determine suitability of worker nodes for the test")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("2. Create two namespace")
for i := 0; i < 2; i++ {
oc.SetupProject()
ns = oc.Namespace()
namespaces = append(namespaces, ns)
g.By("Label the namespace")
_, err := oc.AsAdmin().Run("label").Args("namespace", ns, namespaceLabelKey+"="+namespaceLabelValue[0], "--overwrite").Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("3. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
g.By("SUCCESS - IP Addresspool")
g.By("4. Create L2Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
for i := 0; i < 2; i++ {
g.By("5.1 Create a service with extenaltrafficpolicy " + svc_etp[i])
svc := loadBalancerServiceResource{
name: svc_names[i],
namespace: namespaces[i],
externaltrafficpolicy: svc_etp[i],
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: false,
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
g.By("5.2 LoadBalancer service with name " + svc_names[i])
g.By("5.2.1 Check LoadBalancer service is created")
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("5.2.2 Get LoadBalancer service IP")
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
g.By("5.2.3 Get LoadBalancer service IP announcing node")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
g.By("5.2.4 Validate service")
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
g.By("5.2.5 Check nodePort is not assigned to service")
nodePort := getLoadBalancerSvcNodePort(oc, svc.namespace, svc.name)
o.Expect(nodePort).To(o.BeEmpty())
}
g.By("6. Change the shared gateway mode to local gateway mode")
var desiredMode string
origMode := getOVNGatewayMode(oc)
if origMode == "local" {
desiredMode = "shared"
} else {
desiredMode = "local"
}
e2e.Logf("Cluster is currently on gateway mode %s", origMode)
e2e.Logf("Desired mode is %s", desiredMode)
defer switchOVNGatewayMode(oc, origMode)
switchOVNGatewayMode(oc, desiredMode)
g.By("7. Validate services in modified gateway mode " + desiredMode)
for i := 0; i < 2; i++ {
g.By("7.1 Create a service with extenal traffic policy " + svc_etp[i])
svc_names[i] = svc_names[i] + "-0"
svc := loadBalancerServiceResource{
name: svc_names[i],
namespace: namespaces[i],
externaltrafficpolicy: svc_etp[i],
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: false,
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
g.By("7.2 LoadBalancer service with name " + svc_names[i])
g.By("7.2.1 Check LoadBalancer service is created")
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("7.2.2 Get LoadBalancer service IP")
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
g.By("7.2.3 Get LoadBalancer service IP announcing node")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
g.By("7.2.4 Validate service")
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
g.By("7.2.5 Check nodePort is not assigned to service")
nodePort := getLoadBalancerSvcNodePort(oc, svc.namespace, svc.name)
o.Expect(nodePort).To(o.BeEmpty())
}
})
// Test cases for CNF-6313 L2 interface selector productization
g.It("Author:asood-Longduration-NonPreRelease-High-60513-High-60514-High-60515-High-60518-High-60519-Verify L2 service is reachable if service IP is advertised from specific interface on node using one or more L2 advertisements through the updates to L2 advetisements and gets indication if interface is not configured[Serial]", func() {
var (
ns string
namespaces []string
testID = "60513"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
vmWorkers []string
workers []string
ipaddresspools []string
)
//Two worker nodes needed to create l2advertisement object
exutil.By("0. Determine suitability of worker nodes for the test")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
for i := 0; i < len(workerList.Items); i++ {
if strings.Contains(workerList.Items[i].Name, "worker") {
vmWorkers = append(vmWorkers, workerList.Items[i].Name)
} else {
workers = append(workers, workerList.Items[i].Name)
}
}
e2e.Logf("Virtual Nodes %s", vmWorkers)
e2e.Logf("Real Nodes %s", workers)
if len(workers) < 1 || len(vmWorkers) < 1 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes, virtual and real each.")
}
vmList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("2. Get the master nodes in the cluster for validating service")
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
exutil.By("3. Create IP addresspools")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By(fmt.Sprintf("IP address pool %s created successfully", ipaddresspools[:]))
//Ensure address is not assigned from address pool automatically by setting autoAssign to false
addressList, err := json.Marshal(l2Addresses[1][:])
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo := fmt.Sprintf("{\"spec\":{\"autoAssign\": false, \"addresses\": %s}}", string(addressList))
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[1], patchInfo, "metallb-system")
exutil.By("4. Create L2 Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
//Just assign one of the addresspool, use the second one for later
ipaddrpools := []string{ipaddresspools[0], ""}
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("5.0 60513 Verify L2 service with ETP Local or Cluster is reachable if service IP is advertised from specific interface on node.")
exutil.By(fmt.Sprintf("5.1 Patch L2 Advertisement to ensure one interface that allows functionl services for test case %s", testID))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
exutil.By("5.2 Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-0",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
exutil.By(fmt.Sprintf("5.3. Create a service with ETP cluster with name %s", svc.name))
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
exutil.By("5.4 Validate LoadBalancer services")
svcErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr := wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
svc.name = "hello-world-" + testID + "-1"
svc.externaltrafficpolicy = "Local"
exutil.By(fmt.Sprintf("5.5 Create a service with ETP %s with name %s", svc.externaltrafficpolicy, svc.name))
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
exutil.By("5.6 Validate LoadBalancer services")
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, masterNodeList[0], svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
testID = "60514"
exutil.By("6.0 60514 Verify user is given indication if specified interface does not exist on any of the selected node in L2 advertisement")
exutil.By(fmt.Sprint("6.1 Patch L2 Advertisement to use interface that does not exist on nodes for test case", testID))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"eno1\"]}}", "metallb-system")
exutil.By(fmt.Sprintf("6.2 Create service for test case %s", testID))
svc.name = "hello-world-" + testID
svc.externaltrafficpolicy = "Cluster"
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("6.3 Check the event is generated for the interface")
isEvent, _ := checkServiceEvents(oc, svc.name, svc.namespace, "announceFailed")
o.Expect(isEvent).To(o.BeTrue())
exutil.By("6.4 Validate LoadBalancer service is not reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
//There should not be any MAC address associated with service IP.
_, macAddressResult := obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
exutil.By("6.5 Validate LoadBalancer service is reachable after L2 Advertisement is updated")
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
testID = "60515"
exutil.By("7.0 60515 Verify service IP from IP addresspool for set of worker nodes is announced from a specific interface")
exutil.By(fmt.Sprintf("8.1 Update interfaces and nodeSelector of %s", l2advertisement.name))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"eno1\", \"eno2\"]}}", "metallb-system")
patchNodeSelector := fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(vmList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, patchNodeSelector, "metallb-system")
exutil.By("7.2 Create L2 service that is unreachable")
svc.name = "hello-world-" + testID
svc.externaltrafficpolicy = "Cluster"
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("7.3 Validate LoadBalancer service is not reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
_, macAddressResult = obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
exutil.By("7.4 Create another l2advertisement CR with same ip addresspool but different set of nodes and interface")
l2advertisement1 := l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement1.name, "-n", l2advertisement1.namespace)
result = createL2AdvertisementCR(oc, l2advertisement1, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
patchNodeSelector = fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(vmList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, patchNodeSelector, "metallb-system")
exutil.By("7.5 Check the event is not generated for the interface")
isEvent, _ = checkServiceEvents(oc, svc.name, svc.namespace, "announceFailed")
o.Expect(isEvent).To(o.BeFalse())
exutil.By("7.6 Get LoadBalancer service IP announcing node")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
exutil.By("7.7 Verify the service is functional as the another L2 advertisement is used for the ip addresspool")
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
testID = "60518"
i := 0
var svcIPs []string
exutil.By("8.0 60518 Verify configuration changes like updating the L2 advertisement to add interface, removing L2advertisement and updating addresspool works.")
removeResource(oc, true, true, "l2advertisements", l2advertisement1.name, "-n", l2advertisement1.namespace)
exutil.By(fmt.Sprintf("8.1 Update interfaces and nodeSelector of %s", l2advertisement.name))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"br-ex\", \"eno2\"]}}", "metallb-system")
patchNodeSelector = fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(vmList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, patchNodeSelector, "metallb-system")
exutil.By("8.2 Create L2 service")
svc.name = "hello-world-" + testID + "-" + strconv.Itoa(i)
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("8.3 Validate LoadBalancer service is reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
exutil.By(fmt.Sprintf("8.4 Delete the L2 advertisement resource named %s", l2advertisement.name))
removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
exutil.By(fmt.Sprintf("8.5 Validate service with name %s is unreachable", svc.name))
nodeName = getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
_, macAddressResult = obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
svcIPs = append(svcIPs, svcIP)
exutil.By("8.6 Create another service request IP address from second IP addresspool, so see it is unreachable")
i = i + 1
loadBalancerServiceAnnotatedTemplate := filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
annotatedSvc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-" + strconv.Itoa(i),
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: serviceSelectorValue[0],
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddresspools[1],
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The %s service created successfully with %s with annotation %s:%s", annotatedSvc.name, svcIP, annotatedSvc.annotationKey, annotatedSvc.annotationValue)
svcIPs = append(svcIPs, svcIP)
_, macAddressResult = obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
exutil.By("8.7 Create L2 Advertisements with both ip address pools")
l2advertisement = l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
addrPoolList, err := json.Marshal(ipaddresspools)
o.Expect(err).NotTo(o.HaveOccurred())
patchIPAddresspools := fmt.Sprintf("{\"spec\":{\"ipAddressPools\": %s}}", string(addrPoolList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, patchIPAddresspools, "metallb-system")
exutil.By("8.8 Both services are functional")
for i = 0; i < 2; i++ {
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIPs[i])
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service at %s to be reachable but was unreachable", svcIPs[i]))
}
testID = "60519"
exutil.By("9.0 60519 Verify interface can be selected across l2advertisements.")
exutil.By(fmt.Sprintf("9.1 Update interface list of %s L2 Advertisement object to non functional", l2advertisement.name))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"eno1\", \"eno2\"]}}", "metallb-system")
exutil.By("9.2 Create another L2 Advertisement")
l2advertisement1 = l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement1.name, "-n", l2advertisement1.namespace)
result = createL2AdvertisementCR(oc, l2advertisement1, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, "{\"spec\":{\"nodeSelectors\": []}}", "metallb-system")
exutil.By("9.3 Create L2 Service")
svc.name = "hello-world-" + testID
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("9.4 Validate LoadBalancer service is reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
})
// Test cases service annotation
g.It("Author:asood-High-43155-High-43156-High-43313-Verify static address is associated with LoadBalancer service specified in YAML, approriate messages are logged if it cannot be and services can share IP [Serial]", func() {
var (
ns string
namespaces []string
testID = "43155"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
vmWorkers []string
ipaddresspools []string
requestedIp = "192.168.111.65"
)
//Two worker nodes needed to create l2advertisement object
exutil.By("1. Determine suitability of worker nodes for the test")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes, virtual and real each.")
}
for i := 0; i < 2; i++ {
vmWorkers = append(vmWorkers, workerList.Items[i].Name)
}
exutil.By("2. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("3. Create IP addresspools")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By(fmt.Sprintf("IP address pool %s created successfully", ipaddresspools[:]))
//Ensure address is not assigned from address pool automatically by setting autoAssign to false
addressList, err := json.Marshal(l2Addresses[1][:])
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo := fmt.Sprintf("{\"spec\":{\"autoAssign\": false, \"addresses\": %s, \"serviceAllocation\":{\"serviceSelectors\":[], \"namespaces\":[\"%s\"], \"namespaceSelectors\":[] }}}", string(addressList), "test-"+testID)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[1], patchInfo, "metallb-system")
exutil.By("4. Create L2 Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
//Just assign one of the addresspool, use the second one later
ipaddrpools := []string{ipaddresspools[0], ""}
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("5.0 %s Verify L2 service requesting specific IP %s.", testID, requestedIp))
exutil.By("5.1 Create L2 LoadBalancer service with annotated IP address")
loadBalancerServiceAnnotatedTemplate := filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
annotatedSvc := loadBalancerServiceResource{
name: "hello-world-" + testID,
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: serviceSelectorValue[0],
annotationKey: "metallb.universe.tf/loadBalancerIPs",
annotationValue: requestedIp,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
exutil.By(fmt.Sprintf("5.2. Create a service with ETP Cluster with name %s", annotatedSvc.name))
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("5.3 Validate LoadBalancer service")
svcErr := checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The service %s External IP is %s", annotatedSvc.name, svcIP)
checkSvcErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 4*time.Minute, false, func(ctx context.Context) (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", annotatedSvc.name, svcIP))
testID = "43156"
exutil.By(fmt.Sprintf("6.0 %s Verify L2 service requesting IP from pool %s for AllocationFailed.", testID, ipaddresspools[1]))
exutil.By("6.1 Create L2 LoadBalancer service with annotated IP address pool")
annotatedSvc.name = "hello-world-" + testID + "-0"
annotatedSvc.annotationKey = "metallb.universe.tf/address-pool"
annotatedSvc.annotationValue = ipaddresspools[1]
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("6.2 Validate LoadBalancer service")
//Use interval and timeout as it is expected IP assignment will fail
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name, 5*time.Second, 30*time.Second)
o.Expect(svcErr).To(o.HaveOccurred())
exutil.By("6.3 Validate allocation failure reason")
isEvent, msg := checkServiceEvents(oc, annotatedSvc.name, annotatedSvc.namespace, "AllocationFailed")
o.Expect(isEvent).To(o.BeTrue())
o.Expect(strings.Contains(msg, fmt.Sprintf("pool %s not compatible for ip assignment", ipaddresspools[1]))).To(o.BeTrue())
exutil.By("6.4 Update IP address pool %s address range for already used IP address")
patchInfo = fmt.Sprintf("{\"spec\":{\"addresses\":[\"%s-%s\"]}}", requestedIp, requestedIp)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[0], patchInfo, "metallb-system")
exutil.By("6.5 Create another service AllocationFailed reason ")
annotatedSvc.name = "hello-world-" + testID + "-1"
annotatedSvc.annotationKey = "metallb.universe.tf/address-pool"
annotatedSvc.annotationValue = ipaddresspools[0]
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("6.6 Validate LoadBalancer service")
//Use interval and timeout as it is expected IP assignment will fail
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name, 5*time.Second, 30*time.Second)
o.Expect(svcErr).To(o.HaveOccurred())
exutil.By("6.7 Validate allocation failure reason")
isEvent, msg = checkServiceEvents(oc, annotatedSvc.name, annotatedSvc.namespace, "AllocationFailed")
o.Expect(isEvent).To(o.BeTrue())
o.Expect(strings.Contains(msg, fmt.Sprintf("no available IPs in pool \"%s\"", ipaddresspools[0]))).To(o.BeTrue())
testID = "43313"
exutil.By(fmt.Sprintf("7.0 %s Verify one address can be associated with more than one service using annotation metallb.universe.tf/allow-shared-ip", testID))
exutil.By(fmt.Sprintf("7.1 Patch IP addresspool pool %s address range to original range", ipaddresspools[0]))
addressList, err = json.Marshal(l2Addresses[0][:])
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo = fmt.Sprintf("{\"spec\":{\"addresses\": %s}}", string(addressList))
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[0], patchInfo, "metallb-system")
annotationForSvc := fmt.Sprintf("\"shared-ip-%s-svc\"", testID)
exutil.By("7.2 Create first L2 LoadBalancer service with annotation")
annotatedSvc.name = "hello-world-" + testID + "-tcp"
annotatedSvc.annotationKey = "metallb.universe.tf/allow-shared-ip"
annotatedSvc.annotationValue = annotationForSvc
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("7.3 Validate LoadBalancer service is assigned an IP")
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP1 := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The service %s External IP is %s", annotatedSvc.name, svcIP1)
exutil.By("7.4 Create second L2 LoadBalancer service with annotation")
annotatedSvc.name = "hello-world-" + testID + "-udp"
annotatedSvc.annotationKey = "metallb.universe.tf/allow-shared-ip"
annotatedSvc.annotationValue = annotationForSvc
annotatedSvc.protocol = "UDP"
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("7.5 Validate LoadBalancer service is assigned an IP")
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP2 := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The service %s External IP is %s", annotatedSvc.name, svcIP2)
o.Expect(svcIP1).To(o.BeEquivalentTo(svcIP2))
exutil.By(fmt.Sprintf("7.6 Validate LoadBalancer services sharing the IP address %s", svcIP1))
exutil.By(fmt.Sprintf("7.6.1 LoadBalancer service at IP address %s configured with TCP", svcIP1))
checkSvcErr = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 4*time.Minute, false, func(ctx context.Context) (bool, error) {
result := validateService(oc, proxyHost, svcIP1)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", annotatedSvc.name, svcIP))
exutil.By(fmt.Sprintf("7.6.2 LoadBalancer service at IP address %s configured with UDP", svcIP2))
allUdpSvcPods, getPodsErr := exutil.GetAllPodsWithLabel(oc, ns, "name="+annotatedSvc.name)
o.Expect(getPodsErr).NotTo(o.HaveOccurred())
exutil.By("Listen on port 80 on a backend pod of UDP service")
e2e.Logf("Listening on pod %s", allUdpSvcPods[0])
cmdNcat, cmdOutput, _, ncatCmdErr := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, allUdpSvcPods[0], "bash", "-c", `timeout --preserve-status 60 ncat -u -l 8080`).Background()
defer cmdNcat.Process.Kill()
o.Expect(ncatCmdErr).NotTo(o.HaveOccurred())
allTcpSvcPods, getPodsErr := exutil.GetAllPodsWithLabel(oc, ns, "name=hello-world-"+testID+"-tcp")
o.Expect(getPodsErr).NotTo(o.HaveOccurred())
e2e.Logf("Sending UDP packets from pod %s to service %s", allTcpSvcPods[0], annotatedSvc.name)
cmd := fmt.Sprintf("echo hello | ncat -v -u %s 80", svcIP2)
for i := 0; i < 5; i++ {
output, ncatCmdErr := execCommandInSpecificPod(oc, ns, allTcpSvcPods[0], cmd)
o.Expect(ncatCmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(output), "bytes sent")).To(o.BeTrue())
}
e2e.Logf("UDP pod server output %s", cmdOutput)
o.Expect(strings.Contains(cmdOutput.String(), "hello")).To(o.BeTrue())
})
//https://issues.redhat.com/browse/OCPBUGS-14769
g.It("Author:asood-High-64809-ovnkube-node sends netlink delete request deleting conntrack entries for API redirect iptables rule [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "64809"
)
exutil.By("1. Get API VIP for cluster and Node hosting the VIP")
apiVIP := GetAPIVIPOnCluster(oc)
if apiVIP == "" {
g.Skip("This case requires API VIP to configured on the cluster")
}
apiVIPNode := FindVIPNode(oc, apiVIP)
if apiVIPNode == "" {
g.Skip("This case requires API VIP to configured on the cluster on one of nodes, found none")
}
e2e.Logf("API VIP %s on the cluster is configured on %s", apiVIP, apiVIPNode)
exutil.By("2. Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("3. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("4. Create L2 Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
conntrackRulesCmd := fmt.Sprintf("conntrack -E -o timestamp | grep %s | grep DESTROY | grep -v CLOSE | grep 6443 | grep ESTABL", apiVIP)
cmdContrackRulesdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+apiVIPNode, "--", "chroot", "/host", "bash", "-c", conntrackRulesCmd).Background()
defer cmdContrackRulesdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: false,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
for i := 0; i < 10; i++ {
svc.name = "hello-world-" + testID + "-" + strconv.Itoa(i)
exutil.By(fmt.Sprintf("Create a service %s with ExtenalTrafficPolicy Cluster", svc.name))
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("Validate LoadBalancer service %s", svc.name))
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("LB service created with IP %s", svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("DeleteLoadBalancer service %s", svc.name))
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
}
e2e.Logf("Conntrack rules output \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "")).Should(o.BeTrue())
})
g.It("Author:qiowang-High-51186-High-54819-Validate ipAddressPoolSelector, ipAddressPool and nodeSelector are honored when advertising service IP address with L2 advertisement [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
ipAddressPoolSelectorsKey = "zone"
ipAddressPoolSelectorsValues = [2][2]string{{"east"}, {"west"}}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "51186"
expectedAddress1 = "192.168.111.65"
expectedAddress2 = "192.168.111.75"
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
loadBalancerServiceAnnotatedTemplate = filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
)
exutil.By("1. Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("2. Create two IP addresspools with different labels")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + testID + "-" + strconv.Itoa(i),
namespace: opNamespace,
addresses: l2Addresses[i][:],
namespaces: namespaces,
label1: ipAddressPoolSelectorsKey,
value1: ipAddressPoolSelectorsValues[i][0],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By("3. Create L2Advertisement with ipAddressPool and nodeSelectors")
l2advertisement := l2AdvertisementResource{
name: "l2-adv" + testID,
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
exutil.By("4. Create LoadBalancer services using Layer 2 addresses")
svc := loadBalancerServiceResource{
name: "hello-world-cluster",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddresspools[0],
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
template: loadBalancerServiceAnnotatedTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
exutil.By("5. Check IP address assigned from addresspool, and advertised only on one of the node listed in l2advertisements")
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node %s announcing the service IP", nodeName)
o.Expect(nodeName).Should(o.Or(o.Equal(workers[0]), o.Equal(workers[1])))
exutil.By("6. Remove the previously created services")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
removeResource(oc, true, true, "replicationcontroller", svc.name, "-n", svc.namespace)
exutil.By("7. Update L2Advertisement, update ipAddressPool and nodeSelectors, add ipAddressPoolSelectors")
patchL2Advertisement := `[{"op": "replace", "path": "/spec/ipAddressPools", "value": [""]}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["` + workers[1] + `"]}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "l2advertisement", l2advertisement.name, "--type=json", "-p", patchL2Advertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
patchIPAddrPoolSelectors := `{"spec":{"ipAddressPoolSelectors":[{"matchExpressions": [{"key": "` + ipAddressPoolSelectorsKey + `","operator": "In","values": ["` + ipAddressPoolSelectorsValues[1][0] + `"]}]}]}}`
patchResourceAsAdmin(oc, "l2advertisement/"+l2advertisement.name, patchIPAddrPoolSelectors, "metallb-system")
exutil.By("8. Create LoadBalancer services requesting address from the second ipaddresspools")
svc.annotationValue = ipaddresspools[1]
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
exutil.By("9. Check IP address assigned from the second addresspool, and advertised only on one of the node listed in l2advertisements")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress2)).To(o.BeTrue())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
nodeName = getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node %s announcing the service IP", nodeName)
o.Expect(nodeName).Should(o.Equal(workers[1]))
exutil.By("10. OCP-54819-Add label to the first worker node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[0], "zone")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[0], "zone", "east")
exutil.By("11. OCP-54819-Edit the l2advertisement to modify the node selection")
patchL2Advertisement = `[{"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/key", "value":"zone"}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["east"]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "l2advertisement", l2advertisement.name, "--type=json", "-p", patchL2Advertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("12. OCP-54819-Check the changes to nodeSelector in L2advertisements are reflected where the service IP is announced")
nodeName = getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node %s announcing the service IP", nodeName)
o.Expect(nodeName).Should(o.Equal(workers[0]))
})
g.It("Author:meinli-High-43243-The L2 service with externalTrafficPolicy Local continues to service requests even when node announcing the service goes down. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipAddresspoolFile = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
ipaddresspools []string
namespaces []string
serviceSelectorKey = "name"
serviceSelectorValue = [1]string{"test-service"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
)
exutil.By("1. Get the namespace, masters and workers")
workerList := excludeSriovNodes(oc)
if len(workerList) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than three nodes")
}
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test43243")
exutil.By("2. create address pool with addresses from worker nodes")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolFile,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolFile)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
e2e.Logf("IP address pools %s ", ipaddresspools)
exutil.By("3. create a L2 advertisement using the above addresspool")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
nodeSelectorValues: workerList[:],
interfaces: interfaces[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("4. create a service with externalTrafficPolicy Local")
for i := 0; i < 2; i++ {
pod := pingPodResourceNode{
name: "hello-pod-" + strconv.Itoa(i),
namespace: ns,
nodename: workerList[i],
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, ns, pod.name)
}
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "LoadBalancer",
ipFamilyPolicy: "SingleStack",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "Local",
template: genericServiceTemplate,
}
svc.createServiceFromParams(oc)
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.servicename)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.servicename)
e2e.Logf("The service %s External IP is %q", svc.servicename, svcIP)
result = validateService(oc, proxyHost, svcIP+":27017")
o.Expect(result).To(o.BeTrue())
exutil.By("5. Validate service IP announcement being taken over by another node")
nodeName1 := getNodeAnnouncingL2Service(oc, svc.servicename, ns)
defer checkNodeStatus(oc, nodeName1, "Ready")
rebootNode(oc, nodeName1)
checkNodeStatus(oc, nodeName1, "NotReady")
nodeName2 := getNodeAnnouncingL2Service(oc, svc.servicename, ns)
o.Expect(strings.Join(workerList, ",")).Should(o.ContainSubstring(nodeName2))
if nodeName2 != nodeName1 {
e2e.Logf("%s worker node taken over the service successfully!!!", nodeName2)
} else {
e2e.Fail("No worker node taken over the service after reboot")
}
// verify the service request after another worker nodeAssigned
for i := 0; i < 2; i++ {
o.Expect(validateService(oc, proxyHost, svcIP+":27017")).To(o.BeTrue())
}
})
g.It("Author:meinli-High-43242-The L2 service with externalTrafficPolicy Cluster continues to service requests even when node announcing the service goes down. [Disruptive]", func() {
var (
ipAddresspoolFile = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
ipaddresspools []string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
)
exutil.By("1. Get the namespace, masters and workers")
workerList := excludeSriovNodes(oc)
if len(workerList) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test43242")
exutil.By("2. create address pool with addresses from worker nodes")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolFile,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolFile)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
e2e.Logf("IP address pools %s ", ipaddresspools)
exutil.By("3. create a L2 advertisement using the above addresspool")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
nodeSelectorValues: workerList[:],
interfaces: interfaces[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("4. create a service with externalTrafficPolicy Cluster")
svc := loadBalancerServiceResource{
name: "test-rc",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
externaltrafficpolicy: "Cluster",
allocateLoadBalancerNodePorts: false,
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=10", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ns, "name="+svc.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("this pod with label name=%s not ready", svc.name))
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
result = validateService(oc, proxyHost, svcIP+":80")
o.Expect(result).To(o.BeTrue())
exutil.By("5. Validate service IP announcement being taken over by another node")
nodeName1 := getNodeAnnouncingL2Service(oc, svc.name, ns)
defer checkNodeStatus(oc, nodeName1, "Ready")
rebootNode(oc, nodeName1)
checkNodeStatus(oc, nodeName1, "NotReady")
nodeName2 := getNodeAnnouncingL2Service(oc, svc.name, ns)
o.Expect(strings.Join(workerList, ",")).Should(o.ContainSubstring(nodeName2))
if nodeName2 != nodeName1 {
e2e.Logf("%s worker node taker over the service successfully!!!", nodeName2)
} else {
e2e.Fail("No worker node taker over the service after reboot")
}
// verify the service request after another worker nodeAssigned
for i := 0; i < 2; i++ {
o.Expect(validateService(oc, proxyHost, svcIP+":80")).To(o.BeTrue())
}
})
})
// L3 Tests
var _ = g.Describe("[sig-networking] SDN metallb l3", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-metallb", exutil.KubeConfigPath())
opNamespace = "metallb-system"
opName = "metallb-operator"
serviceLabelKey = "environ"
serviceLabelValue = "Test"
testDataDir = exutil.FixturePath("testdata", "networking/metallb")
bgpAddresses = [2][2]string{{"10.10.10.0-10.10.10.10", "10.10.11.1-10.10.11.10"}, {"10.10.12.1-10.10.12.10", "10.10.13.1-10.10.13.10"}}
myASN = 64500
peerASN = 64500
peerIPAddress = "192.168.111.60"
bgpCommunties = [1]string{"65001:65500"}
metalLBNodeSelKey = "node-role.kubernetes.io/worker"
metalLBNodeSelVal = ""
metalLBControllerSelKey = "node-role.kubernetes.io/worker"
metalLBControllerSelVal = ""
ipAddressPoolLabelKey = "zone"
ipAddressPoolLabelVal = "east"
)
g.BeforeEach(func() {
exutil.By("Check the platform if it is suitable for running the test")
networkType := exutil.CheckNetworkType(oc)
if !(isPlatformSuitable(oc)) || !strings.Contains(networkType, "ovn") {
g.Skip("These cases can only be run on networking team's private BM RDU clusters , skip for other platform or other non-OVN network plugin!!!")
}
namespaceTemplate := filepath.Join(testDataDir, "namespace-template.yaml")
operatorGroupTemplate := filepath.Join(testDataDir, "operatorgroup-template.yaml")
subscriptionTemplate := filepath.Join(testDataDir, "subscription-template.yaml")
sub := subscriptionResource{
name: "metallb-operator-sub",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
exutil.By("Check the catalog source")
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
exutil.By("Making sure CRDs are successfully installed")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "bfdprofiles.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgpadvertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgppeers.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "communities.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "ipaddresspools.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "l2advertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "metallbs.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrconfigurations.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrnodestates.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "servicel2statuses.metallb.io")).To(o.BeTrue())
exutil.By("Create MetalLB CR")
metallbCRTemplate := filepath.Join(testDataDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: opNamespace,
nodeSelectorKey: metalLBNodeSelKey,
nodeSelectorVal: metalLBNodeSelVal,
controllerSelectorKey: metalLBControllerSelKey,
controllerSelectorVal: metalLBControllerSelVal,
template: metallbCRTemplate,
}
o.Expect(createMetalLBCR(oc, metallbCR, metallbCRTemplate)).To(o.BeTrue())
exutil.By("SUCCESS - MetalLB CR Created")
})
g.It("Author:asood-High-60097-High-60098-High-60099-High-60159-Verify ip address is assigned from the ip address pool that has higher priority (lower value), matches namespace, service name or the annotated IP pool in service [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
bgpPassword string
expectedAddress1 = "10.10.10.1"
expectedAddress2 = "10.10.12.1"
)
//Two worker nodes needed to create BGP Advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test60097")
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspools with different priority")
priority_val := 10
for i := 0; i < 2; i++ {
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[i][:],
namespaces: namespaces,
priority: priority_val,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
priority_val = priority_val + 10
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
}
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
addrPoolList, err := json.Marshal(ipaddrpools)
o.Expect(err).NotTo(o.HaveOccurred())
patchIPAddresspools := fmt.Sprintf("{\"spec\":{\"ipAddressPools\": %s}}", string(addrPoolList))
patchResourceAsAdmin(oc, "bgpadvertisements/"+bgpAdvertisement.name, patchIPAddresspools, "metallb-system")
exutil.By("7. Create a service to verify it is assigned address from the pool that has higher priority")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-60097",
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60097 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
exutil.By("OCP-60098 Verify ip address from pool is assigned only to the service in project matching namespace or namespaceSelector in ip address pool.")
exutil.By("8.0 Update first ipaddress pool's the match label and match expression for the namespace property")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[0], "{\"spec\":{\"serviceAllocation\": {\"namespaceSelectors\": [{\"matchExpressions\": [{\"key\": \"region\", \"operator\": \"In\", \"values\": [\"SA\"]}]}, {\"matchLabels\": {\"environ\": \"Dev\"}}]}}}", "metallb-system")
exutil.By("8.1 Update first ipaddress pool's priority")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[0], "{\"spec\":{\"serviceAllocation\": {\"priority\": 20}}}", "metallb-system")
exutil.By("8.2 Update first ipaddress pool's namespaces property")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[0], "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", "metallb-system")
exutil.By("9. Label the namespace")
_, errNs := oc.AsAdmin().Run("label").Args("namespace", ns, "environ=Test", "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
_, errNs = oc.AsAdmin().Run("label").Args("namespace", ns, "region=NA").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("10. Delete the service in namespace and recreate it to see the address assigned from the pool that matches namespace selector")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
svc.name = "hello-world-60098"
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60098 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress2)).To(o.BeTrue())
exutil.By("OCP-60099 Verify ip address from pool is assigned only to the service matching serviceSelector in ip address pool")
exutil.By("11.0 Update second ipaddress pool's the match label and match expression for the namespace property")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"namespaceSelectors\": [{\"matchExpressions\": [{\"key\": \"region\", \"operator\": \"In\", \"values\": [\"SA\"]}]}, {\"matchLabels\": {\"environ\": \"Dev\"}}]}}}", "metallb-system")
exutil.By("11.1 Update second ipaddress pool's namesapces")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", "metallb-system")
exutil.By("11.2 Update second ipaddress pool's service selector")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"serviceSelectors\": [{\"matchExpressions\": [{\"key\": \"environ\", \"operator\": \"In\", \"values\": [\"Dev\"]}]}]}}}", "metallb-system")
exutil.By("12. Delete the service in namespace and recreate it to see the address assigned from the pool that matches namespace selector")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
svc.name = "hello-world-60099"
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60099 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
exutil.By("OCP-60159 Verify the ip address annotation in service metallb.universe.tf/address-pool in namepace overrides the priority and service selectors in ip address pool.")
exutil.By("13. Delete the service created in namespace to ensure eligible IP address is released")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
exutil.By("14. Update the priority on second address to be eligible for address assignment")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"priority\": 10}}}", "metallb-system")
exutil.By("15. Label the namespace to ensure the both addresspools are eligible for address assignment")
_, errNs = oc.AsAdmin().Run("label").Args("namespace", ns, "environ=Dev", "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
_, errNs = oc.AsAdmin().Run("label").Args("namespace", ns, "region=SA", "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("16. Create a service with annotation to obtain IP from first addresspool")
loadBalancerServiceAnnotatedTemplate := filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
svc = loadBalancerServiceResource{
name: "hello-world-60159",
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: serviceSelectorValue[0],
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddrpools[0],
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60159 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
})
g.It("Author:asood-High-50946-Medium-69612-Verify .0 and .255 addresses in IPAddressPool are handled with avoidBuggIPs and MetalLB exposes password in clear text [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
testID = "50946"
ipAddressList = [3]string{"10.10.10.0-10.10.10.0", "10.10.10.255-10.10.10.255", "10.10.10.1-10.10.10.1"}
expectedIPAddressList = [3]string{"10.10.10.0", "10.10.10.255", "10.10.10.1"}
bgpPassword string
)
//Two worker nodes needed to create BGP Advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("Label the namespace")
_, errNs := oc.AsAdmin().Run("label").Args("namespace", ns, namespaceLabelKey+"="+namespaceLabelValue[0], "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = "bgp-test"
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspools with three addresses, including two buggy ones")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: ipAddressList[:],
namespaces: namespaces[:],
priority: 0,
avoidBuggyIPs: false,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Create services to verify it is assigned buggy IP addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
for i := 0; i < 2; i++ {
svc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-" + strconv.Itoa(i),
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedIPAddressList[i])).To(o.BeTrue())
}
exutil.By("8. Delete the previously created services and set avoidBuggyIP to true in ip address pool")
for i := 0; i < 2; i++ {
removeResource(oc, true, true, "service", "hello-world-"+testID+"-"+strconv.Itoa(i), "-n", namespaces[0])
}
addressList, err := json.Marshal(ipAddressList)
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo := fmt.Sprintf("{\"spec\":{\"avoidBuggyIPs\": true, \"addresses\": %s}}", string(addressList))
patchResourceAsAdmin(oc, "ipaddresspools/"+ipAddresspool.name, patchInfo, "metallb-system")
exutil.By("9. Verify the service is created with ip address that is not a buggy")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-3",
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedIPAddressList[2])).To(o.BeTrue())
exutil.By("10. OCPBUGS-3825 Check BGP password is not in clear text")
//https://issues.redhat.com/browse/OCPBUGS-3825
podList, podListErr := exutil.GetAllPodsWithLabel(oc, opNamespace, "component=frr-k8s")
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podList)).NotTo(o.Equal(0))
searchString := fmt.Sprintf("neighbor '%s' password <retracted>", peerIPAddress)
for _, pod := range podList {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", opNamespace, pod, "-c", "reloader").OutputToFile("podlog")
o.Expect(err).NotTo(o.HaveOccurred())
grepOutput, err := exec.Command("bash", "-c", "cat "+output+" | grep -i '"+searchString+"' | wc -l").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Found %s occurences in logs of %s pod", grepOutput, pod)
o.Expect(grepOutput).NotTo(o.Equal(0))
}
})
g.It("Author:qiowang-High-46652-Verify LoadBalancer service can be created running at Layer 3 using BGP peering with BFD profile [Serial]", func() {
var (
workers []string
ipaddresspools []string
bgpPeers []string
namespaces []string
expectedHostPrefixes []string
bgpPassword string
expectedAddress1 = "10.10.10.1"
bfdEnabled = "yes"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
BFDProfileTemplate = filepath.Join(testDataDir, "bfdprofile-template.yaml")
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
BGPPeerTemplate = filepath.Join(testDataDir, "bgppeer-template.yaml")
bgpAdvertisementTemplate = filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
)
//Two worker nodes needed to create BGP Advertisement object
workerList, getWorkersErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getWorkersErr).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run on cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Create BFD profile")
BFDProfileCR := bfdProfileResource{
name: "bfd-profile-46652",
namespace: opNamespace,
detectMultiplier: 37,
echoMode: true,
echoReceiveInterval: 38,
echoTransmitInterval: 39,
minimumTtl: 10,
passiveMode: true,
receiveInterval: 35,
transmitInterval: 35,
template: BFDProfileTemplate,
}
defer removeResource(oc, true, true, "bfdprofile", BFDProfileCR.name, "-n", BFDProfileCR.namespace)
o.Expect(createBFDProfileCR(oc, BFDProfileCR)).To(o.BeTrue())
exutil.By("2. Set up upstream/external BGP router, enable BFD")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword, bfdEnabled, BFDProfileCR.name)).To(o.BeTrue())
exutil.By("3. Create IP addresspool")
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test46652")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-bgp-46652",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("4. Create BGP Peer")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: "",
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
bgpPeers = append(bgpPeers, BGPPeerCR.name)
exutil.By("5. Patch the BGPPeer with BFD Profile")
patchBFDProfile := fmt.Sprintf("{\"spec\":{\"bfdProfile\": \"%s\"}}", BFDProfileCR.name)
patchResourceAsAdmin(oc, "bgppeer/"+BGPPeerCR.name, patchBFDProfile, "metallb-system")
exutil.By("6. Create BGP Advertisement")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-46652",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddresspools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("8. Check BFD Session is up")
o.Expect(checkBFDSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("9. Create a service")
svc := loadBalancerServiceResource{
name: "hello-world-46652",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
statusErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-46652 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("10. Verify route is advertised")
expectedHostPrefixes = append(expectedHostPrefixes, expectedAddress1+"/32")
o.Expect(verifyHostPrefixAdvertised(oc, bgpRouterNamespaceWithSuffix, expectedHostPrefixes)).To(o.BeTrue())
})
g.It("Author:asood-High-50945-Verify the L2 and L3 IP address can be assigned to services respectively from the IP address pool based on the advertisement.[Serial]", func() {
var (
testID = "50945"
workers []string
bgpPeers []string
namespaces []string
ipaddresspools = make(map[int][]string)
expectedHostPrefixes []string
bgpPassword string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
BGPPeerTemplate = filepath.Join(testDataDir, "bgppeer-template.yaml")
bgpAdvertisementTemplate = filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
loadBalancerServiceAnnotatedTemplate = filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
l2Addresses = [2][2]string{{"192.168.111.65-192.168.111.69", "192.168.111.70-192.168.111.74"}, {"192.168.111.75-192.168.111.79", "192.168.111.80-192.168.111.85"}}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
expectedAddressList = [2]string{"10.10.10.1", "192.168.111.65"}
)
//Two worker nodes needed to create BGP Advertisement object
workerList, getWorkersErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getWorkersErr).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run on cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Get the namespace")
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("Label the namespace")
_, errNsLabel := oc.AsAdmin().Run("label").Args("namespace", ns, namespaceLabelKey+"="+namespaceLabelValue[0], "--overwrite").Output()
o.Expect(errNsLabel).NotTo(o.HaveOccurred())
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = "bgp-test"
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create L3 and L2 IP addresspools")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-bgp-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools[0] = append(ipaddresspools[0], ipAddresspool.name)
ipAddresspool.name = "ipaddresspool-l2-" + testID
ipAddresspool.addresses = l2Addresses[0][:]
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools[1] = append(ipaddresspools[1], ipAddresspool.name)
exutil.By("6. Create BGP and L2 Advertisements")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-" + testID,
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddresspools[0],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
l2advertisement := l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddresspools[1],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
svcList := [2]string{"-l3-", "-l2-"}
exutil.By("7. Create L2 and L3 service")
annotatedSvc := loadBalancerServiceResource{
name: "",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: "",
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
for i := 0; i < 2; i++ {
annotatedSvc.name = "hello-world" + svcList[i] + testID
annotatedSvc.annotationValue = ipaddresspools[i][0]
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The %s service with annotation %s:%s created successfully, and assigned %s", annotatedSvc.name, annotatedSvc.annotationKey, annotatedSvc.annotationValue, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddressList[i])).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
}
exutil.By("8. Verify route is advertised")
expectedHostPrefixes = append(expectedHostPrefixes, expectedAddressList[0]+"/32")
o.Expect(verifyHostPrefixAdvertised(oc, bgpRouterNamespaceWithSuffix, expectedHostPrefixes)).To(o.BeTrue())
exutil.By(fmt.Sprintf("9. Update the L2 IP Addresspool %s", ipaddresspools[1][0]))
patchL2AddressPool := `[{"op": "replace", "path": "/spec/serviceAllocation/serviceSelectors/0/matchLabels", "value": {"environ": "Dev"}}, {"op": "replace", "path": "/spec/serviceAllocation/serviceSelectors/0/matchExpressions", "value":[{"key":"environ", "operator":"In", "values":["Dev"]}]} ]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "ipaddresspools", ipaddresspools[1][0], "--type=json", "-p", patchL2AddressPool).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("10. Delete previously created services and create new ones without ip address pool annotation")
for i := 0; i < 2; i++ {
svcName := "hello-world" + svcList[i] + testID
removeResource(oc, true, true, "service", svcName, "-n", ns)
}
svcLabelValList := [2]string{"Test", "Dev"}
svc := loadBalancerServiceResource{
name: "",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: "",
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
exutil.By("11. Create L3 and L2 services")
for i := 0; i < 2; i++ {
svc.name = "hello-world" + svcList[i] + testID
svc.labelValue = svcLabelValList[i]
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The %s service created successfully IP %s assigned to it", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddressList[i])).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
}
})
g.It("Author:qiowang-High-51187-High-54820-Validate ipAddressPoolSelector, ipAddressPool and nodeSelector are honored when advertising service IP address via BGP advertisement [Serial]", func() {
var (
workers []string
nodeIPs []string
ipaddresspools []string
bgpPeers []string
namespaces []string
expectedPaths1 []string
expectedPaths2 []string
expectedPaths3 []string
bgpPassword string
expectedAddress1 = "10.10.10.1"
expectedAddress2 = "10.10.12.1"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
ipAddressPoolSelectorsKey = "zone"
ipAddressPoolSelectorsValues = [2][2]string{{"east"}, {"west"}}
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
BGPPeerTemplate = filepath.Join(testDataDir, "bgppeer-template.yaml")
bgpAdvertisementTemplate = filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
loadBalancerServiceAnnotatedTemplate = filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
)
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test51187")
//Two worker nodes needed to create BGP Advertisement object
workerList, getWorkersErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getWorkersErr).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run on cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
nodeIP := getNodeIPv4(oc, ns, workerList.Items[i].Name)
nodeIPs = append(nodeIPs, nodeIP)
}
exutil.By("1. Set up upstream/external BGP router, enable BFD")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("2. Create two IP addresspools with different labels")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-bgp-51187-" + strconv.Itoa(i),
namespace: opNamespace,
addresses: bgpAddresses[i][:],
namespaces: namespaces,
label1: ipAddressPoolSelectorsKey,
value1: ipAddressPoolSelectorsValues[i][0],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By("3. Create BGP Peer")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: "",
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
bgpPeers = append(bgpPeers, BGPPeerCR.name)
exutil.By("4. Create BGP Advertisement with ipAddressPool and nodeSelectors")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-51187",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddresspools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("5. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("6. Create a service requesting address from the first ipaddresspools")
svc := loadBalancerServiceResource{
name: "hello-world-51187",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddresspools[0],
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-51187 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
result := validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("7. Verify route is advertised")
expectedPaths1 = append(expectedPaths1, "2 available", nodeIPs[0], nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, expectedAddress1, expectedPaths1)).To(o.BeTrue())
exutil.By("8. Remove the previously created services")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
removeResource(oc, true, true, "replicationcontroller", svc.name, "-n", svc.namespace)
exutil.By("9. Update BGP Advertisement, update ipAddressPool and nodeSelectors, add ipAddressPoolSelectors")
patchBgpAdvertisement := `[{"op": "replace", "path": "/spec/ipAddressPools", "value": [""]}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["` + workers[0] + `"]}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "bgpadvertisement", bgpAdvertisement.name, "--type=json", "-p", patchBgpAdvertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
patchIPAddrPoolSelectors := `{"spec":{"ipAddressPoolSelectors":[{"matchExpressions": [{"key": "` + ipAddressPoolSelectorsKey + `","operator": "In","values": ["` + ipAddressPoolSelectorsValues[1][0] + `"]}]}]}}`
patchResourceAsAdmin(oc, "bgpadvertisement/"+bgpAdvertisement.name, patchIPAddrPoolSelectors, "metallb-system")
exutil.By("10. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("11. Create a service requesting address from the second ipaddresspools")
svc.annotationValue = ipaddresspools[1]
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The recreated service %s 's External IP for OCP-51187 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress2)).To(o.BeTrue())
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("12. Verify route is advertised")
expectedPaths2 = append(expectedPaths2, "1 available", nodeIPs[0])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, expectedAddress2, expectedPaths2)).To(o.BeTrue())
exutil.By("13. OCP-54820-Add label to the second worker node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[1], "zone")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[1], "zone", "east")
exutil.By("14. OCP-54820-Edit the BGPadvertisement to modify the node selection")
patchBgpAdvertisement = `[{"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/key", "value":"zone"}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["east"]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "bgpadvertisement", bgpAdvertisement.name, "--type=json", "-p", patchBgpAdvertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("15. OCP-54820-Check the changes to nodeSelector in BGPadvertisements are reflected which node advertises the host prefix for service")
expectedPaths3 = append(expectedPaths3, "1 available", nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, expectedAddress2, expectedPaths3)).To(o.BeTrue())
})
g.It("Author:asood-Longduration-NonPreRelease-High-46110-Verify service is functional if BGP peer is modified to cause session to re establish. [Serial]", func() {
var (
testID = "46110"
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
bgpPassword string
nodeIPs []string
expectedPath []string
)
//Two worker nodes needed to create BGP Advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
nodeIP := getNodeIPv4(oc, ns, workerList.Items[i].Name)
nodeIPs = append(nodeIPs, nodeIP)
}
masterNodeList, masterNodeErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(masterNodeErr).NotTo(o.HaveOccurred())
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test46110")
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500-" + testID,
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router is established")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Create a LB service and verify it is accessible ")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID,
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
exutil.By("8. Verify route is advertised")
expectedPath = append(expectedPath, "2 available", nodeIPs[0], nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, expectedPath)).To(o.BeTrue())
exutil.By("9. Verify by setting password for BGP peer the session is no longer established")
patchBGPPeer := `{"spec":{"password":"bgp-test"}}`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", BGPPeerCR.name, "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix, 60*time.Second)).To(o.BeFalse())
exutil.By("10. Verify by unsetting password for BGP peer the session is re established")
patchBGPPeer = `{"spec":{"password":""}}`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", BGPPeerCR.name, "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("11. Verify route is advertised after the BGP session is re established")
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, expectedPath)).To(o.BeTrue())
})
g.It("Author:asood-Longduration-NonPreRelease-High-46105-Verify only the specified node BGP peered advertise network prefixes. [Serial]", func() {
var (
testID = "46105"
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
bgpPassword string
nodeIPs []string
expectedPath []string
newExpectedPath []string
)
//Two worker nodes needed to create BGP Advertisement object
workerList := excludeSriovNodes(oc)
if len(workerList) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList[i])
nodeIP := getNodeIPv4(oc, ns, workerList[i])
nodeIPs = append(nodeIPs, nodeIP)
}
masterNodeList, masterNodeErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(masterNodeErr).NotTo(o.HaveOccurred())
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test46110")
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500-" + testID,
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router is established")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Update the BGP Peer with selected nodes ")
bgppeerWorkersList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
patchBGPPeer := fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(bgppeerWorkersList))
patchResourceAsAdmin(oc, "bgppeer/"+BGPPeerCR.name, patchBGPPeer, opNamespace)
exutil.By("8. Create a LB service and verify it is accessible ")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID,
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
exutil.By("9. Verify route is advertised")
expectedPath = append(expectedPath, "2 available", nodeIPs[0], nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, expectedPath)).To(o.BeTrue())
exutil.By("10. Label one of the nodes")
metalLBLabel := "feature.node.kubernetes.io/bgp.capable"
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workerList[0], metalLBLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workerList[0], metalLBLabel)
exutil.By("11. Update BGP peer node selector with node that is labelled")
patchBGPPeer = `[{"op": "replace", "path": "/spec/nodeSelectors", "value":[{"matchExpressions": [{"key": "` + metalLBLabel + `", "operator": "Exists"}]}]}]`
patchReplaceResourceAsAdmin(oc, "bgppeer/"+BGPPeerCR.name, patchBGPPeer, opNamespace)
exutil.By("12. Verify the advertised routes")
newExpectedPath = append(newExpectedPath, "1 available", nodeIPs[0])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, newExpectedPath)).To(o.BeTrue())
})
})
// Cross feature testing
var _ = g.Describe("[sig-networking] SDN udn metallb", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-metallb", exutil.KubeConfigPath())
opNamespace = "metallb-system"
opName = "metallb-operator"
testDataDir = exutil.FixturePath("testdata", "networking/metallb")
l2Addresses = [2][2]string{{"192.168.111.65-192.168.111.69", "192.168.111.70-192.168.111.74"}, {"192.168.111.75-192.168.111.79", "192.168.111.80-192.168.111.85"}}
l3Addresses = [2][2]string{{"10.10.10.0-10.10.10.10", "10.10.11.1-10.10.11.10"}, {"10.10.12.1-10.10.12.10", "10.10.13.1-10.10.13.10"}}
myASN = 64500
peerASN = 64500
peerIPAddress = "192.168.111.60"
metalLBNodeSelKey = "node-role.kubernetes.io/worker"
metalLBNodeSelVal = ""
metalLBControllerSelKey = "node-role.kubernetes.io/worker"
metalLBControllerSelVal = ""
ipAddressPoolLabelKey = "zone"
ipAddressPoolLabelVal = "east"
)
g.BeforeEach(func() {
SkipIfNoFeatureGate(oc, "NetworkSegmentation")
exutil.By("Check the platform if it is suitable for running the test")
networkType := exutil.CheckNetworkType(oc)
if !(isPlatformSuitable(oc)) || !strings.Contains(networkType, "ovn") {
g.Skip("These cases can only be run on networking team's private RDU cluster, skipping for other platforms or non-OVN network plugin!!!")
}
namespaceTemplate := filepath.Join(testDataDir, "namespace-template.yaml")
operatorGroupTemplate := filepath.Join(testDataDir, "operatorgroup-template.yaml")
subscriptionTemplate := filepath.Join(testDataDir, "subscription-template.yaml")
sub := subscriptionResource{
name: "metallb-operator-sub",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
exutil.By("Check the catalog source")
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
exutil.By("Making sure CRDs are successfully installed")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "bfdprofiles.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgpadvertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "bgppeers.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "communities.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "ipaddresspools.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "l2advertisements.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "metallbs.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrconfigurations.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "frrnodestates.frrk8s.metallb.io")).To(o.BeTrue())
o.Expect(strings.Contains(output, "servicel2statuses.metallb.io")).To(o.BeTrue())
exutil.By("Create MetalLB CR")
metallbCRTemplate := filepath.Join(testDataDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: opNamespace,
nodeSelectorKey: metalLBNodeSelKey,
nodeSelectorVal: metalLBNodeSelVal,
controllerSelectorKey: metalLBControllerSelKey,
controllerSelectorVal: metalLBControllerSelVal,
template: metallbCRTemplate,
}
result := createMetalLBCR(oc, metallbCR, metallbCRTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - MetalLB CR Created")
})
g.It("Author:asood-High-76801-Validate LB services can be created in UDN with MetalLB operator on non cloud platform. [Serial]", func() {
var (
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [2]string{"Test", "Dev"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
l2IPAddressPool []string
l3IPAddressPool []string
bgpPeers []string
bgpPassword = ""
bgpCommunties = []string{"65001:65500"}
cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
mtu int32 = 1300
prefix int32 = 24
testID = "76801"
proxyHost = "10.8.1.181"
routerNS = ""
udnTestDataDir = exutil.FixturePath("testdata", "networking")
udnCRDL2SingleStack = filepath.Join(udnTestDataDir, "udn/udn_crd_layer2_singlestack_template.yaml")
udnCRDL3SingleStack = filepath.Join(udnTestDataDir, "udn/udn_crd_singlestack_template.yaml")
udnNADTemplate = filepath.Join(udnTestDataDir, "udn/udn_nad_template.yaml")
)
exutil.By("1. Obtain the workers")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < len(workerList.Items); i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("2. Set up user defined network namespaces")
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
namespaces = append(namespaces, oc.Namespace())
}
exutil.By("2.1. Create CRD for UDN in first two namespaces")
udnResourceName := []string{"l2-network-udn", "l3-network-udn"}
udnTemplate := []string{udnCRDL2SingleStack, udnCRDL3SingleStack}
udnCRD := make([]udnCRDResource, 2)
for i := 0; i < 2; i++ {
udnCRD[i] = udnCRDResource{
crdname: udnResourceName[i],
namespace: namespaces[i],
role: "Primary",
mtu: mtu,
cidr: cidr[i],
prefix: prefix,
template: udnTemplate[i],
}
switch i {
case 0:
udnCRD[0].createLayer2SingleStackUDNCRD(oc)
case 1:
udnCRD[1].createUdnCRDSingleStack(oc)
default:
// Do nothing
}
err := waitUDNCRDApplied(oc, namespaces[i], udnCRD[i].crdname)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2.2 Create NAD for UDN in last two namespaces")
udnNADResourceName := []string{"l2-network-nad", "l3-network-nad"}
topology := []string{"layer2", "layer3"}
udnNAD := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
udnNAD[i] = udnNetDefResource{
nadname: udnNADResourceName[i],
namespace: namespaces[i+2],
nad_network_name: udnNADResourceName[i],
topology: topology[i],
subnet: "",
mtu: mtu,
net_attach_def_name: fmt.Sprintf("%s/%s", namespaces[i+2], udnNADResourceName[i]),
role: "primary",
template: udnNADTemplate,
}
udnNAD[i].subnet = cidr[i]
udnNAD[i].createUdnNad(oc)
}
exutil.By("3.1 Set up external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3.2 Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500-" + testID,
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("3.3 Check BGP Session between speakers and Router is established")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
routerNS = getRouterPodNamespace(oc)
o.Expect(routerNS).NotTo(o.BeEmpty())
exutil.By("4. Create L2 and L3 IP addresspools")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: []string{serviceSelectorValue[0], "dummy"},
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
l2IPAddressPool = append(l2IPAddressPool, ipAddresspool.name)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipAddresspool.name, "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", opNamespace)
exutil.By("SUCCESS - L2 IP Addresspool created")
ipAddresspool.name = "ipaddresspool-l3-" + testID
ipAddresspool.addresses = l3Addresses[0][:]
ipAddresspool.serviceLabelValue = serviceSelectorValue[1]
ipAddresspool.serviceSelectorValue = []string{serviceSelectorValue[1], "dummy"}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
l3IPAddressPool = append(l3IPAddressPool, ipAddresspool.name)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipAddresspool.name, "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", opNamespace)
exutil.By("SUCCESS - L3 IP Addresspool created")
exutil.By("5. Create L2 and BGP Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: l2IPAddressPool[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
l2AdvWorkersList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
patchL2Advertisement := fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/nodeSelectors/0/matchExpressions/0/values\", \"value\":%s}]", l2AdvWorkersList)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "l2advertisement", l2advertisement.name, "--type=json", "-p", patchL2Advertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-" + testID,
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: l3IPAddressPool[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("6. Create LoadBalancer services")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "",
namespace: "",
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: "",
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
for _, ns := range namespaces {
for index, serviceSelector := range serviceSelectorValue {
svc.name = "hello-world-" + testID + "-" + strconv.Itoa(index)
svc.namespace = ns
svc.labelValue = serviceSelector
exutil.By(fmt.Sprintf("6.1 Create LoadBalancer service %s in %s", svc.name, svc.namespace))
o.Expect(createLoadBalancerService(oc, svc, svc.template)).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
//svcClusterIP := getSvcIPv4(oc, svc.namespace, svc.name )
exutil.By(fmt.Sprintf("6.2 Validating service %s using external IP %s", svc.name, svcIP))
svcIPCmd := fmt.Sprintf("curl -s -I --connect-timeout 5 %s:80", svcIP)
o.Eventually(func() bool {
cmdOutput, _ := exutil.RemoteShPodWithBashSpecifyContainer(oc, routerNS, "router-master1", "testcontainer", svcIPCmd)
return strings.Contains(cmdOutput, "200 OK")
}, "120s", "10s").Should(o.BeTrue(), "Service validation failed")
// L3 addresses are not accessible outside cluster
if index == 0 {
exutil.By(fmt.Sprintf("6.3 Validating service %s using external IP %s", svc.name, svcIP))
o.Eventually(func() bool {
return validateService(oc, proxyHost, svcIP)
}, "120s", "10s").Should(o.BeTrue(), "Service validation failed")
}
}
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
199756a6-77d7-44e5-8318-0cba8607cea9
|
Author:asood-NonHyperShiftHOST-LEVEL0-StagerunBoth-High-43074-MetalLB-Operator installation
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-NonHyperShiftHOST-LEVEL0-StagerunBoth-High-43074-MetalLB-Operator installation ", func() {
g.By("Checking metalLB operator installation")
e2e.Logf("Operator install check successfull as part of setup !!!!!")
g.By("SUCCESS - MetalLB operator installed")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
d08d31fa-4041-4881-a1fd-9048c49684dc
|
Author:asood-NonHyperShiftHOST-Medium-50950-Verify community creation and webhook validation.
|
['"encoding/json"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-NonHyperShiftHOST-Medium-50950-Verify community creation and webhook validation.", func() {
communityTemplate := filepath.Join(testDataDir, "community-template.yaml")
communityCR := communityResource{
name: "community-50950",
namespace: opNamespace,
communityName: "NO_ADVERTISE",
value1: "65535",
value2: "65282",
template: communityTemplate,
}
defer removeResource(oc, true, true, "community", communityCR.name, "-n", communityCR.namespace)
result := createCommunityCR(oc, communityCR)
o.Expect(result).To(o.BeTrue())
patchCommunity := `[{"op": "add", "path": "/spec/communities/1", "value": {"name": "NO_ADVERTISE", "value":"65535:65282"}}]`
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("community", communityCR.name, "-n", communityCR.namespace, "--type=json", "-p", patchCommunity).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of community")).To(o.BeTrue())
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.