Dataset schema (column summary):

element_type    stringclasses   4 values
project_name    stringclasses   1 value
uuid            stringlengths   36–36
name            stringlengths   0–346
imports         stringlengths   0–2.67k
structs         stringclasses   761 values
interfaces      stringclasses   22 values
file_location   stringclasses   545 values
code            stringlengths   26–8.07M
global_vars     stringclasses   7 values
package         stringclasses   124 values
tags            stringclasses   1 value
test case
openshift/openshift-tests-private
69191474-7adb-4471-99a6-00c4be9b2dc5
Author:huirwang-Medium-75239-Check sctp traffic works well via udn pods on user defined networks for layer2. [Disruptive]
['"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:huirwang-Medium-75239-Check sctp traffic work well via udn pods user defined networks for layer2. [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") sctpClientPod = filepath.Join(buildPruningBaseDir, "sctp/sctpclient.yaml") sctpServerPod = filepath.Join(buildPruningBaseDir, "sctp/sctpserver.yaml") sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml") sctpServerPodName = "sctpserver" sctpClientPodname = "sctpclient" ) exutil.By("Preparing the nodes for SCTP") prepareSCTPModule(oc, sctpModule) ipStackType := checkIPStackType(oc) exutil.By("Setting privileges on the namespace") oc.CreateNamespaceUDN() ns := oc.Namespace() var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } exutil.By("Create CRD for UDN") var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-75239", namespace: ns, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv6cidr: ipv6cidr, template: udnCRDdualStack, } udncrd.createLayer2DualStackUDNCRD(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-75658", namespace: ns, role: "Primary", mtu: 1400, cidr: cidr, template: udnCRDSingleStack, } udncrd.createLayer2SingleStackUDNCRD(oc) } err := waitUDNCRDApplied(oc, ns, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("create sctpClientPod") createResourceFromFile(oc, ns, sctpClientPod) err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient") exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running") exutil.By("create sctpServerPod") createResourceFromFile(oc, ns, sctpServerPod) err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver") exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running") exutil.By("Verify sctp server pod can be accessed for UDN network.") if ipStackType == "dualstack" { sctpServerIPv6, sctpServerIPv4 := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1") verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true) verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true) } else { sctpServerIP, _ := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1") verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true) } })
test case
openshift/openshift-tests-private
7d2a5a48-5d15-4afb-8c87-faa520225b0b
Author:qiowang-High-75254-Check kubelet probes are allowed via default network's LSP for the UDN pods
['"fmt"', '"path/filepath"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:qiowang-High-75254-Check kubelet probes are allowed via default network's LSP for the UDN pods", func() { var ( udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_dualstack2_template.yaml") udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml") udnPodLivenessTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_liveness_template.yaml") udnPodReadinessTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_readiness_template.yaml") udnPodStartupTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_startup_template.yaml") livenessProbePort = 8080 readinessProbePort = 8081 startupProbePort = 1234 ) exutil.By("1. Create privileged namespace") oc.CreateNamespaceUDN() ns := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns) exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" prefix = 24 } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" prefix = 64 } else { ipv4cidr = "10.150.0.0/16" ipv4prefix = 24 ipv6cidr = "2010:100:200::0/48" ipv6prefix = 64 } } var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-ds-75254", namespace: ns, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr, IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-ss-75254", namespace: ns, role: "Primary", mtu: 1400, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) } err := waitUDNCRDApplied(oc, ns, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create a udn hello pod with liveness probe in ns1") pod1 := udnPodWithProbeResource{ name: "hello-pod-ns1-liveness", namespace: ns, label: "hello-pod", port: livenessProbePort, failurethreshold: 1, periodseconds: 1, template: udnPodLivenessTemplate, } pod1.createUdnPodWithProbe(oc) waitPodReady(oc, pod1.namespace, pod1.name) exutil.By("4. Capture packets in pod " + pod1.name + ", check liveness probe traffic is allowed via default network") tcpdumpCmd1 := fmt.Sprintf("timeout 5s tcpdump -nni eth0 port %v", pod1.port) cmdTcpdump1, cmdOutput1, _, err1 := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, pod1.name, "--", "bash", "-c", tcpdumpCmd1).Background() defer cmdTcpdump1.Process.Kill() o.Expect(err1).NotTo(o.HaveOccurred()) cmdTcpdump1.Wait() e2e.Logf("The captured packet is %s", cmdOutput1.String()) expPacket1 := strconv.Itoa(pod1.port) + ": Flags [S]" o.Expect(strings.Contains(cmdOutput1.String(), expPacket1)).To(o.BeTrue()) exutil.By("5. Create a udn hello pod with readiness probe in ns1") pod2 := udnPodWithProbeResource{ name: "hello-pod-ns1-readiness", namespace: ns, label: "hello-pod", port: readinessProbePort, failurethreshold: 1, periodseconds: 1, template: udnPodReadinessTemplate, } pod2.createUdnPodWithProbe(oc) waitPodReady(oc, pod2.namespace, pod2.name) exutil.By("6. 
Capture packets in pod " + pod2.name + ", check readiness probe traffic is allowed via default network") tcpdumpCmd2 := fmt.Sprintf("timeout 5s tcpdump -nni eth0 port %v", pod2.port) cmdTcpdump2, cmdOutput2, _, err2 := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, pod2.name, "--", "bash", "-c", tcpdumpCmd2).Background() defer cmdTcpdump2.Process.Kill() o.Expect(err2).NotTo(o.HaveOccurred()) cmdTcpdump2.Wait() e2e.Logf("The captured packet is %s", cmdOutput2.String()) expPacket2 := strconv.Itoa(pod2.port) + ": Flags [S]" o.Expect(strings.Contains(cmdOutput2.String(), expPacket2)).To(o.BeTrue()) exutil.By("7. Create a udn hello pod with startup probe in ns1") pod3 := udnPodWithProbeResource{ name: "hello-pod-ns1-startup", namespace: ns, label: "hello-pod", port: startupProbePort, failurethreshold: 100, periodseconds: 2, template: udnPodStartupTemplate, } pod3.createUdnPodWithProbe(oc) waitPodReady(oc, pod3.namespace, pod3.name) exutil.By("8. Capture packets in pod " + pod3.name + ", check readiness probe traffic is allowed via default network") tcpdumpCmd3 := fmt.Sprintf("timeout 10s tcpdump -nni eth0 port %v", pod3.port) cmdTcpdump3, cmdOutput3, _, err3 := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, pod3.name, "--", "bash", "-c", tcpdumpCmd3).Background() defer cmdTcpdump3.Process.Kill() o.Expect(err3).NotTo(o.HaveOccurred()) cmdTcpdump3.Wait() e2e.Logf("The captured packet is %s", cmdOutput3.String()) expPacket3 := strconv.Itoa(pod3.port) + ": Flags [S]" o.Expect(strings.Contains(cmdOutput3.String(), expPacket3)).To(o.BeTrue()) })
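The probe pod templates are likewise external to this record. A minimal sketch of what udn_test_pod_liveness_template.yaml could look like for pod1, assuming a plain TCP probe (the expected ": Flags [S]" match in the tcpdump output is a TCP SYN, consistent with a tcpSocket probe); names and image are placeholders:

// Hypothetical fixture: liveness-probe pod, parameterized by
// udnPodWithProbeResource{port, failurethreshold, periodseconds}.
const udnLivenessPodSketch = `apiVersion: v1
kind: Pod
metadata:
  name: hello-pod-ns1-liveness
  labels:
    name: hello-pod
spec:
  containers:
  - name: hello-pod
    image: <hello-openshift-image> # placeholder
    livenessProbe:
      tcpSocket:
        port: 8080 # livenessProbePort
      failureThreshold: 1
      periodSeconds: 1
`

Because kubelet probes a UDN pod over the default network, the SYNs are visible on eth0 even though the pod's primary UDN interface is ovn-udn1.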
test case
openshift/openshift-tests-private
c6cb33ba-d450-4fea-ba29-e32ba7d4ee82
Author:anusaxen-Critical-75876-Check udn pods are not isolated if same nad network is shared across two namespaces(layer 2)
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:anusaxen-Critical-75876-Check udn pods are not isolated if same nad network is shared across two namespaces(layer 2)", func() { var ( udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadResourcename := []string{"l2-network-" + ns1, "l2-network-" + ns2} nadNS := []string{ns1, ns2} var subnet string if ipStackType == "ipv4single" { subnet = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { subnet = "2010:100:200::0/60" } else { subnet = "10.150.0.0/16,2010:100:200::0/60" } } nad := make([]udnNetDefResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: "l2-network", topology: "layer2", subnet: subnet, mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("create a udn hello pod in ns1") pod1 := udnPodResource{ name: "hello-pod-ns1", namespace: ns1, label: "hello-pod", template: udnPodTemplate, } pod1.createUdnPod(oc) waitPodReady(oc, pod1.namespace, pod1.name) exutil.By("create a udn hello pod in ns2") pod2 := udnPodResource{ name: "hello-pod-ns2", namespace: ns2, label: "hello-pod", template: udnPodTemplate, } pod2.createUdnPod(oc) waitPodReady(oc, pod2.namespace, pod2.name) //udn network connectivity should NOT be isolated CurlPod2PodPassUDN(oc, ns1, pod1.name, ns2, pod2.name) //default network connectivity should be isolated CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name) })
test case
openshift/openshift-tests-private
cd2cc3bb-fe5c-4840-8b15-aa138c64aef9
Author:anusaxen-Critical-75875-Check udn pods isolation on user defined networks (layer 2)
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:anusaxen-Critical-75875-Check udn pods isolation on user defined networks (layer 2)", func() { var ( udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadResourcename := []string{"l2-network-" + ns1, "l2-network-" + ns2} nadNS := []string{ns1, ns2} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16", "10.151.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16,2010:100:200::0/60", "10.151.0.0/16,2011:100:200::0/60"} } } nad := make([]udnNetDefResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: nadResourcename[i], topology: "layer2", subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("create a udn hello pod in ns1") pod1 := udnPodResource{ name: "hello-pod-ns1", namespace: ns1, label: "hello-pod", template: udnPodTemplate, } pod1.createUdnPod(oc) waitPodReady(oc, pod1.namespace, pod1.name) exutil.By("create a udn hello pod in ns2") pod2 := udnPodResource{ name: "hello-pod-ns2", namespace: ns2, label: "hello-pod", template: udnPodTemplate, } pod2.createUdnPod(oc) waitPodReady(oc, pod2.namespace, pod2.name) //udn network connectivity should be isolated CurlPod2PodFailUDN(oc, ns1, pod1.name, ns2, pod2.name) //default network connectivity should also be isolated CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name) })
test case
openshift/openshift-tests-private
028bd4e8-20b5-43dc-b07d-804979bd27fa
Author:weliang-NonPreRelease-Longduration-Medium-75624-Feature integration UDN with multinetworkpolicy. [Disruptive]
['"context"', '"fmt"', '"net"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:weliang-NonPreRelease-Longduration-Medium-75624-Feture intergration UDN with multinetworkpolicy. [Disruptive]", func() { var ( udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") mtu int32 = 1300 buildPruningBaseDir = exutil.FixturePath("testdata", "networking") dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml") multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml") policyFile = filepath.Join(testDataDirUDN, "udn_with_multiplenetworkpolicy.yaml") patchSResource = "networks.operator.openshift.io/cluster" ) exutil.By("Getting the ready-schedulable worker nodes") nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 1 { g.Skip("The cluster has no ready node for the testing") } exutil.By("Enabling useMultiNetworkPolicy in the cluster") patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}") patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}") defer func() { patchResourceAsAdmin(oc, patchSResource, patchInfoFalse) exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy") waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False") waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False") }() patchResourceAsAdmin(oc, patchSResource, patchInfoTrue) waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False") waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False") exutil.By("Creating a new namespace for this MultiNetworkPolicy testing") origContxt, contxtErr := oc.Run("config").Args("current-context").Output() o.Expect(contxtErr).NotTo(o.HaveOccurred()) defer func() { useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute() o.Expect(useContxtErr).NotTo(o.HaveOccurred()) }() ns1 := "project75624" defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute() nserr1 := oc.Run("new-project").Args(ns1).Execute() o.Expect(nserr1).NotTo(o.HaveOccurred()) _, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output() o.Expect(proerr1).NotTo(o.HaveOccurred()) exutil.By("Creating NAD1 for ns1") nad1 := udnNetDefResource{ nadname: "udn-primary-net", namespace: ns1, nad_network_name: "udn-primary-net", topology: "layer3", subnet: "10.100.0.0/16/24", mtu: mtu, net_attach_def_name: ns1 + "/" + "udn-primary-net", role: "primary", template: udnNadtemplate, } defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad1.nadname, "-n", ns1).Execute() nad1.createUdnNad(oc) exutil.By("Verifying the configured NAD1") if checkNAD(oc, ns1, nad1.nadname) { e2e.Logf("The correct network-attach-definition: %v is created!", nad1.nadname) } else { e2e.Failf("The correct network-attach-definition: %v is not created!", nad1.nadname) } exutil.By("Creating NAD2 for ns1") nad2 := dualstackNAD{ nadname: "dualstack", namespace: ns1, plugintype: "macvlan", mode: "bridge", ipamtype: "whereabouts", ipv4range: "192.168.10.0/24", ipv6range: "fd00:dead:beef:10::/64", ipv4rangestart: "", ipv4rangeend: "", ipv6rangestart: "", ipv6rangeend: "", template: dualstackNADTemplate, } defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad2.nadname, "-n", ns1).Execute() nad2.createDualstackNAD(oc) exutil.By("Verifying the configured NAD2") if checkNAD(oc, ns1, nad2.nadname) { e2e.Logf("The correct 
network-attach-definition: %v is created!", nad2.nadname) } else { e2e.Failf("The correct network-attach-definition: %v is not created!", nad2.nadname) } nadName := "dualstack" nsWithnad := ns1 + "/" + nadName exutil.By("Configuring pod1 for additional network using NAD2") pod1 := testMultihomingPod{ name: "blue-pod-1", namespace: ns1, podlabel: "blue-pod", nadname: nsWithnad, nodename: nodeList.Items[0].Name, podenvname: "", template: multihomingPodTemplate, } pod1.createTestMultihomingPod(oc) exutil.By("Configuring pod2 for additional network using NAD2") pod2 := testMultihomingPod{ name: "blue-pod-2", namespace: ns1, podlabel: "blue-pod", nadname: nsWithnad, nodename: nodeList.Items[0].Name, podenvname: "", template: multihomingPodTemplate, } pod2.createTestMultihomingPod(oc) exutil.By("Verifying both pods with same label of blue-pod are ready for testing") o.Expect(waitForPodWithLabelReady(oc, ns1, "name=blue-pod")).NotTo(o.HaveOccurred()) exutil.By("Configuring pod3 for additional network using NAD2") pod3 := testMultihomingPod{ name: "red-pod-1", namespace: ns1, podlabel: "red-pod", nadname: nsWithnad, nodename: nodeList.Items[0].Name, podenvname: "", template: multihomingPodTemplate, } pod3.createTestMultihomingPod(oc) exutil.By("Configuring pod4 for additional network NAD2") pod4 := testMultihomingPod{ name: "red-pod-2", namespace: ns1, podlabel: "red-pod", nadname: nsWithnad, nodename: nodeList.Items[0].Name, podenvname: "", template: multihomingPodTemplate, } pod4.createTestMultihomingPod(oc) exutil.By("Verifying both pods with same label of red-pod are ready for testing") o.Expect(waitForPodWithLabelReady(oc, ns1, "name=red-pod")).NotTo(o.HaveOccurred()) exutil.By("Getting the deployed pods' names") podList, podListErr := exutil.GetAllPods(oc, ns1) o.Expect(podListErr).NotTo(o.HaveOccurred()) exutil.By("Getting the IPs of the pod1's secondary interface") pod1v4, pod1v6 := getPodMultiNetwork(oc, ns1, podList[0]) exutil.By("Getting the IPs of the pod2's secondary interface") pod2v4, pod2v6 := getPodMultiNetwork(oc, ns1, podList[1]) exutil.By("Getting the IPs of the pod3's secondary interface") pod3v4, pod3v6 := getPodMultiNetwork(oc, ns1, podList[2]) exutil.By("Getting the IPs of the pod4's secondary interface") pod4v4, pod4v6 := getPodMultiNetwork(oc, ns1, podList[3]) exutil.By("Verifying the curling should pass before applying multinetworkpolicy") curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod1v4, pod1v6) curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod2v4, pod2v6) curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod1v4, pod1v6) curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod2v4, pod2v6) curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod4v4, pod4v6) curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod3v4, pod3v6) exutil.By("Creating the ingress-allow-same-podSelector-with-same-namespaceSelector policy in ns1") defer removeResource(oc, true, true, "multi-networkpolicy", "ingress-allow-same-podselector-with-same-namespaceselector", "-n", ns1) oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute() output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Verifying the ingress-allow-same-podSelector-with-same-namespaceSelector policy is created in ns1") o.Expect(output).To(o.ContainSubstring("ingress-allow-same-podselector-with-same-namespaceselector")) exutil.By("Verifying the configured multinetworkpolicy will deny or allow the traffics as policy 
defined") curlPod2PodMultiNetworkFail(oc, ns1, podList[2], pod1v4, pod1v6) curlPod2PodMultiNetworkFail(oc, ns1, podList[2], pod2v4, pod2v6) curlPod2PodMultiNetworkFail(oc, ns1, podList[3], pod1v4, pod1v6) curlPod2PodMultiNetworkFail(oc, ns1, podList[3], pod2v4, pod2v6) curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod4v4, pod4v6) curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod3v4, pod3v6) })
test case
openshift/openshift-tests-private
ae5a309d-d107-4b74-8ba8-672c2b182a93
Author:huirwang-NonPreRelease-Longduration-High-75503-Overlapping pod CIDRs/IPs are allowed in different primary NADs.
['"context"', '"fmt"', '"net"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:huirwang-NonPreRelease-Longduration-High-75503-Overlapping pod CIDRs/IPs are allowed in different primary NADs.", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") mtu int32 = 1300 ) nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes.") } ipStackType := checkIPStackType(oc) exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Obtain 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2} nadNS := []string{ns1, ns2} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/26/29", "10.150.0.0/26/29"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2010:100:200::0/60"} } else { subnet = []string{"10.150.0.0/26/29,2010:100:200::0/60", "10.150.0.0/26/29,2010:100:200::0/60"} } } nad := make([]udnNetDefResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: nadResourcename[i], topology: "layer3", subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) exutil.By("Verifying the configued NetworkAttachmentDefinition") if checkNAD(oc, nadNS[i], nadResourcename[i]) { e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename[i]) } else { e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename[i]) } } exutil.By("Create replica pods in ns1") createResourceFromFile(oc, ns1, testPodFile) numberOfPods := "8" err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas="+numberOfPods, "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = waitForPodWithLabelReady(oc, ns1, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testpodNS1Names := getPodName(oc, ns1, "name=test-pods") e2e.Logf("Collect all the pods IPs in namespace %s", ns1) var podsNS1IP1, podsNS1IP2 []string for i := 0; i < len(testpodNS1Names); i++ { podIP1, podIP2 := getPodIPUDN(oc, ns1, testpodNS1Names[i], "ovn-udn1") if podIP2 != "" { podsNS1IP2 = append(podsNS1IP2, podIP2) } podsNS1IP1 = append(podsNS1IP1, podIP1) } e2e.Logf("The IPs of pods in first namespace %s for UDN:\n %v %v", ns1, podsNS1IP1, podsNS1IP2) exutil.By("create replica pods in ns2") createResourceFromFile(oc, ns2, testPodFile) err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas="+numberOfPods, "-n", ns2).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = waitForPodWithLabelReady(oc, ns2, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testpodNS2Names := getPodName(oc, ns2, "name=test-pods") e2e.Logf("Collect all the pods IPs in namespace %s", ns2) var podsNS2IP1, podsNS2IP2 []string for i := 0; i < len(testpodNS2Names); i++ { podIP1, podIP2 := getPodIPUDN(oc, ns2, testpodNS2Names[i], "ovn-udn1") if podIP2 != "" { podsNS2IP2 = append(podsNS2IP2, podIP2) } 
podsNS2IP1 = append(podsNS2IP1, podIP1) } e2e.Logf("The IPs of pods in second namespace %s for UDN:\n %v %v", ns2, podsNS2IP1, podsNS2IP2) testpodNS1NamesLen := len(testpodNS1Names) podsNS1IP1Len := len(podsNS1IP1) podsNS1IP2Len := len(podsNS1IP2) exutil.By("Verify udn network should be able to access in same network.") for i := 0; i < testpodNS1NamesLen; i++ { for j := 0; j < podsNS1IP1Len; j++ { if podsNS1IP2Len > 0 && podsNS1IP2[j] != "" { _, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS1IP2[j], "8080")) o.Expect(err).NotTo(o.HaveOccurred()) } _, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS1IP1[j], "8080")) o.Expect(err).NotTo(o.HaveOccurred()) } } podsNS2IP1Len := len(podsNS2IP1) podsNS2IP2Len := len(podsNS2IP2) exutil.By("Verify udn network should be isolated in different network.") for i := 0; i < testpodNS1NamesLen; i++ { for j := 0; j < podsNS2IP1Len; j++ { if podsNS2IP2Len > 0 && podsNS2IP2[j] != "" { if contains(podsNS1IP2, podsNS2IP2[j]) { // as the destination IP in ns2 is same as one in NS1, then it will be able to access that IP and has been executed in previous steps. continue } else { _, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS2IP2[j], "8080")) o.Expect(err).To(o.HaveOccurred()) } } if contains(podsNS1IP1, podsNS2IP1[j]) { // as the destination IP in ns2 is same as one in NS1, then it will be able to access that IP and has been executed in previous steps.. continue } else { _, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS2IP1[j], "8080")) o.Expect(err).To(o.HaveOccurred()) } } } })
test case
openshift/openshift-tests-private
a1c0e26d-b765-40d4-b84d-6fc9a5500d68
Author:meinli-High-75880-Check udn pods connection and isolation on user defined networks when NADs are created via CRD(Layer 3)
['"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-High-75880-Check udn pods connection and isolation on user defined networks when NADs are created via CRD(Layer 3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() exutil.By("3. Create CRD for UDN") udnResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2} udnNS := []string{ns1, ns2} var cidr, ipv4cidr, ipv6cidr []string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = []string{"10.150.0.0/16", "10.151.0.0/16"} prefix = 24 } else { if ipStackType == "ipv6single" { cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} prefix = 64 } else { ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"} ipv4prefix = 24 ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} ipv6prefix = 64 } } udncrd := make([]udnCRDResource, 2) for i := 0; i < 2; i++ { if ipStackType == "dualstack" { udncrd[i] = udnCRDResource{ crdname: udnResourcename[i], namespace: udnNS[i], role: "Primary", mtu: mtu, IPv4cidr: ipv4cidr[i], IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr[i], IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd[i].createUdnCRDDualStack(oc) } else { udncrd[i] = udnCRDResource{ crdname: udnResourcename[i], namespace: udnNS[i], role: "Primary", mtu: mtu, cidr: cidr[i], prefix: prefix, template: udnCRDSingleStack, } udncrd[i].createUdnCRDSingleStack(oc) } err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("4. Create replica pods in ns1") createResourceFromFile(oc, ns1, testPodFile) err := waitForPodWithLabelReady(oc, ns1, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testpodNS1Names := getPodName(oc, ns1, "name=test-pods") CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1]) exutil.By("5. create replica pods in ns2") createResourceFromFile(oc, ns2, testPodFile) err = waitForPodWithLabelReady(oc, ns2, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testpodNS2Names := getPodName(oc, ns2, "name=test-pods") exutil.By("6. verify isolation on user defined networks") //udn network connectivity should be isolated CurlPod2PodFailUDN(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0]) //default network connectivity should also be isolated CurlPod2PodFail(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0]) })
test case
openshift/openshift-tests-private
c6eba7e0-1a44-48e4-863d-c62f053004c9
Author:meinli-High-75881-Check udn pods connection and isolation on user defined networks when NADs are created via CRD(Layer 2)
['"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-High-75881-Check udn pods connection and isolation on user defined networks when NADs are created via CRD(Layer 2)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() exutil.By("3. Create CRD for UDN") udnResourcename := []string{"l2-network-" + ns1, "l2-network-" + ns2} udnNS := []string{ns1, ns2} var cidr, ipv4cidr, ipv6cidr []string if ipStackType == "ipv4single" { cidr = []string{"10.150.0.0/16", "10.151.0.0/16"} } else { if ipStackType == "ipv6single" { cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} } else { ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"} ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} } } udncrd := make([]udnCRDResource, 2) for i := 0; i < 2; i++ { if ipStackType == "dualstack" { udncrd[i] = udnCRDResource{ crdname: udnResourcename[i], namespace: udnNS[i], role: "Primary", mtu: mtu, IPv4cidr: ipv4cidr[i], IPv6cidr: ipv6cidr[i], template: udnCRDdualStack, } udncrd[i].createLayer2DualStackUDNCRD(oc) } else { udncrd[i] = udnCRDResource{ crdname: udnResourcename[i], namespace: udnNS[i], role: "Primary", mtu: mtu, cidr: cidr[i], template: udnCRDSingleStack, } udncrd[i].createLayer2SingleStackUDNCRD(oc) } err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("4. Create replica pods in ns1") createResourceFromFile(oc, ns1, testPodFile) err := waitForPodWithLabelReady(oc, ns1, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testpodNS1Names := getPodName(oc, ns1, "name=test-pods") CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1]) exutil.By("5. create replica pods in ns2") createResourceFromFile(oc, ns2, testPodFile) err = waitForPodWithLabelReady(oc, ns2, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testpodNS2Names := getPodName(oc, ns2, "name=test-pods") exutil.By("6. verify isolation on user defined networks") //udn network connectivity should be isolated CurlPod2PodFailUDN(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0]) //default network connectivity should also be isolated CurlPod2PodFail(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0]) })
test case
openshift/openshift-tests-private
189cba67-eccf-4430-9058-220302eb2019
Author:asood-High-75899-Validate L2 and L3 Pod2Egress traffic in shared and local gateway mode
['"fmt"', '"path/filepath"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:asood-High-75899-Validate L2 and L3 Pod2Egress traffic in shared and local gateway mode", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDL2dualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnCRDL2SingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml") udnCRDL3dualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml") udnCRDL3SingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") udnNadtemplate = filepath.Join(buildPruningBaseDir, "udn/udn_nad_template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") mtu int32 = 1300 pingIPv4Cmd = "ping -c 2 8.8.8.8" pingIPv6Cmd = "ping6 -c 2 2001:4860:4860::8888" pingDNSCmd = "ping -c 2 www.google.com" udnNS = []string{} pingCmds = []string{} ) if checkProxy(oc) { g.Skip("This cluster has proxy configured, egress access cannot be tested on the cluster, skip the test.") } ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" || ipStackType == "ipv6single" { if !checkIPv6PublicAccess(oc) { g.Skip("This cluster is dualstack/IPv6 with no access to public websites, egress access cannot be tested on the cluster, skip the test.") } } e2e.Logf("The gateway mode of the cluster is %s", getOVNGatewayMode(oc)) exutil.By("1. Create four UDN namespaces") for i := 0; i < 4; i++ { oc.CreateNamespaceUDN() udnNS = append(udnNS, oc.Namespace()) } var cidr, ipv4cidr, ipv6cidr []string var prefix, ipv4prefix, ipv6prefix int32 pingCmds = append(pingCmds, pingDNSCmd) if ipStackType == "ipv4single" { cidr = []string{"10.150.0.0/16", "10.151.0.0/16"} prefix = 24 pingCmds = append(pingCmds, pingIPv4Cmd) } else { if ipStackType == "ipv6single" { cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} prefix = 64 pingCmds = append(pingCmds, pingIPv6Cmd) } else { ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"} ipv4prefix = 24 ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} ipv6prefix = 64 pingCmds = append(pingCmds, pingIPv4Cmd) pingCmds = append(pingCmds, pingIPv6Cmd) } } exutil.By("2. Create CRD for UDN in first two namespaces") udnResourcename := []string{"l2-network-" + udnNS[0], "l3-network-" + udnNS[1]} udnDSTemplate := []string{udnCRDL2dualStack, udnCRDL3dualStack} udnSSTemplate := []string{udnCRDL2SingleStack, udnCRDL3SingleStack} udncrd := make([]udnCRDResource, 2) for i := 0; i < 2; i++ { if ipStackType == "dualstack" { udncrd[i] = udnCRDResource{ crdname: udnResourcename[i], namespace: udnNS[i], role: "Primary", mtu: mtu, IPv4cidr: ipv4cidr[i], IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr[i], IPv6prefix: ipv6prefix, template: udnDSTemplate[i], } switch i { case 0: udncrd[0].createLayer2DualStackUDNCRD(oc) case 1: udncrd[1].createUdnCRDDualStack(oc) } } else { udncrd[i] = udnCRDResource{ crdname: udnResourcename[i], namespace: udnNS[i], role: "Primary", mtu: mtu, cidr: cidr[i], prefix: prefix, template: udnSSTemplate[i], } switch i { case 0: udncrd[0].createLayer2SingleStackUDNCRD(oc) case 1: udncrd[1].createUdnCRDSingleStack(oc) } } err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("3. 
Create NAD for UDN in last two namespaces") udnNADResourcename := []string{"l2-network-" + udnNS[2], "l3-network-" + udnNS[3]} topology := []string{"layer2", "layer3"} udnnad := make([]udnNetDefResource, 2) for i := 0; i < 2; i++ { udnnad[i] = udnNetDefResource{ nadname: udnNADResourcename[i], namespace: udnNS[i+2], nad_network_name: udnNADResourcename[i], topology: topology[i], subnet: "", mtu: mtu, net_attach_def_name: fmt.Sprintf("%s/%s", udnNS[i+2], udnNADResourcename[i]), role: "primary", template: udnNadtemplate, } if ipStackType == "dualstack" { udnnad[i].subnet = fmt.Sprintf("%s,%s", ipv4cidr[i], ipv6cidr[i]) } else { udnnad[i].subnet = cidr[i] } udnnad[i].createUdnNad(oc) } exutil.By("4. Create replica pods in namespaces") for _, ns := range udnNS { e2e.Logf("Validating in %s namespace", ns) createResourceFromFile(oc, ns, testPodFile) err := waitForPodWithLabelReady(oc, ns, "name=test-pods") exutil.AssertWaitPollNoErr(err, "Pods with label name=test-pods not ready") testpodNSNames := getPodName(oc, ns, "name=test-pods") CurlPod2PodPassUDN(oc, ns, testpodNSNames[0], ns, testpodNSNames[1]) for _, pingCmd := range pingCmds { pingResponse, err := execCommandInSpecificPod(oc, ns, testpodNSNames[0], pingCmd) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(pingResponse, "0% packet loss")).To(o.BeTrue()) } } })
test case
openshift/openshift-tests-private
4bd3167f-77c5-440a-bad3-7414deaf8507
Author:meinli-High-75955-Verify UDN failed message when user defined join subnet overlaps user defined subnet (Layer3)
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-High-75955-Verify UDN failed message when user defined join subnet overlaps user defined subnet (Layer3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDL3dualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml") udnCRDL3SingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") UserDefinedPrimaryNetworkJoinSubnetV4 = "100.65.0.0/16" UserDefinedPrimaryNetworkJoinSubnetV6 = "fd99::/48" mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1. Create namespace") oc.CreateNamespaceUDN() ns := oc.Namespace() exutil.By("2. Create CRD for UDN") var udncrd udnCRDResource var cidr string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = UserDefinedPrimaryNetworkJoinSubnetV4 prefix = 24 } else { if ipStackType == "ipv6single" { cidr = UserDefinedPrimaryNetworkJoinSubnetV6 prefix = 64 } else { ipv4prefix = 24 ipv6prefix = 64 } } if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-75995", namespace: ns, role: "Primary", mtu: mtu, IPv4cidr: UserDefinedPrimaryNetworkJoinSubnetV4, IPv4prefix: ipv4prefix, IPv6cidr: UserDefinedPrimaryNetworkJoinSubnetV6, IPv6prefix: ipv6prefix, template: udnCRDL3dualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-75995", namespace: ns, role: "Primary", mtu: mtu, cidr: cidr, prefix: prefix, template: udnCRDL3SingleStack, } udncrd.createUdnCRDSingleStack(oc) } err := waitUDNCRDApplied(oc, ns, udncrd.crdname) o.Expect(err).To(o.HaveOccurred()) exutil.By("3. Check UDN failed message") output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("userdefinednetwork.k8s.ovn.org", udncrd.crdname, "-n", ns).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.Or( o.ContainSubstring(fmt.Sprintf("illegal network configuration: user defined join subnet \"100.65.0.0/16\" overlaps user defined subnet \"%s\"", UserDefinedPrimaryNetworkJoinSubnetV4)), o.ContainSubstring(fmt.Sprintf("illegal network configuration: user defined join subnet \"fd99::/64\" overlaps user defined subnet \"%s\"", UserDefinedPrimaryNetworkJoinSubnetV6)))) })
test case
openshift/openshift-tests-private
0eead772-af28-4c0d-b245-f537daf9e0c2
Author:anusaxen-Critical-75984-Check udn pods isolation on user defined networks post OVN gateway migration
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:anusaxen-Critical-75984-Check udn pods isolation on user defined networks post OVN gateway migration", func() { var ( udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() exutil.By("3. Create 3rd namespace") oc.CreateNamespaceUDN() ns3 := oc.Namespace() exutil.By("4. Create 4th namespace") oc.CreateNamespaceUDN() ns4 := oc.Namespace() nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2, "l2-network-" + ns3, "l2-network-" + ns4} nadNS := []string{ns1, ns2, ns3, ns4} topo := []string{"layer3", "layer3", "layer2", "layer2"} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16/24", "10.151.0.0/16/24", "10.152.0.0/16", "10.153.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2012:100:200::0/60", "2013:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60", "10.153.0.0/16,2013:100:200::0/60"} } } nad := make([]udnNetDefResource, 4) for i := 0; i < 4; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: nadResourcename[i], topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } pod := make([]udnPodResource, 4) for i := 0; i < 4; i++ { exutil.By("create a udn hello pods in ns1 ns2 ns3 and ns4") pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("create another udn hello pod in ns1 to ensure layer3 conectivity post migration among'em") pod_ns1 := udnPodResource{ name: "hello-pod-ns1", namespace: nadNS[0], label: "hello-pod", template: udnPodTemplate, } pod_ns1.createUdnPod(oc) waitPodReady(oc, pod_ns1.namespace, pod_ns1.name) exutil.By("create another udn hello pod in ns3 to ensure layer2 conectivity post migration among'em") pod_ns3 := udnPodResource{ name: "hello-pod-ns3", namespace: nadNS[2], label: "hello-pod", template: udnPodTemplate, } pod_ns3.createUdnPod(oc) waitPodReady(oc, pod_ns3.namespace, pod_ns3.name) //need to find out original mode cluster is on so that we can revert back to same post test var desiredMode string origMode := getOVNGatewayMode(oc) if origMode == "local" { desiredMode = "shared" } else { desiredMode = "local" } e2e.Logf("Cluster is currently on gateway mode %s", origMode) e2e.Logf("Desired mode is %s", desiredMode) defer switchOVNGatewayMode(oc, origMode) switchOVNGatewayMode(oc, desiredMode) //udn network connectivity for layer3 should be isolated CurlPod2PodFailUDN(oc, ns1, pod[0].name, ns2, pod[1].name) //default network connectivity for layer3 should also be isolated CurlPod2PodFail(oc, ns1, pod[0].name, ns2, pod[1].name) //udn network connectivity for layer2 should be isolated CurlPod2PodFailUDN(oc, ns3, pod[2].name, ns4, pod[3].name) //default network connectivity for layer2 should also be isolated CurlPod2PodFail(oc, ns3, 
pod[2].name, ns4, pod[3].name) //ensure udn network connectivity for layer3 should be there CurlPod2PodPassUDN(oc, ns1, pod[0].name, ns1, pod_ns1.name) //ensure udn network connectivity for layer2 should be there CurlPod2PodPassUDN(oc, ns3, pod[2].name, ns3, pod_ns3.name) })
test case
openshift/openshift-tests-private
953416ad-dc45-4de1-9e54-075756942e17
Author:anusaxen-NonPreRelease-Longduration-Critical-76939-Check udn pods isolation on a scaled node [Disruptive]
['"path/filepath"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:anusaxen-NonPreRelease-Longduration-Critical-76939-Check udn pods isolation on a scaled node [Disruptive]", func() { var ( udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") udnPodTemplateNode = filepath.Join(testDataDirUDN, "udn_test_pod_template_node.yaml") udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) o.Expect(ipStackType).NotTo(o.BeEmpty()) if ipStackType != "ipv4single" { g.Skip("This case requires IPv4 single stack cluster") } clusterinfra.SkipConditionally(oc) clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.OpenStack) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() udnResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2} udnNS := []string{ns1, ns2} var cidr []string var prefix int32 cidr = []string{"10.150.0.0/16", "10.151.0.0/16"} prefix = 24 udncrd := make([]udnCRDResource, 2) for i := 0; i < 2; i++ { udncrd[i] = udnCRDResource{ crdname: udnResourcename[i], namespace: udnNS[i], role: "Primary", mtu: mtu, cidr: cidr[i], prefix: prefix, template: udnCRDSingleStack, } udncrd[i].createUdnCRDSingleStack(oc) err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("create a udn hello pod in ns1") pod1 := udnPodResource{ name: "hello-pod-ns1", namespace: ns1, label: "hello-pod", template: udnPodTemplate, } pod1.createUdnPod(oc) waitPodReady(oc, pod1.namespace, pod1.name) //following code block to scale up a node on cluster exutil.By("1. Create a new machineset, get the new node created\n") clusterinfra.SkipConditionally(oc) infrastructureName := clusterinfra.GetInfrastructureName(oc) machinesetName := infrastructureName + "-76939" ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1} defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName) defer ms.DeleteMachineSet(oc) ms.CreateMachineSet(oc) clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName) machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName) o.Expect(len(machineName)).ShouldNot(o.Equal(0)) nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0]) e2e.Logf("Get nodeName: %v", nodeName) checkNodeStatus(oc, nodeName, "Ready") exutil.By("create a udn hello pod in ns2") pod2 := udnPodResourceNode{ name: "hello-pod-ns2", namespace: ns2, label: "hello-pod", nodename: nodeName, template: udnPodTemplateNode, } pod2.createUdnPodNode(oc) waitPodReady(oc, pod2.namespace, pod2.name) //udn network connectivity should be isolated CurlPod2PodFailUDN(oc, ns1, pod1.name, ns2, pod2.name) //default network connectivity should also be isolated CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name) })
test case
openshift/openshift-tests-private
92bb3ed8-d893-4801-b0c6-b0f87bf1e567
Author:meinli-NonHyperShiftHOST-High-77517-Validate pod2pod connection within and across nodes when creating UDN with Secondary role from same namespace (Layer3)
['"context"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-NonHyperShiftHOST-High-77517-Validate pod2pod connection within and across node when creating UDN with Secondary role from same namespace (Layer3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml") udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml") mtu int32 = 9000 ) ipStackType := checkIPStackType(oc) exutil.By("1. Get namespace and worker node") oc.CreateNamespaceUDN() ns := oc.Namespace() nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } exutil.By("2. create UDN with Secondary role and Primary role") var cidr, ipv4cidr, ipv6cidr []string var prefix, ipv4prefix, ipv6prefix int32 cidr = []string{"10.150.0.0/16", "10.200.0.0/16"} prefix = 24 if ipStackType == "ipv6single" { cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} prefix = 64 } ipv4cidr = []string{"10.150.0.0/16", "10.200.0.0/16"} ipv4prefix = 24 ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} ipv6prefix = 64 var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "l3-secondary-77517", namespace: ns, role: "Secondary", mtu: mtu, IPv4cidr: ipv4cidr[0], IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr[0], IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "l3-secondary-77517", namespace: ns, role: "Secondary", mtu: mtu, cidr: cidr[0], prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) } err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) // create Primary UDN createGeneralUDNCRD(oc, ns, "l3-primary-77517", ipv4cidr[1], ipv6cidr[1], cidr[1], "layer3") exutil.By("3. Create 2 pods within the same node and 1 pod across with different nodes") pods := make([]udnPodSecNADResourceNode, 3) var podNames []string for i := 0; i < 2; i++ { pods[i] = udnPodSecNADResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns, nadname: udncrd.crdname, nodename: nodeList.Items[i].Name, template: udnPodTemplate, } pods[i].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns, pods[i].name) podNames = append(podNames, pods[i].name) } pods[2] = udnPodSecNADResourceNode{ name: "hello-pod-2", namespace: ns, nadname: udncrd.crdname, nodename: nodeList.Items[1].Name, template: udnPodTemplate, } pods[2].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns, pods[2].name) podNames = append(podNames, pods[2].name) exutil.By("4. Check pods subnet overlap within and across nodes") o.Expect(checkPodCIDRsOverlap(oc, ns, ipStackType, []string{podNames[2], podNames[0]}, "net1")).Should(o.BeFalse()) o.Expect(checkPodCIDRsOverlap(oc, ns, ipStackType, []string{podNames[2], podNames[1]}, "net1")).Should(o.BeTrue()) exutil.By("5. Validate pod2pod connection within the same node and across with different nodes") CurlUDNPod2PodPassMultiNetwork(oc, ns, ns, podNames[2], "net1", podNames[0], "net1") CurlUDNPod2PodPassMultiNetwork(oc, ns, ns, podNames[2], "net1", podNames[1], "net1") exutil.By("6. 
Validate isolation between Primary and Secondary interface") CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "ovn-udn1", podNames[1], "net1") CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "net1", podNames[1], "ovn-udn1") })
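Unlike the Primary-role tests, a Secondary-role UDN is not attached automatically; the pod template presumably requests it through the Multus networks annotation, which is why the secondary interface shows up as net1. A sketch of udn_test_pod_annotation_template_node.yaml under that assumption; names and image are placeholders:

// Hypothetical fixture: pod requesting the Secondary-role UDN by name.
const udnSecondaryPodSketch = `apiVersion: v1
kind: Pod
metadata:
  name: hello-pod0
  annotations:
    k8s.v1.cni.cncf.io/networks: l3-secondary-77517 # nadname; appears as net1
spec:
  nodeName: <node> # pinned via udnPodSecNADResourceNode.nodename
  containers:
  - name: hello-pod
    image: <hello-openshift-image> # placeholder
`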
test case
openshift/openshift-tests-private
72b85127-a43b-4b6f-8cec-ee9531a76111
Author:meinli-NonHyperShiftHOST-High-77519-Validate pod2pod isolation within and across nodes when creating UDN with Secondary role from different namespaces (Layer3)
['"context"', '"path/filepath"', '"strconv"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-NonHyperShiftHOST-High-77519-Validate pod2pod isolation within and across nodes when creating UDN with Secondary role from different namespaces (Layer3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml") udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml") mtu int32 = 9000 ) ipStackType := checkIPStackType(oc) exutil.By("1. Get namespace and worker node") ns1 := oc.Namespace() nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } exutil.By("2. create UDN with Secondary role in ns1") var cidr, ipv4cidr, ipv6cidr []string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = []string{"10.150.0.0/16", "10.200.0.0/16"} prefix = 24 } else { if ipStackType == "ipv6single" { cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} prefix = 64 } else { ipv4cidr = []string{"10.150.0.0/16", "10.200.0.0/16"} ipv4prefix = 24 ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} ipv6prefix = 64 } } var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "l3-secondary", namespace: ns1, role: "Secondary", mtu: mtu, IPv4cidr: ipv4cidr[0], IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr[0], IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "l3-secondary", namespace: ns1, role: "Secondary", mtu: mtu, cidr: cidr[0], prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) } err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. validate Layer3 router is created in OVN") ovnMasterPodName := getOVNKMasterOVNkubeNode(oc) o.Expect(ovnMasterPodName).NotTo(o.BeEmpty()) o.Eventually(func() bool { return checkOVNRouter(oc, "l3.secondary_ovn_cluster_router", ovnMasterPodName) }, 20*time.Second, 5*time.Second).Should(o.BeTrue(), "The correct OVN router is not created") exutil.By("4. create 1 pod with secondary annotation in ns1") var podNames []string // create 1 pod in ns1 pod1 := udnPodSecNADResourceNode{ name: "hello-pod-ns1", namespace: ns1, nadname: udncrd.crdname, nodename: nodeList.Items[0].Name, template: udnPodTemplate, } pod1.createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns1, pod1.name) podNames = append(podNames, pod1.name) exutil.By("5. create UDN with secondary role in ns2") // create 2nd namespace oc.SetupProject() ns2 := oc.Namespace() udncrd.namespace = ns2 if ipStackType == "dualstack" { udncrd.IPv4cidr = ipv4cidr[1] udncrd.IPv6cidr = ipv6cidr[1] udncrd.createUdnCRDDualStack(oc) } else { udncrd.cidr = cidr[1] udncrd.createUdnCRDSingleStack(oc) } err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. 
create 2 pods with secondary annotation in ns2") pods := make([]udnPodSecNADResourceNode, 2) //create 2 pods in ns2 for i := 0; i < 2; i++ { pods[i] = udnPodSecNADResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns2, nadname: udncrd.crdname, nodename: nodeList.Items[i].Name, template: udnPodTemplate, } pods[i].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns2, pods[i].name) podNames = append(podNames, pods[i].name) } exutil.By("7. Validate pod2pod isolation from secondary network in different namespaces") CurlUDNPod2PodFailMultiNetwork(oc, ns1, ns2, podNames[0], "net1", podNames[1], "net1") CurlUDNPod2PodFailMultiNetwork(oc, ns1, ns2, podNames[0], "net1", podNames[2], "net1") CurlUDNPod2PodPassMultiNetwork(oc, ns2, ns2, podNames[1], "net1", podNames[2], "net1") })
test case
openshift/openshift-tests-private
8fa098df-edfd-45b3-9ec4-835706a610b2
Author:meinli-NonHyperShiftHOST-High-77563-Validate pod2pod connection within and across nodes when creating UDN with Secondary role from same namespace (Layer2)
['"context"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-NonHyperShiftHOST-High-77563-Validate pod2pod connection within and across node when creating UDN with Secondary role from same namespace (Layer2)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml") mtu int32 = 9000 podenvname = "Hello OpenShift" ) exutil.By("1. Get namespace and worker node") oc.CreateNamespaceUDN() ns := oc.Namespace() nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } exutil.By("2. create Layer2 UDN with Secondary role and Primary role") ipStackType := checkIPStackType(oc) var cidr string var ipv4cidr, ipv6cidr []string cidr = "10.200.0.0/16" if ipStackType == "ipv6single" { cidr = "2011:100:200::0/60" } ipv4cidr = []string{"10.150.0.0/16", "10.200.0.0/16"} ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"} udncrd := udnCRDResource{ crdname: "l2-secondary", namespace: ns, role: "Secondary", mtu: mtu, IPv4cidr: ipv4cidr[0], IPv6cidr: ipv6cidr[0], template: udnCRDdualStack, } udncrd.createLayer2DualStackUDNCRD(oc) err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) createGeneralUDNCRD(oc, ns, "l2-primary-network", ipv4cidr[1], ipv6cidr[1], cidr, "layer2") exutil.By("3. create 2 pods within the same node and 1 pod across with different nodes") pods := make([]udnPodSecNADResourceNode, 3) var podNames []string for i := 0; i < 2; i++ { pods[i] = udnPodSecNADResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns, nadname: udncrd.crdname, nodename: nodeList.Items[i].Name, template: udnPodTemplate, } pods[i].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns, pods[i].name) podNames = append(podNames, pods[i].name) } pods[2] = udnPodSecNADResourceNode{ name: "hello-pod-2", namespace: ns, nadname: udncrd.crdname, nodename: nodeList.Items[1].Name, template: udnPodTemplate, } pods[2].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns, pods[2].name) podNames = append(podNames, pods[2].name) exutil.By("4. Check pods subnet overlap within and across nodes") o.Expect(checkPodCIDRsOverlap(oc, ns, "dualstack", []string{podNames[2], podNames[0]}, "net1")).Should(o.BeTrue()) o.Expect(checkPodCIDRsOverlap(oc, ns, "dualstack", []string{podNames[2], podNames[1]}, "net1")).Should(o.BeTrue()) exutil.By("5. Validate pod2pod connection (dual stack) within the same node") pod0IPv4, pod0IPv6 := getPodMultiNetwork(oc, ns, podNames[0]) e2e.Logf("Pod0 IPv4 address is: %v, IPv6 address is: %v", pod0IPv4, pod0IPv6) CurlMultusPod2PodPass(oc, ns, podNames[2], pod0IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod0IPv6, "net1", podenvname) exutil.By("6. Validate pod2pod connection (dual stack) across with different nodes") pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[1]) e2e.Logf("Pod1 IPv4 address is: %v, IPv6 address is: %v", pod1IPv4, pod1IPv6) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname) exutil.By("7. 
Validate isolation between primary and secondary role") CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "ovn-udn1", podNames[1], "net1") CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "net1", podNames[1], "ovn-udn1") })
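For reference, the subnet-overlap assertion in step 4 can be reproduced with the standard library alone. The sketch below is a hypothetical stand-in for the repo's checkPodCIDRsOverlap helper (whose real signature and logic may differ); it parses two pod CIDRs and reports whether the networks share addresses.

package main

import (
	"fmt"
	"net"
)

// cidrsOverlap reports whether two CIDR blocks share any addresses.
// Two networks overlap iff either one contains the other's base address.
func cidrsOverlap(a, b string) (bool, error) {
	_, netA, err := net.ParseCIDR(a)
	if err != nil {
		return false, err
	}
	_, netB, err := net.ParseCIDR(b)
	if err != nil {
		return false, err
	}
	return netA.Contains(netB.IP) || netB.Contains(netA.IP), nil
}

func main() {
	// Pod addresses on a layer2 secondary network share one flat subnet,
	// so their CIDRs are expected to overlap.
	overlap, err := cidrsOverlap("10.150.1.3/16", "10.150.2.7/16")
	if err != nil {
		panic(err)
	}
	fmt.Println("overlap:", overlap) // true
}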
test case
openshift/openshift-tests-private
1c146ad1-9fd2-4954-8095-2f3544b06999
Author:meinli-NonHyperShiftHOST-High-77564-Validate pod2pod isolation within and across node when creating UDN with Secondary role from different namespaces (Layer2)
['"context"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-NonHyperShiftHOST-High-77564-Validate pod2pod isolation within and across node when creating UDN with Secondary role from different namespaces (Layer2)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml") mtu int32 = 9000 podenvname = "Hello OpenShift" ) exutil.By("1. Get namespace and worker node") ns1 := oc.Namespace() nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } exutil.By("2. Create Layer2 UDN with Secondary role in ns1") ipv4cidr := []string{"10.150.0.0/16", "10.200.0.0/16"} ipv6cidr := []string{"2010:100:200::0/60", "2011:100:200::0/60"} udncrd1 := udnCRDResource{ crdname: "l2-secondary-ns1", namespace: ns1, role: "Secondary", mtu: mtu, IPv4cidr: ipv4cidr[0], IPv6cidr: ipv6cidr[0], template: udnCRDdualStack, } udncrd1.createLayer2DualStackUDNCRD(oc) err = waitUDNCRDApplied(oc, udncrd1.namespace, udncrd1.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. create 1 pod with secondary annotation in ns1") var podNames []string // create 1 pod in ns1 pod1 := udnPodSecNADResourceNode{ name: "hello-pod-ns1", namespace: ns1, nadname: udncrd1.crdname, nodename: nodeList.Items[0].Name, template: udnPodTemplate, } pod1.createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns1, pod1.name) podNames = append(podNames, pod1.name) exutil.By("4. create Layer2 UDN with secondary role in ns2") // create 2nd namespace oc.SetupProject() ns2 := oc.Namespace() udncrd2 := udnCRDResource{ crdname: "l2-secondary-ns2", namespace: ns2, role: "Secondary", mtu: mtu, IPv4cidr: ipv4cidr[1], IPv6cidr: ipv6cidr[1], template: udnCRDdualStack, } udncrd2.createLayer2DualStackUDNCRD(oc) err = waitUDNCRDApplied(oc, udncrd2.namespace, udncrd2.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create pods with secondary annotation in ns2") pods := make([]udnPodSecNADResourceNode, 2) //create 2 pods in ns2 for i := 0; i < 2; i++ { pods[i] = udnPodSecNADResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns2, nadname: udncrd2.crdname, nodename: nodeList.Items[i].Name, template: udnPodTemplate, } pods[i].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns2, pods[i].name) podNames = append(podNames, pods[i].name) } exutil.By("6. validate pod2pod isolation (dual stack) within the same node") pod0IPv4, pod0IPv6 := getPodMultiNetwork(oc, ns2, podNames[1]) e2e.Logf("Pod0 IPv4 address is: %v, IPv6 address is: %v", pod0IPv4, pod0IPv6) CurlMultusPod2PodFail(oc, ns1, podNames[0], pod0IPv4, "net1", podenvname) CurlMultusPod2PodFail(oc, ns1, podNames[0], pod0IPv6, "net1", podenvname) exutil.By("7. validate pod2pod isolation (dual stack) across with different node") pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns2, podNames[2]) e2e.Logf("Pod1 IPv4 address is: %v, IPv6 address is: %v", pod1IPv4, pod1IPv6) CurlMultusPod2PodFail(oc, ns1, podNames[0], pod1IPv4, "net1", podenvname) CurlMultusPod2PodFail(oc, ns1, podNames[0], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns2, podNames[1], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns2, podNames[1], pod1IPv6, "net1", podenvname) })
test case
openshift/openshift-tests-private
bc7321bd-ebec-4454-b030-e41f69ed9210
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-High-77656-Verify ingress-ipblock policy for UDN pod's secondary interface (Layer2). [Disruptive]
['"context"', '"fmt"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-High-77656-Verify ingress-ipblock policy for UDN pod's secondary interface (Layer2). [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml") multinetworkipBlockIngressTemplateDual = filepath.Join(buildPruningBaseDir, "multihoming/multiNetworkPolicy_ingress_ipblock_template.yaml") patchSResource = "networks.operator.openshift.io/cluster" mtu int32 = 9000 podenvname = "Hello OpenShift" udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml") ) exutil.By("Getting the ready-schedulable worker nodes") nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 1 { g.Skip("The cluster has no ready node for the testing") } exutil.By("Getting the namespace name") oc.CreateNamespaceUDN() ns := oc.Namespace() exutil.By("Enabling useMultiNetworkPolicy in the cluster") patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}") patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}") defer func() { patchResourceAsAdmin(oc, patchSResource, patchInfoFalse) exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy") waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False") waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False") }() patchResourceAsAdmin(oc, patchSResource, patchInfoTrue) exutil.By("Wait for the NetworkOperator to become functional after enabling useMultiNetworkPolicy") waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False") waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } exutil.By("Creating Layer2 UDN CRD with Primary role") var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-75239", namespace: ns, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv6cidr: ipv6cidr, template: udnCRDdualStack, } udncrd.createLayer2DualStackUDNCRD(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-75658", namespace: ns, role: "Primary", mtu: 1400, cidr: cidr, template: udnCRDSingleStack, } udncrd.createLayer2SingleStackUDNCRD(oc) } err := waitUDNCRDApplied(oc, ns, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Creating Layer2 UDN CRD with Secondary role") ipv4cidr1 := "20.200.200.0/24" ipv6cidr1 := "2000:200:200::0/64" nadName1 := "ipblockingress77656" nsWithnad := ns + "/" + nadName1 udncrd1 := udnCRDResource{ crdname: nadName1, namespace: ns, role: "Secondary", mtu: mtu, IPv4cidr: ipv4cidr1, IPv6cidr: ipv6cidr1, template: udnCRDdualStack, } udncrd1.createLayer2DualStackUDNCRD(oc) err = waitUDNCRDApplied(oc, udncrd1.namespace, udncrd1.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Creating three testing pods consuming above network-attach-definition in ns") pods := make([]udnPodSecNADResourceNode, 3) var podNames []string for i := 0; i < 3; i++ { pods[i] = udnPodSecNADResourceNode{ name: "hello-pod" + 
strconv.Itoa(i), namespace: ns, nadname: udncrd1.crdname, nodename: nodeList.Items[0].Name, template: udnPodTemplate, } pods[i].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns, pods[i].name) podNames = append(podNames, pods[i].name) } exutil.By("Verifying the all pods get dual IPs") pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[0]) pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns, podNames[1]) pod3IPv4, pod3IPv6 := getPodMultiNetwork(oc, ns, podNames[2]) pod3IPv4WithCidr := pod3IPv4 + "/32" pod3IPv6WithCidr := pod3IPv6 + "/128" exutil.By("Verifying that there is no traffic blocked between pods") CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname) exutil.By("Creating ipBlock Ingress Dual CIDRs Policy to allow traffic only from pod3") defer removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-ingress", "-n", ns) IPBlock := multinetworkipBlockCIDRsDual{ name: "multinetworkipblock-dual-cidrs-ingress", namespace: ns, cidrIpv4: pod3IPv4WithCidr, cidrIpv6: pod3IPv6WithCidr, policyfor: nsWithnad, template: multinetworkipBlockIngressTemplateDual, } IPBlock.createMultinetworkipBlockCIDRDual(oc) policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output() o.Expect(policyerr).NotTo(o.HaveOccurred()) o.Expect(policyoutput).To(o.ContainSubstring("multinetworkipblock-dual-cidrs-ingress")) exutil.By("Verifying the ipBlock Ingress Dual CIDRs policy ensures that only traffic from pod3 is allowed") CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv4, "net1", podenvname) CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv6, "net1", podenvname) CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv4, "net1", podenvname) CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname) exutil.By("Deleting ipBlock Ingress Dual CIDRs Policy") removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-ingress", "-n", ns) policyoutput1, policyerr1 := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output() o.Expect(policyerr1).NotTo(o.HaveOccurred()) o.Expect(policyoutput1).NotTo(o.ContainSubstring("multinetworkipblock-dual-cidrs-ingress")) exutil.By("Verifying that there is no traffic blocked between pods after deleting policy") CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname) 
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname) })
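The patchResourceAsAdmin calls above apply a JSON merge patch to the cluster network operator config. Outside the test framework, the equivalent operation is a single oc patch; a minimal sketch, assuming oc is on PATH with cluster-admin credentials:

package main

import (
	"fmt"
	"os/exec"
)

// setUseMultiNetworkPolicy toggles spec.useMultiNetworkPolicy on the
// cluster network operator config via a JSON merge patch.
func setUseMultiNetworkPolicy(enabled bool) error {
	patch := fmt.Sprintf(`{"spec":{"useMultiNetworkPolicy":%t}}`, enabled)
	out, err := exec.Command("oc", "patch", "networks.operator.openshift.io/cluster",
		"--type=merge", "-p", patch).CombinedOutput()
	if err != nil {
		return fmt.Errorf("patch failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	if err := setUseMultiNetworkPolicy(true); err != nil {
		fmt.Println(err)
	}
	// Remember to patch back to false when done; the test does this in a defer.
}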
test case
openshift/openshift-tests-private
e65efc84-d471-4d32-81f3-7b9826c775cd
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-78125-Verify egress-ipblock policy for UDN pod's secondary interface (Layer2). [Disruptive]
['"context"', '"fmt"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-78125-Verify egress-ipblock policy for UDN pod's secondary interface (Layer2). [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml") multinetworkipBlockegressTemplateDual = filepath.Join(buildPruningBaseDir, "multihoming/multiNetworkPolicy_egress_ipblock_template.yaml") patchSResource = "networks.operator.openshift.io/cluster" mtu int32 = 9000 podenvname = "Hello OpenShift" udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml") ) exutil.By("Getting the ready-schedulable worker nodes") nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 1 { g.Skip("The cluster has no ready node for the testing") } exutil.By("Getting the namespace name") oc.CreateNamespaceUDN() ns := oc.Namespace() exutil.By("Enabling useMultiNetworkPolicy in the cluster") patchInfoTrue := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":true}}") patchInfoFalse := fmt.Sprintf("{\"spec\":{\"useMultiNetworkPolicy\":false}}") defer func() { patchResourceAsAdmin(oc, patchSResource, patchInfoFalse) exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy") waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False") waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False") }() patchResourceAsAdmin(oc, patchSResource, patchInfoTrue) exutil.By("Waitting for the NetworkOperator to become functional after enabling useMultiNetworkPolicy") waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False") waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } exutil.By("Creating Layer2 UDN CRD with Primary role") var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-75239", namespace: ns, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv6cidr: ipv6cidr, template: udnCRDdualStack, } udncrd.createLayer2DualStackUDNCRD(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-75658", namespace: ns, role: "Primary", mtu: 1400, cidr: cidr, template: udnCRDSingleStack, } udncrd.createLayer2SingleStackUDNCRD(oc) } err := waitUDNCRDApplied(oc, ns, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Creating Layer2 UDN CRD with Secondary role") ipv4cidr1 := "20.200.200.0/24" ipv6cidr1 := "2000:200:200::0/64" nadName1 := "ipblockegress78125" nsWithnad := ns + "/" + nadName1 udncrd1 := udnCRDResource{ crdname: nadName1, namespace: ns, role: "Secondary", mtu: mtu, IPv4cidr: ipv4cidr1, IPv6cidr: ipv6cidr1, template: udnCRDdualStack, } udncrd1.createLayer2DualStackUDNCRD(oc) err = waitUDNCRDApplied(oc, udncrd1.namespace, udncrd1.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Creating three testing pods consuming above network-attach-definition in ns") pods := make([]udnPodSecNADResourceNode, 3) var podNames []string for i := 0; i < 3; i++ { pods[i] = udnPodSecNADResourceNode{ name: "hello-pod" + 
strconv.Itoa(i), namespace: ns, nadname: udncrd1.crdname, nodename: nodeList.Items[0].Name, template: udnPodTemplate, } pods[i].createUdnPodWithSecNADNode(oc) waitPodReady(oc, ns, pods[i].name) podNames = append(podNames, pods[i].name) } exutil.By("Verifying the all pods get dual IPs") pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[0]) pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns, podNames[1]) pod3IPv4, pod3IPv6 := getPodMultiNetwork(oc, ns, podNames[2]) pod3IPv4WithCidr := pod3IPv4 + "/32" pod3IPv6WithCidr := pod3IPv6 + "/128" exutil.By("Verifying that there is no traffic blocked between pods") CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv6, "net1", podenvname) exutil.By("Creating ipBlock egress Dual CIDRs Policy to allow traffic only to pod3") defer removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-egress", "-n", ns) IPBlock := multinetworkipBlockCIDRsDual{ name: "multinetworkipblock-dual-cidrs-egress", namespace: ns, cidrIpv4: pod3IPv4WithCidr, cidrIpv6: pod3IPv6WithCidr, policyfor: nsWithnad, template: multinetworkipBlockegressTemplateDual, } IPBlock.createMultinetworkipBlockCIDRDual(oc) policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output() o.Expect(policyerr).NotTo(o.HaveOccurred()) o.Expect(policyoutput).To(o.ContainSubstring("multinetworkipblock-dual-cidrs-egress")) exutil.By("Verifying the ipBlock egress Dual CIDRs policy ensures that only traffic to pod3 is allowed") CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv4, "net1", podenvname) CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv6, "net1", podenvname) CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv4, "net1", podenvname) CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv6, "net1", podenvname) exutil.By("Deleting ipBlock egress Dual CIDRs Policy") removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-egress", "-n", ns) policyoutput1, policyerr1 := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output() o.Expect(policyerr1).NotTo(o.HaveOccurred()) o.Expect(policyoutput1).NotTo(o.ContainSubstring("multinetworkipblock-dual-cidrs-egress")) exutil.By("Verifying that there is no traffic blocked between pods after deleting policy") CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv6, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, 
podNames[1], pod3IPv4, "net1", podenvname) CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv6, "net1", podenvname) })
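waitForNetworkOperatorState(oc, interval, retries, pattern) appears throughout these tests with patterns like "True.*False.*False", tracking the AVAILABLE/PROGRESSING/DEGRADED columns of the network clusteroperator. The loop below is a hypothetical re-implementation of that polling logic (the repo's actual helper and its argument semantics may differ); it assumes oc is on PATH.

package main

import (
	"fmt"
	"os/exec"
	"regexp"
	"time"
)

// waitForCOState polls `oc get clusteroperator network` until the status
// columns match pattern, e.g. "True.*False.*False" for Available, not
// Progressing, not Degraded.
func waitForCOState(pattern string, interval time.Duration, retries int) error {
	re := regexp.MustCompile(pattern)
	for i := 0; i < retries; i++ {
		out, err := exec.Command("oc", "get", "clusteroperator", "network", "--no-headers").CombinedOutput()
		if err == nil && re.Match(out) {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("network operator never matched %q", pattern)
}

func main() {
	// The operator briefly goes Progressing after the patch, then settles.
	if err := waitForCOState("True.*False.*False", 15*time.Second, 60); err != nil {
		fmt.Println(err)
	}
}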
test case
openshift/openshift-tests-private
2d5164bd-a656-4688-885f-93c24198b78a
Author:meinli-Medium-78329-Validate pod2pod on diff workers and host2pod on same/diff workers (UDN Layer3 with Primary role)
['"context"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-Medium-78329-Validate pod2pod on diff workers and host2pod on same/diff workers (UDN Layer3 with Primary role)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") ) exutil.By("1. Get worker node and namespace") nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } oc.CreateNamespaceUDN() ns := oc.Namespace() exutil.By("2. Create UDN CRD Layer3 with Primary role") err = applyL3UDNtoNamespace(oc, ns, 0) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create two pods on diff workers in ns") pods := make([]pingPodResourceNode, 2) for i := 0; i < 2; i++ { pods[i] = pingPodResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns, nodename: nodeList.Items[i].Name, template: pingPodNodeTemplate, } pods[i].createPingPodNode(oc) waitPodReady(oc, ns, pods[i].name) } exutil.By("4. Validate pod to pod on different workers") CurlPod2PodPassUDN(oc, ns, pods[0].name, ns, pods[1].name) exutil.By("5. validate host to pod on same and diff workers") CurlNode2PodFailUDN(oc, nodeList.Items[0].Name, ns, pods[0].name) CurlNode2PodFailUDN(oc, nodeList.Items[0].Name, ns, pods[1].name) })
test case
openshift/openshift-tests-private
e37e11ab-2bd7-45ce-940d-a646a2fef37e
Author:qiowang-High-77542-Check default network ports can be exposed on UDN pods(layer3) [Serial]
['"path/filepath"', '"strconv"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:qiowang-High-77542-Check default network ports can be exposed on UDN pods(layer3) [Serial]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml") statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml") tcpPort = 8080 udpPort = 6000 sctpPort = 30102 ) exutil.By("Preparing the nodes for SCTP") prepareSCTPModule(oc, sctpModule) exutil.By("1. Create the first namespace") oc.SetupProject() ns1 := oc.Namespace() exutil.By("2. Create a hello pod in ns1") createResourceFromFile(oc, ns1, statefulSetHelloPod) pod1Err := waitForPodWithLabelReady(oc, ns1, "app=hello") exutil.AssertWaitPollNoErr(pod1Err, "The statefulSet pod is not ready") pod1Name := getPodName(oc, ns1, "app=hello")[0] exutil.By("3. Create the 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() exutil.By("4. Create CRD for UDN in ns2") err := applyL3UDNtoNamespace(oc, ns2, 0) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. Create a udn hello pod in ns2") createResourceFromFile(oc, ns2, statefulSetHelloPod) pod2Err := waitForPodWithLabelReady(oc, ns2, "app=hello") exutil.AssertWaitPollNoErr(pod2Err, "The statefulSet pod is not ready") pod2Name := getPodName(oc, ns2, "app=hello")[0] exutil.By("6. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should not be able to access") PingPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name) CurlPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, false) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, false) exutil.By("7. Add annotation to expose default network port on udn pod") annotationConf := `k8s.ovn.org/open-default-ports=[{"protocol":"icmp"}, {"protocol":"tcp","port":` + strconv.Itoa(tcpPort) + `}, {"protocol":"udp","port":` + strconv.Itoa(udpPort) + `}, {"protocol":"sctp","port":` + strconv.Itoa(sctpPort) + `}]` err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("pod", pod2Name, "-n", ns2, "--overwrite", annotationConf).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("8. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should be able to access") PingPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name) CurlPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, true) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, true) })
test case
openshift/openshift-tests-private
3337392b-2a75-4c9d-aec7-33c5bf9b52aa
Author:qiowang-High-77742-Check default network ports can be exposed on UDN pods(layer2) [Serial]
['"path/filepath"', '"strconv"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:qiowang-High-77742-Check default network ports can be exposed on UDN pods(layer2) [Serial]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml") statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml") tcpPort = 8080 udpPort = 6000 sctpPort = 30102 ) exutil.By("Preparing the nodes for SCTP") prepareSCTPModule(oc, sctpModule) exutil.By("1. Create the first namespace") oc.SetupProject() ns1 := oc.Namespace() exutil.By("2. Create a hello pod in ns1") createResourceFromFile(oc, ns1, statefulSetHelloPod) pod1Err := waitForPodWithLabelReady(oc, ns1, "app=hello") exutil.AssertWaitPollNoErr(pod1Err, "The statefulSet pod is not ready") pod1Name := getPodName(oc, ns1, "app=hello")[0] exutil.By("3. Create the 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() exutil.By("4. Create CRD for UDN in ns2") var cidr, ipv4cidr, ipv6cidr string ipStackType := checkIPStackType(oc) if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } udncrd := udnCRDResource{ crdname: "udn-l2-network-77742", namespace: ns2, role: "Primary", mtu: 1300, } if ipStackType == "dualstack" { udncrd.IPv4cidr = ipv4cidr udncrd.IPv6cidr = ipv6cidr udncrd.template = udnCRDdualStack udncrd.createLayer2DualStackUDNCRD(oc) } else { udncrd.cidr = cidr udncrd.template = udnCRDSingleStack udncrd.createLayer2SingleStackUDNCRD(oc) } err := waitUDNCRDApplied(oc, ns2, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. Create a udn hello pod in ns2") createResourceFromFile(oc, ns2, statefulSetHelloPod) pod2Err := waitForPodWithLabelReady(oc, ns2, "app=hello") exutil.AssertWaitPollNoErr(pod2Err, "The statefulSet pod is not ready") pod2Name := getPodName(oc, ns2, "app=hello")[0] exutil.By("6. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should not be able to access") PingPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name) CurlPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, false) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, false) exutil.By("7. Add annotation to expose default network port on udn pod") annotationConf := `k8s.ovn.org/open-default-ports=[{"protocol":"icmp"}, {"protocol":"tcp","port":` + strconv.Itoa(tcpPort) + `}, {"protocol":"udp","port":` + strconv.Itoa(udpPort) + `}, {"protocol":"sctp","port":` + strconv.Itoa(sctpPort) + `}]` err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("pod", pod2Name, "-n", ns2, "--overwrite", annotationConf).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("8. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should be able to access") PingPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name) CurlPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, true) verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, true) })
test case
openshift/openshift-tests-private
b97994ae-7b44-44fb-afe8-3e1438a16397
Author:meinli-Medium-78492-[CUDN layer3] Validate CUDN enable creating shared OVN network across multiple namespaces. [Serial]
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-Medium-78492-[CUDN layer3] Validate CUDN enable creating shared OVN network across multiple namespaces. [Serial]", func() { var ( udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") matchLabelKey = "test.io" matchValue = "cudn-network-" + getRandomString() crdName = "cudn-network-78492" ) exutil.By("1. Create three namespaces, first two for CUDN and label them with cudn selector, last namespace is for default network") var allNS []string for i := 0; i < 3; i++ { if i != 2 { oc.CreateNamespaceUDN() allNS = append(allNS, oc.Namespace()) } else { oc.SetupProject() allNS = append(allNS, oc.Namespace()) } if i < 2 { ns := allNS[i] defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } } exutil.By("2. create CUDN with two namespaces") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer3") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. create pods in ns1 and ns2, one pod in ns3") pods := make([]udnPodResource, 3) for i := 0; i < 3; i++ { pods[i] = udnPodResource{ name: "hello-pod-" + allNS[i], namespace: allNS[i], label: "hello-pod", template: udnPodTemplate, } defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace) pods[i].createUdnPod(oc) waitPodReady(oc, pods[i].namespace, pods[i].name) } exutil.By("4. check pods' interfaces") for i := 0; i < 2; i++ { podIP, _ := getPodIPUDN(oc, pods[i].namespace, pods[i].name, "ovn-udn1") o.Expect(podIP).NotTo(o.BeEmpty()) } output, err := e2eoutput.RunHostCmd(pods[2].namespace, pods[2].name, "ip -o link show") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).ShouldNot(o.ContainSubstring("ovn-udn1")) exutil.By("5. Validate CUDN pod traffic") CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) })
test case
openshift/openshift-tests-private
584bb4cc-2c57-4736-91fc-618d1d98eea0
Author:meinli-Medium-78598-[CUDN layer2] Validate CUDN enable creating shared OVN network across multiple namespaces.
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-Medium-78598-[CUDN layer2] Validate CUDN enable creating shared OVN network across multiple namespaces.", func() { var ( udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") matchLabelKey = "test.io" matchValue = "cudn-network-" + getRandomString() crdName = "cudn-network-78598" ) exutil.By("1. Create three namespaces, first two for CUDN and label them with cudn selector, last namespace is for default network") var allNS []string for i := 0; i < 3; i++ { if i != 2 { oc.CreateNamespaceUDN() allNS = append(allNS, oc.Namespace()) } else { oc.SetupProject() allNS = append(allNS, oc.Namespace()) } if i < 2 { ns := allNS[i] defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } } exutil.By("2. create CUDN with two namespaces") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer3") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. create pods in ns1 and ns2, one pod in ns3") pods := make([]udnPodResource, 3) for i := 0; i < 3; i++ { pods[i] = udnPodResource{ name: "hello-pod-" + allNS[i], namespace: allNS[i], label: "hello-pod", template: udnPodTemplate, } defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace) pods[i].createUdnPod(oc) waitPodReady(oc, pods[i].namespace, pods[i].name) } exutil.By("4. check pods' interfaces") for i := 0; i < 2; i++ { podIP, _ := getPodIPUDN(oc, pods[i].namespace, pods[i].name, "ovn-udn1") o.Expect(podIP).NotTo(o.BeEmpty()) } output, err := e2eoutput.RunHostCmd(pods[2].namespace, pods[2].name, "ip -o link show") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).ShouldNot(o.ContainSubstring("ovn-udn1")) exutil.By("5. Validate CUDN pod traffic") CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) })
test case
openshift/openshift-tests-private
eb54c84b-3d26-4e55-9205-cd2849188fd9
Author:anusaxen-Low-77752-Check udn pods isolation with udn crd and native NAD integration
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:anusaxen-Low-77752-Check udn pods isolation with udn crd and native NAD integration", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) o.Expect(ipStackType).NotTo(o.BeEmpty()) if ipStackType != "ipv4single" { g.Skip("This case requires IPv4 single stack cluster") } var cidr string var prefix int32 if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" prefix = 24 } exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadNS := []string{ns1, ns2} nadResourcename := []string{"l3-network-" + nadNS[0], "l3-network-" + nadNS[1]} exutil.By(fmt.Sprintf("create native NAD %s in namespace %s", nadResourcename[0], nadNS[0])) nad := udnNetDefResource{ nadname: nadResourcename[0], namespace: nadNS[0], nad_network_name: nadResourcename[0], topology: "layer3", subnet: "10.150.0.0/16/24", mtu: mtu, net_attach_def_name: nadNS[0] + "/" + nadResourcename[0], role: "primary", template: udnNadtemplate, } nad.createUdnNad(oc) exutil.By(fmt.Sprintf("create crd NAD %s in namespace %s", nadResourcename[1], nadNS[1])) udncrd := udnCRDResource{ crdname: nadResourcename[1], namespace: nadNS[1], role: "Primary", mtu: mtu, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) err := waitUDNCRDApplied(oc, nadNS[1], udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) pod := make([]udnPodResource, 2) for i := 0; i < 2; i++ { exutil.By("create a udn hello pod in ns1 and ns2") pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } //udn network connectivity should be isolated CurlPod2PodFailUDN(oc, nadNS[0], pod[0].name, nadNS[1], pod[1].name) //default network connectivity should also be isolated CurlPod2PodFail(oc, nadNS[0], pod[0].name, nadNS[1], pod[1].name) })
test case
openshift/openshift-tests-private
4c5717d5-354a-4d20-a28d-5a30abe993bb
Author:meinli-Medium-79003-[CUDN layer3] Verify that patching namespaces for existing CUDN functionality operate as intended
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-Medium-79003-[CUDN layer3] Verify that patching namespaces for existing CUDN functionality operate as intended", func() { var ( udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") key = "test.cudn.layer3" crdName = "cudn-network-79003" values = []string{"value-79003-1", "value-79003-2"} ) exutil.By("1. create two namespaces and label them") oc.CreateNamespaceUDN() allNS := []string{oc.Namespace()} oc.CreateNamespaceUDN() allNS = append(allNS, oc.Namespace()) for i := 0; i < 2; i++ { defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[i], fmt.Sprintf("%s-", key)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("2. create CUDN in ns1") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) cudncrd, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer3", []string{values[0], ""}) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. patch namespaces for CUDN") patchCmd := fmt.Sprintf("{\"spec\":{\"namespaceSelector\":{\"matchExpressions\":[{\"key\": \"%s\", \"operator\": \"In\", \"values\": [\"%s\", \"%s\"]}]}}}", key, values[0], values[1]) patchResourceAsAdmin(oc, fmt.Sprintf("clusteruserdefinednetwork.k8s.ovn.org/%s", cudncrd.crdname), patchCmd) err = waitCUDNCRDApplied(oc, cudncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteruserdefinednetwork.k8s.ovn.org", cudncrd.crdname, "-ojsonpath={.status.conditions[*].message}").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(allNS[1])) exutil.By("4. create pods in ns1 and ns2") pods := make([]udnPodResource, 2) for i, ns := range allNS { pods[i] = udnPodResource{ name: "hello-pod-" + ns, namespace: ns, label: "hello-pod", template: udnPodTemplate, } defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace) pods[i].createUdnPod(oc) waitPodReady(oc, pods[i].namespace, pods[i].name) } exutil.By("5. validate connection from CUDN pod to CUDN pod") CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) exutil.By("6. unlabel ns2") err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[1], fmt.Sprintf("%s-", key)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = waitCUDNCRDApplied(oc, cudncrd.crdname) o.Expect(err).To(o.HaveOccurred()) output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteruserdefinednetwork.k8s.ovn.org", cudncrd.crdname, "-ojsonpath={.status.conditions[*].message}").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("failed to delete NetworkAttachmentDefinition [%s/%s]", allNS[1], cudncrd.crdname))) exutil.By("7. validate connection from CUDN pod to CUDN pod") CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) })
test case
openshift/openshift-tests-private
c4ca4fd7-27cf-4215-9d5a-495a8ea23e47
Author:meinli-Medium-78742-[CUDN layer2] Validate pod2pod traffic between CUDN and UDN NAD. [Serial]
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-Medium-78742-[CUDN layer2] Validate pod2pod traffic between CUDN and UDN NAD. [Serial]", func() { var ( udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") key = "test.cudn.layer2" crdName = "cudn-network-78742" values = []string{"value-78742-1", "value-78742-2"} ) exutil.By("1. create three namespaces, first and second for CUDN, third for UDN NAD") oc.CreateNamespaceUDN() cudnNS := []string{oc.Namespace()} oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) for i := 0; i < 2; i++ { defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s-", key)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } oc.CreateNamespaceUDN() nadNS := oc.Namespace() exutil.By("2. create CUDN in cudnNS") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer2", values) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. create UDN NAD in nadNS") var subnet string if ipStackType == "ipv4single" { subnet = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { subnet = "2011:100:200::0/60" } else { subnet = "10.151.0.0/16,2011:100:200::0/60" } } nadResourcename := "l2-network" + nadNS nad := udnNetDefResource{ nadname: nadResourcename, namespace: nadNS, nad_network_name: nadResourcename, topology: "layer2", subnet: subnet, mtu: 1300, net_attach_def_name: nadNS + "/" + nadResourcename, role: "primary", template: udnNadtemplate, } nad.createUdnNad(oc) exutil.By("Verifying the configued NetworkAttachmentDefinition") if checkNAD(oc, nadNS, nadResourcename) { e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename) } else { e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename) } exutil.By("4. create pods in cudnNS and nadNS") pods := make([]udnPodResource, 3) for i, ns := range append(cudnNS, nadNS) { pods[i] = udnPodResource{ name: "hello-pod-" + ns, namespace: ns, label: "hello-pod", template: udnPodTemplate, } defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace) pods[i].createUdnPod(oc) waitPodReady(oc, pods[i].namespace, pods[i].name) } exutil.By("5. Validate isolation from UDN NAD pod to CUDN pod") CurlPod2PodFailUDN(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name) //default network connectivity should also be isolated CurlPod2PodFail(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name) exutil.By("6. Validate isolation from CUDN pod to UDN NAD pod") CurlPod2PodFailUDN(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name) //default network connectivity should also be isolated CurlPod2PodFail(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name) exutil.By("7. Validate connection among CUDN pods") CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) //default network connectivity should be isolated CurlPod2PodFail(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) })
test case
openshift/openshift-tests-private
02f1f913-ea15-4181-834d-a80f67d7d32f
Author:meinli-Medium-78496-[CUDN layer3] Validate conflicted creation when CUDN and UDN created in the same namespace.
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-Medium-78496-[CUDN layer3] Validate conflicted creation when CUDN and UDN created in the same namespace.", func() { var ( udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") matchLabelKey = "test.io" matchValue = "cudn-network-" + getRandomString() crdName = "cudn-network-78496" ) exutil.By("1. create two namespaces") oc.CreateNamespaceUDN() allNS := []string{oc.Namespace()} oc.CreateNamespaceUDN() allNS = append(allNS, oc.Namespace()) for _, ns := range allNS { defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("2. Create UDN CRD and pod in ns1") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } createGeneralUDNCRD(oc, allNS[0], "udn-network-78496-ns1", ipv4cidr, ipv6cidr, cidr, "layer3") udnpod := udnPodResource{ name: "hello-pod-" + allNS[0], namespace: allNS[0], label: "hello-pod", template: udnPodTemplate, } defer removeResource(oc, true, true, "pod", udnpod.name, "-n", udnpod.namespace) udnpod.createUdnPod(oc) waitPodReady(oc, udnpod.namespace, udnpod.name) exutil.By("3. create CUDN in ns1 and ns2") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) cudncrd, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer3") o.Expect(err).To(o.HaveOccurred()) exutil.By("4. Create pods in ns2") cudnpod := udnPodResource{ name: "hello-pod-" + allNS[1], namespace: allNS[1], label: "hello-pod", template: udnPodTemplate, } defer removeResource(oc, true, true, "pod", cudnpod.name, "-n", cudnpod.namespace) cudnpod.createUdnPod(oc) waitPodReady(oc, cudnpod.namespace, cudnpod.name) exutil.By("5. validate CUDN in ns1 create failed and CUDN in ns2 create successfully") output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("clusteruserdefinednetwork.k8s.ovn.org", cudncrd.crdname).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("primary network already exist in namespace \"%s\"", allNS[0]))) cudnPodIP, _ := getPodIPUDN(oc, cudnpod.namespace, cudnpod.name, "ovn-udn1") o.Expect(cudnPodIP).NotTo(o.BeEmpty()) exutil.By("6. validate traffic isolation between UDN pod and CUDN pod") CurlPod2PodFailUDN(oc, allNS[0], udnpod.name, allNS[1], cudnpod.name) })
test case
openshift/openshift-tests-private
84d82f43-051a-4cdd-81d1-ec0c60c3fc83
Author:meinli-Medium-78741-[CUDN layer3] validate pod2pod traffic between CUDN and UDN CRD. [Serial]
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
g.It("Author:meinli-Medium-78741-[CUDN layer3] validate pod2pod traffic between CUDN and UDN CRD. [Serial]", func() { var ( udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") matchLabelKey = "test.io" matchValue = "cudn-network-" + getRandomString() crdName = "cudn-network-78741" ) exutil.By("1. create three namespaces, first and second for CUDN, third for UDN") oc.CreateNamespaceUDN() cudnNS := []string{oc.Namespace()} oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) for _, ns := range cudnNS { defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } oc.CreateNamespaceUDN() udnNS := oc.Namespace() exutil.By("2. create CUDN in cudnNS") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer3") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. create UDN in ns3") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" } } createGeneralUDNCRD(oc, udnNS, "udn-network-78741", ipv4cidr, ipv6cidr, cidr, "layer3") exutil.By("4. create pods in namespaces") pods := make([]udnPodResource, 3) for i, ns := range append(cudnNS, udnNS) { pods[i] = udnPodResource{ name: "hello-pod-" + ns, namespace: ns, label: "hello-pod", template: udnPodTemplate, } defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace) pods[i].createUdnPod(oc) waitPodReady(oc, pods[i].namespace, pods[i].name) } exutil.By("5. Validate isolation from UDN pod to CUDN pod") CurlPod2PodFailUDN(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name) //default network connectivity should also be isolated CurlPod2PodFail(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name) exutil.By("6. Validate isolation from CUDN pod to UDN pod") CurlPod2PodFailUDN(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name) //default network connectivity should also be isolated CurlPod2PodFail(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name) exutil.By("7. Validate connection among CUDN pods") CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) //default network connectivity should be isolated CurlPod2PodFail(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name) })
test
openshift/openshift-tests-private
9dbc3232-5324-4144-8a09-c2708a8f2bd4
service_udn
import ( "context" "fmt" "net" "os/exec" "path/filepath" "strconv" "strings" "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" exutil "github.com/openshift/openshift-tests-private/test/extended/util" "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" )
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
package networking import ( "context" "fmt" "net" "os/exec" "path/filepath" "strconv" "strings" "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" exutil "github.com/openshift/openshift-tests-private/test/extended/util" "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" ) var _ = g.Describe("[sig-networking] SDN udn services", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("networking-udn", exutil.KubeConfigPath()) testDataDirUDN = exutil.FixturePath("testdata", "networking/udn") ) g.BeforeEach(func() { SkipIfNoFeatureGate(oc, "NetworkSegmentation") networkType := checkNetworkType(oc) if !strings.Contains(networkType, "ovn") { g.Skip("Skip testing on non-ovn cluster!!!") } }) g.It("Author:huirwang-High-76017-Service should be able to access for same NAD UDN pods in different namespaces (L3/L2).", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") mtu int32 = 1300 ipFamilyPolicy = "SingleStack" ) ipStackType := checkIPStackType(oc) exutil.By("Get first namespace") var nadNS []string = make([]string, 0, 4) exutil.By("Create another 3 namespaces") for i := 0; i < 4; i++ { oc.CreateNamespaceUDN() nadNS = append(nadNS, oc.Namespace()) } nadResourcename := []string{"l3-network-test", "l2-network-test"} topo := []string{"layer3", "layer3", "layer2", "layer2"} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16/24", "10.150.0.0/16/24", "10.152.0.0/16", "10.152.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2010:100:200::0/60", "2012:100:200::0/60", "2012:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"} ipFamilyPolicy = "PreferDualStack" } } exutil.By("5. Create same NAD in ns1 ns2 for layer3") nad := make([]udnNetDefResource, 4) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[0], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[0], namespace: nadNS[i], nad_network_name: nadResourcename[0], // Need to use same nad name topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[0], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("6. Create same NAD in ns3 ns4 for layer 2") for i := 2; i < 4; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[1], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[1], namespace: nadNS[i], nad_network_name: nadResourcename[1], topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[1], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("7. 
Create one pod in respective namespaces ns1,ns2,ns3,ns4") pod := make([]udnPodResource, 4) for i := 0; i < 4; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) // add a step to check ovn-udn1 created. output, err := e2eoutput.RunHostCmd(pod[i].namespace, pod[i].name, "ip -o link show") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("ovn-udn1")) } exutil.By("8. Create service in ns2,ns4") svc1 := genericServiceResource{ servicename: "test-service", namespace: nadNS[1], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc1.createServiceFromParams(oc) svc2 := genericServiceResource{ servicename: "test-service", namespace: nadNS[3], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc2.createServiceFromParams(oc) exutil.By("9. Verify ClusterIP service in ns2 can be accessed from pod in ns1 for layer 3") CurlPod2SvcPass(oc, nadNS[0], nadNS[1], pod[0].name, svc1.servicename) exutil.By("10. Verify ClusterIP service in ns4 can be accessed from pod in ns3 for layer 2") CurlPod2SvcPass(oc, nadNS[2], nadNS[3], pod[2].name, svc2.servicename) }) g.It("Author:huirwang-Medium-76016-Service exists before NAD is created (L3/L2).", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") mtu int32 = 1300 ipFamilyPolicy = "SingleStack" ) ipStackType := checkIPStackType(oc) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadResourcename := []string{"l3-network-" + ns1, "l2-network-" + ns2} nadNS := []string{ns1, ns2} topo := []string{"layer3", "layer2"} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16/24", "10.152.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2012:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"} ipFamilyPolicy = "PreferDualStack" } } exutil.By("3. Create a service without any serving pods") svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) } exutil.By("4. 
Create NAD in ns1 ns2 for layer3,layer2") nad := make([]udnNetDefResource, 4) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: nadResourcename[i], topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("7. Create 2 pods in ns1,ns2") pod := make([]udnPodResource, 4) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("7. Create another two pods in ns1,ns2") for i := 2; i < 4; i++ { pod[i] = udnPodResource{ name: "hello-pod-test", namespace: nadNS[i-2], label: "hello-pod-test", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("Verify the service can be accessed for layer 3") CurlPod2SvcPass(oc, ns1, ns1, pod[2].name, svc[0].servicename) exutil.By("Verify the service can be accessed for layer 2") CurlPod2SvcPass(oc, ns2, ns2, pod[3].name, svc[1].servicename) }) g.It("Author:huirwang-High-76796-Idling/Unidling services should work for UDN pods. (L3/L2).", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1.Get first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadResourcename := []string{"l3-network-" + ns1, "l2-network-" + ns2} nadNS := []string{ns1, ns2} topo := []string{"layer3", "layer2"} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16/24", "10.152.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2012:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"} } } exutil.By("3. 
Create NAD in ns1 ns2 for layer3,layer2") nad := make([]udnNetDefResource, 4) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: nadResourcename[i], topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } for i := 0; i < len(nadNS); i++ { exutil.By(fmt.Sprintf("Create a service in namespace %v.", nadNS[i])) createResourceFromFile(oc, nadNS[i], testSvcFile) waitForPodWithLabelReady(oc, nadNS[i], "name=test-pods") svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } if ipStackType == "dualstack" { svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("Recreate dualstack service in namepsace %v.", nadNS[i])) removeResource(oc, true, true, "service", "test-service", "-n", nadNS[i]) svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "test-pods", serviceType: "ClusterIP", ipFamilyPolicy: "PreferDualStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } } exutil.By("6. idle test-service") idleOutput, idleErr := oc.AsAdmin().WithoutNamespace().Run("idle").Args("-n", ns1, "test-service").Output() o.Expect(idleErr).NotTo(o.HaveOccurred()) o.Expect(idleOutput).To(o.ContainSubstring("The service \"%v/test-service\" has been marked as idled", ns1)) idleOutput, idleErr = oc.AsAdmin().WithoutNamespace().Run("idle").Args("-n", ns2, "test-service").Output() o.Expect(idleErr).NotTo(o.HaveOccurred()) o.Expect(idleOutput).To(o.ContainSubstring("The service \"%v/test-service\" has been marked as idled", ns2)) exutil.By("7. check test pod in ns1 terminated") getPodOutput := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { output, getPodErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns1).Output() o.Expect(getPodErr).NotTo(o.HaveOccurred()) e2e.Logf("pods status: %s", output) if strings.Contains(output, "No resources found") { return true, nil } e2e.Logf("pods are not terminated, try again") return false, nil }) exutil.AssertWaitPollNoErr(getPodOutput, fmt.Sprintf("Fail to terminate pods:%s", getPodOutput)) exutil.By("8. check test pod in ns2 terminated") getPodOutput = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { output, getPodErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns2).Output() o.Expect(getPodErr).NotTo(o.HaveOccurred()) e2e.Logf("pods status: %s", output) if strings.Contains(output, "No resources found") { return true, nil } e2e.Logf("pods are not terminated, try again") return false, nil }) exutil.AssertWaitPollNoErr(getPodOutput, fmt.Sprintf("Fail to terminate pods:%s", getPodOutput)) exutil.By("9. 
Create a test pod in ns1,ns2") pod := make([]udnPodResource, 2) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("10. Verify unidling the service can be accessed for layer 3") svcIP1, svcIP2 := getSvcIP(oc, ns1, "test-service") if svcIP2 != "" { _, err := e2eoutput.RunHostCmdWithRetries(ns1, pod[0].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod[0].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP2, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmdWithRetries(ns1, pod[0].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("11. Verify unidling the service can be accessed for layer 2") svcIP1, svcIP2 = getSvcIP(oc, ns2, "test-service") if svcIP2 != "" { _, err := e2eoutput.RunHostCmdWithRetries(ns2, pod[1].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) _, err = e2eoutput.RunHostCmdWithRetries(ns2, pod[1].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP2, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmdWithRetries(ns2, pod[1].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } }) g.It("Author:huirwang-Critical-76732-Validate pod2Service/nodePortService for UDN(Layer2)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_dualstack_template.yaml") udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_singlestack_template.yaml") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } exutil.By("Create CRD for UDN") var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-76732", namespace: ns1, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv6cidr: ipv6cidr, template: udnCRDdualStack, } udncrd.createLayer2DualStackUDNCRD(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-76732", namespace: ns1, role: "Primary", mtu: 1400, cidr: cidr, template: udnCRDSingleStack, } udncrd.createLayer2SingleStackUDNCRD(oc) } exutil.By("3. 
Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") clientPod1 := pingPodResourceNode{ name: "client-pod-1", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } clientPod1.createPingPodNode(oc) waitPodReady(oc, clientPod1.namespace, clientPod1.name) // Update label for pod2 to a different one err := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", clientPod1.name, "name=client-pod-1", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns1 on same node as pod1") clientPod2 := pingPodResourceNode{ name: "client-pod-2", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } clientPod2.createPingPodNode(oc) waitPodReady(oc, clientPod2.namespace, clientPod2.name) // Update label for pod3 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", clientPod2.name, "name=client-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. create a service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. Verify ClusterIP service can be accessed from both clientPod1 and clientPod2") CurlPod2SvcPass(oc, ns1, ns1, clientPod1.name, svc.servicename) CurlPod2SvcPass(oc, ns1, ns1, clientPod2.name, svc.servicename) exutil.By("8. Create a second namespace") oc.SetupProject() ns2 := oc.Namespace() exutil.By("9. Create service and pods which are on default network.") createResourceFromFile(oc, ns2, testPodFile) err = waitForPodWithLabelReady(oc, ns2, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, ns2, "name=test-pods") exutil.By("10. Not be able to access udn service from default network.") CurlPod2SvcFail(oc, ns2, ns1, testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from udn network.") CurlPod2SvcFail(oc, ns1, ns2, clientPod1.name, "test-service") exutil.By("11. Create third namespace for udn pod") oc.CreateNamespaceUDN() ns3 := oc.Namespace() exutil.By("12. Create CRD in third namespace") if ipStackType == "ipv4single" { cidr = "10.160.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:200:200::0/48" } else { ipv4cidr = "10.160.0.0/16" ipv6cidr = "2010:200:200::0/48" } } var udncrdns3 udnCRDResource if ipStackType == "dualstack" { udncrdns3 = udnCRDResource{ crdname: "udn-network-ds-76732-ns3", namespace: ns3, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv6cidr: ipv6cidr, template: udnCRDdualStack, } udncrdns3.createLayer2DualStackUDNCRD(oc) } else { udncrdns3 = udnCRDResource{ crdname: "udn-network-ss-76732-ns3", namespace: ns3, role: "Primary", mtu: 1400, cidr: cidr, template: udnCRDSingleStack, } udncrdns3.createLayer2SingleStackUDNCRD(oc) } err = waitUDNCRDApplied(oc, ns3, udncrdns3.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. 
Create a udn pod in third namespace") createResourceFromFile(oc, ns3, testPodFile) err = waitForPodWithLabelReady(oc, ns3, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, ns3, "name=test-pods") exutil.By("14. Verify different udn network, service was isolated.") CurlPod2SvcFail(oc, ns3, ns1, testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for udn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, ns1, ns1, clientPod2.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, ns1, ns1, clientPod1.name, svc.servicename) exutil.By("16. Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") removeResource(oc, true, true, "service", "test-service", "-n", ns1) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) //Ignore below steps because of bug https://issues.redhat.com/browse/OCPBUGS-43085 exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) }) g.It("Author:huirwang-Critical-75942-Validate pod2Service/nodePortService for UDN(Layer3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_dualstack2_template.yaml") udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := 
e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" prefix = 24 } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" prefix = 64 } else { ipv4cidr = "10.150.0.0/16" ipv4prefix = 24 ipv6cidr = "2010:100:200::0/48" ipv6prefix = 64 ipFamilyPolicy = "PreferDualStack" } } var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-ds-75942", namespace: ns1, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr, IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-ss-75942", namespace: ns1, role: "Primary", mtu: 1400, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) } err := waitUDNCRDApplied(oc, ns1, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns1 on same node as pod1") pod3ns1 := pingPodResourceNode{ name: "hello-pod-3", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod3ns1.createPingPodNode(oc) waitPodReady(oc, pod3ns1.namespace, pod3ns1.name) // Update label for pod3 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod3ns1.name, "name=hello-pod-3", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. Verify ClusterIP service can be accessed from both pod2 and pod3") CurlPod2SvcPass(oc, ns1, ns1, pod2ns1.name, svc.servicename) CurlPod2SvcPass(oc, ns1, ns1, pod3ns1.name, svc.servicename) exutil.By("8. Create second namespace") oc.SetupProject() ns2 := oc.Namespace() exutil.By("9. 
Create service and pods which are on default network.") createResourceFromFile(oc, ns2, testPodFile) err = waitForPodWithLabelReady(oc, ns2, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, ns2, "name=test-pods") exutil.By("10. Not be able to access udn service from default network.") CurlPod2SvcFail(oc, ns2, ns1, testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from udn network.") CurlPod2SvcFail(oc, ns1, ns2, pod2ns1.name, "test-service") exutil.By("11. Create third namespace for udn pod") oc.CreateNamespaceUDN() ns3 := oc.Namespace() exutil.By("12. Create CRD in third namespace") if ipStackType == "ipv4single" { cidr = "10.160.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:200:200::0/48" prefix = 64 } else { ipv4cidr = "10.160.0.0/16" ipv4prefix = 24 ipv6cidr = "2010:200:200::0/48" ipv6prefix = 64 } } var udncrdns3 udnCRDResource if ipStackType == "dualstack" { udncrdns3 = udnCRDResource{ crdname: "udn-network-ds-75942-ns3", namespace: ns3, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr, IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrdns3.createUdnCRDDualStack(oc) } else { udncrdns3 = udnCRDResource{ crdname: "udn-network-ss-75942-ns3", namespace: ns3, role: "Primary", mtu: 1400, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrdns3.createUdnCRDSingleStack(oc) } err = waitUDNCRDApplied(oc, ns3, udncrdns3.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. Create a udn pod in third namespace") createResourceFromFile(oc, ns3, testPodFile) err = waitForPodWithLabelReady(oc, ns3, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, ns3, "name=test-pods") exutil.By("14. Verify different udn network, service was isolated.") CurlPod2SvcFail(oc, ns3, ns1, testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for udn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, ns1, ns1, pod3ns1.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, ns1, ns1, pod2ns1.name, svc.servicename) exutil.By("16. 
Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) //Ignore below steps because of bug https://issues.redhat.com/browse/OCPBUGS-43085 exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) }) g.It("Author:meinli-Critical-78238-Validate host/pod to nodeport with externalTrafficPolicy is local/cluster on same/diff workers (UDN layer3 and default network)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) exutil.By("0. Get three worker nodes") nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This case requires 3 nodes, but the cluster has less than three nodes") } exutil.By("1. Create two namespaces, first one is for default network and second is for UDN and then label namespaces") ns1 := oc.Namespace() oc.CreateNamespaceUDN() ns2 := oc.Namespace() ns := []string{ns1, ns2} for _, namespace := range ns { err = exutil.SetNamespacePrivileged(oc, namespace) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("2. 
Create UDN CRD in ns2") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" prefix = 24 } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" prefix = 64 } else { ipv4cidr = "10.150.0.0/16" ipv4prefix = 24 ipv6cidr = "2010:100:200::0/48" ipv6prefix = 64 ipFamilyPolicy = "PreferDualStack" } } var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-ds-78238", namespace: ns2, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr, IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-ss-78238", namespace: ns2, role: "Primary", mtu: 1400, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) } err = waitUDNCRDApplied(oc, ns2, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create two pods and nodeport service with externalTrafficPolicy=Local in ns1 and ns2") nodeportsLocal := []string{} pods := make([]pingPodResourceNode, 2) svcs := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("3.%d Create pod and nodeport service with externalTrafficPolicy=Local in %s", i, ns[i])) pods[i] = pingPodResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns[i], nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pods[i].createPingPodNode(oc) waitPodReady(oc, ns[i], pods[i].name) svcs[i] = genericServiceResource{ servicename: "test-service" + strconv.Itoa(i), namespace: ns[i], protocol: "TCP", selector: "hello-pod", serviceType: "NodePort", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Local", template: genericServiceTemplate, } svcs[i].createServiceFromParams(oc) nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns[i], svcs[i].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeportsLocal = append(nodeportsLocal, nodePort) } exutil.By("4. Validate pod/host to nodeport service with externalTrafficPolicy=Local traffic") for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("4.1.%d Validate pod to nodeport service with externalTrafficPolicy=Local traffic in %s", i, ns[i])) CurlPod2NodePortPass(oc, ns[i], pods[i].name, nodeList.Items[0].Name, nodeportsLocal[i]) CurlPod2NodePortFail(oc, ns[i], pods[i].name, nodeList.Items[1].Name, nodeportsLocal[i]) } exutil.By("4.2 Validate host to nodeport service with externalTrafficPolicy=Local traffic on default network") CurlNodePortPass(oc, nodeList.Items[2].Name, nodeList.Items[0].Name, nodeportsLocal[0]) CurlNodePortFail(oc, nodeList.Items[2].Name, nodeList.Items[1].Name, nodeportsLocal[0]) exutil.By("4.3 Validate UDN pod to default network nodeport service with externalTrafficPolicy=Local traffic") CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[0].Name, nodeportsLocal[0]) CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[1].Name, nodeportsLocal[0]) exutil.By("5. 
exutil.By("5. Create nodeport service with externalTrafficPolicy=Cluster in ns1 and ns2") nodeportsCluster := []string{} for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("5.%d Recreate nodeport service with externalTrafficPolicy=Cluster in %s", i, ns[i])) removeResource(oc, true, true, "svc", "test-service"+strconv.Itoa(i), "-n", ns[i]) svcs[i].externalTrafficPolicy = "Cluster" svcs[i].createServiceFromParams(oc) nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns[i], svcs[i].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeportsCluster = append(nodeportsCluster, nodePort) } exutil.By("6. Validate pod/host to nodeport service with externalTrafficPolicy=Cluster traffic") for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("6.1.%d Validate pod to nodeport service with externalTrafficPolicy=Cluster traffic in %s", i, ns[i])) CurlPod2NodePortPass(oc, ns[i], pods[i].name, nodeList.Items[0].Name, nodeportsCluster[i]) CurlPod2NodePortPass(oc, ns[i], pods[i].name, nodeList.Items[1].Name, nodeportsCluster[i]) } exutil.By("6.2 Validate host to nodeport service with externalTrafficPolicy=Cluster traffic on default network") CurlNodePortPass(oc, nodeList.Items[2].Name, nodeList.Items[0].Name, nodeportsCluster[0]) CurlNodePortPass(oc, nodeList.Items[2].Name, nodeList.Items[1].Name, nodeportsCluster[0]) exutil.By("6.3 Validate UDN pod to default network nodeport service with externalTrafficPolicy=Cluster traffic") CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[0].Name, nodeportsCluster[0]) CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[1].Name, nodeportsCluster[0]) }) g.It("Author:huirwang-High-76014-Validate LoadBalancer service for UDN pods (Layer3/Layer2)", func() { buildPruningBaseDir := exutil.FixturePath("testdata", "networking") udnPodTemplate := filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") udnCRDSingleStack := filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml") udnL2CRDSingleStack := filepath.Join(testDataDirUDN, "udn_crd_layer2_singlestack_template.yaml") platform := exutil.CheckPlatform(oc) e2e.Logf("platform %s", platform) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") || strings.Contains(platform, "aws") if !acceptedPlatform { g.Skip("Test cases should be run on connected AWS,GCP, Azure, skip for other platforms or disconnected cluster!!") } exutil.By("1. Get namespaces and create a new namespace ") oc.CreateNamespaceUDN() ns1 := oc.Namespace() oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadNS := []string{ns1, ns2} exutil.By("2. Create CRD for UDN for layer 3") udncrd := udnCRDResource{ crdname: "udn-network-l3-76014", namespace: nadNS[0], role: "Primary", mtu: 1400, cidr: "10.200.0.0/16", prefix: 24, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) err := waitUDNCRDApplied(oc, nadNS[0], udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create CRD for UDN for layer 2") udnl2crd := udnCRDResource{ crdname: "udn-network-l2-76014", namespace: nadNS[1], role: "Primary", mtu: 1400, cidr: "10.210.0.0/16", template: udnL2CRDSingleStack, } udnl2crd.createLayer2SingleStackUDNCRD(oc) err = waitUDNCRDApplied(oc, nadNS[1], udnl2crd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("4. 
Create a pod for service per namespace.") pod := make([]udnPodResource, 2) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("5. Create LoadBalancer service.") svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "hello-pod", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i], svc[i].servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc[i].servicename)) } exutil.By("6. Get LoadBalancer service URL.") var svcExternalIP [2]string for i := 0; i < 2; i++ { if platform == "aws" { svcExternalIP[i] = getLBSVCHostname(oc, nadNS[i], svc[i].servicename) } else { svcExternalIP[i] = getLBSVCIP(oc, nadNS[i], svc[i].servicename) } e2e.Logf("Got externalIP service IP: %v from namespace %s", svcExternalIP[i], nadNS[i]) o.Expect(svcExternalIP[i]).NotTo(o.BeEmpty()) } exutil.By("7.Curl the service from test runner\n") var svcURL, svcCmd [2]string for i := 0; i < 2; i++ { svcURL[i] = net.JoinHostPort(svcExternalIP[i], "27017") svcCmd[i] = fmt.Sprintf("curl %s --connect-timeout 30", svcURL[i]) e2e.Logf("\n svcCmd: %v\n", svcCmd[i]) err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcCmd[i]).Output() if err1 != nil || !strings.Contains(string(output), "Hello OpenShift") { e2e.Logf("got err:%v, and try next round", err1) return false, nil } e2e.Logf("The external service %v access passed!", svcURL[i]) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Fail to curl the externalIP service from test runner %s", svcURL[i])) } }) g.It("Author:huirwang-NonHyperShiftHOST-High-76019-Validate ExternalIP service for UDN pods (Layer3), [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, ns1, "udn-network-76019-ns1", ipv4cidr, ipv6cidr, cidr, "layer3") exutil.By("3. 
Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("6. Find externalIP") nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[0].Name) externalIP := nodeIP2 exutil.By("7.Patch update network.config to enable externalIP \n") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") if ipStackType == "dualstack" { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\",\""+nodeIP1+"\"]}}}}") } else { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\"]}}}}") } exutil.By("8.Patch ExternalIP to service\n") patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(externalIP)) exutil.By("9.Validate the externalIP service can be accessed from another udn pod. 
\n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("10.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) if ipStackType == "dualstack" { exutil.By("10.Retest it with IPv6 address in dualstack cluster\n") exutil.By("11.Patch IPv6 ExternalIP to service\n") externalIP := nodeIP1 patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("12.Validate the externalIP service can be accessed from another udn pod. \n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("14.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } }) g.It("Author:huirwang-High-77827-Restarting ovn pods should not break service. [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") ) exutil.By("1.Get first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadNS := []string{ns1, ns2} exutil.By("3. Create CRD for layer3 UDN in first namespace.") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } createGeneralUDNCRD(oc, nadNS[0], "udn-network-77827-ns1", ipv4cidr, ipv6cidr, cidr, "layer3") exutil.By("4. Create CRD for layer2 UDN in second namespace.") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" } } createGeneralUDNCRD(oc, nadNS[1], "udn-network-77827-ns2", ipv4cidr, ipv6cidr, cidr, "layer2") exutil.By("5. 
Create service and test pods in both namespaces.") for i := 0; i < len(nadNS); i++ { exutil.By(fmt.Sprintf("Create a service in namespace %v.", nadNS[i])) createResourceFromFile(oc, nadNS[i], testSvcFile) waitForPodWithLabelReady(oc, nadNS[i], "name=test-pods") svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } if ipStackType == "dualstack" { svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("Recreate dualstack service in namepsace %v.", nadNS[i])) removeResource(oc, true, true, "service", "test-service", "-n", nadNS[i]) svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "test-pods", serviceType: "ClusterIP", ipFamilyPolicy: "PreferDualStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } } exutil.By("6. Create a client test pod in ns1,ns2") pod := make([]udnPodResource, 2) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("7. Restart ovn pods") err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", "openshift-ovn-kubernetes").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.AssertAllPodsToBeReady(oc, "openshift-ovn-kubernetes") exutil.By("8. Verify the service can be accessed for layer2.") for i := 0; i < 3; i++ { CurlPod2SvcPass(oc, nadNS[1], nadNS[1], pod[1].name, "test-service") } exutil.By("9. Verify the service can be accessed for layer3.") /* https://issues.redhat.com/browse/OCPBUGS-44174 for i := 0; i < 3; i++ { CurlPod2SvcPass(oc, nadNS[0], nadNS[0], pod[0].name, "test-service") }*/ }) g.It("Author:huirwang-NonHyperShiftHOST-High-76731-Validate ExternalIP service for UDN pods (Layer2), [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, ns1, "udn-network-76731-ns1", ipv4cidr, ipv6cidr, cidr, "layer2") exutil.By("3. 
Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("6. Find externalIP") nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[0].Name) externalIP := nodeIP2 exutil.By("7.Patch update network.config to enable externalIP \n") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") if ipStackType == "dualstack" { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\",\""+nodeIP1+"\"]}}}}") } else { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\"]}}}}") } exutil.By("8.Patch ExternalIP to service\n") patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(externalIP)) exutil.By("9.Validate the externalIP service can be accessed from another udn pod. 
\n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("10.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) if ipStackType == "dualstack" { exutil.By("10.Retest it with IPv6 address in dualstack cluster\n") exutil.By("11.Patch IPv6 ExternalIP to service\n") externalIP := nodeIP1 patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("12.Validate the externalIP service can be accessed from another udn pod. \n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("14.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } }) g.It("Author:huirwang-High-78767-Validate service for CUDN(Layer3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" key = "test.cudn.layer3" crdName = "cudn-network-78767" crdName2 = "cudn-network-78767-2" values = []string{"value-78767-1", "value-78767-2"} values2 = []string{"value2-78767-1", "value2-78767-2"} cudnNS = []string{} ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Create CRD for CUDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer3", values) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2. 
Create 2 namespaces and add related values.") for i := 0; i < 2; i++ { oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s-", key)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: cudnNS[0], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns1.name, "-n", pod1ns1.namespace) pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: cudnNS[0], nodename: nodeList.Items[1].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod2ns1.name, "-n", pod2ns1.namespace) pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", cudnNS[0], "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns2. ") pod1ns2 := pingPodResourceNode{ name: "hello-pod-3", namespace: cudnNS[1], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns2.name, "-n", pod1ns2.namespace) pod1ns2.createPingPodNode(oc) waitPodReady(oc, pod1ns2.namespace, pod1ns2.name) exutil.By("6. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: cudnNS[0], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. Verify ClusterIP service can be accessed from both pod2 in ns1 and pod3 in ns2") CurlPod2SvcPass(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("8. Create third namespace") oc.SetupProject() cudnNS = append(cudnNS, oc.Namespace()) exutil.By("9. Create service and pods which are on default network.") createResourceFromFile(oc, cudnNS[2], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[2], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, cudnNS[2], "name=test-pods") exutil.By("10. Not be able to access cudn service from default network.") CurlPod2SvcFail(oc, cudnNS[2], cudnNS[0], testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from cudn network.") CurlPod2SvcFail(oc, cudnNS[1], cudnNS[2], pod2ns1.name, "test-service") exutil.By("11. Create fourth namespace for cudn pod") oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s=%s", key, values2[0])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("12. 
Create CRD in fourth namespace") if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer func() { oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s-", key)).Execute() removeResource(oc, true, true, "namespace", cudnNS[3]) removeResource(oc, true, true, "clusteruserdefinednetwork", crdName2) }() _, err = createCUDNCRD(oc, key, crdName2, ipv4cidr, ipv6cidr, cidr, "layer3", values2) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. Create a udn pod in fourth namespace") createResourceFromFile(oc, cudnNS[3], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[3], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, cudnNS[3], "name=test-pods") exutil.By("14. Verify different cudn network, service was isolated.") CurlPod2SvcFail(oc, cudnNS[3], cudnNS[0], testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for cudn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) exutil.By("16. 
Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", cudnNS[0]).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", cudnNS[0], svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) }) g.It("Author:huirwang-High-78768-Validate service for CUDN(Layer2)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" key = "test.cudn.layer2" crdName = "cudn-network-78768" crdName2 = "cudn-network-78768-2" values = []string{"value-78768-1", "value-78768-2"} values2 = []string{"value2-78768-1", "value2-78768-2"} cudnNS = []string{} ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Create CRD for CUDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer2", values) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2. 
Create 2 namespaces and add related values.") for i := 0; i < 2; i++ { oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s-", key)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: cudnNS[0], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns1.name, "-n", pod1ns1.namespace) pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: cudnNS[0], nodename: nodeList.Items[1].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod2ns1.name, "-n", pod2ns1.namespace) pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", cudnNS[0], "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns2. ") pod1ns2 := pingPodResourceNode{ name: "hello-pod-3", namespace: cudnNS[1], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns2.name, "-n", pod1ns2.namespace) pod1ns2.createPingPodNode(oc) waitPodReady(oc, pod1ns2.namespace, pod1ns2.name) exutil.By("6. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: cudnNS[0], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. Verify ClusterIP service can be accessed from both pod2 in ns1 and pod3 in ns2") CurlPod2SvcPass(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("8. Create third namespace") oc.SetupProject() cudnNS = append(cudnNS, oc.Namespace()) exutil.By("9. Create service and pods which are on default network.") createResourceFromFile(oc, cudnNS[2], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[2], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, cudnNS[2], "name=test-pods") exutil.By("10. Not be able to access cudn service from default network.") CurlPod2SvcFail(oc, cudnNS[2], cudnNS[0], testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from cudn network.") CurlPod2SvcFail(oc, cudnNS[1], cudnNS[2], pod2ns1.name, "test-service") exutil.By("11. Create fourth namespace for cudn pod") oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s=%s", key, values2[0])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("12. 
Create CRD in fourth namespace") if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer func() { oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s-", key)).Execute() removeResource(oc, true, true, "namespace", cudnNS[3]) removeResource(oc, true, true, "clusteruserdefinednetwork", crdName2) }() _, err = createCUDNCRD(oc, key, crdName2, ipv4cidr, ipv6cidr, cidr, "layer2", values2) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. Create a udn pod in fourth namespace") createResourceFromFile(oc, cudnNS[3], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[3], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, cudnNS[3], "name=test-pods") exutil.By("14. Verify different cudn network, service was isolated.") CurlPod2SvcFail(oc, cudnNS[3], cudnNS[0], testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for cudn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) exutil.By("16. 
Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", cudnNS[0]).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", cudnNS[0], svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) }) g.It("Author:qiowang-ConnectedOnly-PreChkUpgrade-High-79060-Validate UDN LoadBalancer service post upgrade", func() { platform := exutil.CheckPlatform(oc) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") || strings.Contains(platform, "aws") if !acceptedPlatform { g.Skip("Test cases should be run on connected GCP/Azure/AWS, skip for other platforms or disconnected cluster!!") } var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") rcPingPodTemplate = filepath.Join(buildPruningBaseDir, "rc-ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") nadNS = []string{"79060-upgrade-ns1", "79060-upgrade-ns2"} servicename = "test-service" ) exutil.By("1. Create two namespaces") for i := 0; i < 2; i++ { oc.CreateSpecificNamespaceUDN(nadNS[i]) } exutil.By("2. Create CRD for layer3 UDN in namespace ns1") createGeneralUDNCRD(oc, nadNS[0], "udn-network-"+nadNS[0], "", "", "10.200.0.0/16", "layer3") exutil.By("3. Create CRD for layer2 UDN in namespace ns2") createGeneralUDNCRD(oc, nadNS[1], "udn-network-"+nadNS[1], "", "", "10.151.0.0/16", "layer2") exutil.By("4. Create pod for service per namespace") pods := make([]replicationControllerPingPodResource, 2) for i := 0; i < 2; i++ { pods[i] = replicationControllerPingPodResource{ name: "hello-pod", replicas: 1, namespace: nadNS[i], template: rcPingPodTemplate, } pods[i].createReplicaController(oc) err := waitForPodWithLabelReady(oc, pods[i].namespace, "name="+pods[i].name) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", pods[i].name)) } exutil.By("5. 
Create LoadBalancer service per namespace") svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { svc[i] = genericServiceResource{ servicename: servicename, namespace: nadNS[i], protocol: "TCP", selector: pods[i].name, serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i], svc[i].servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc[i].servicename)) } exutil.By("6. Get LoadBalancer service URL") var svcExternalIP [2]string for i := 0; i < 2; i++ { if platform == "aws" { svcExternalIP[i] = getLBSVCHostname(oc, nadNS[i], svc[i].servicename) } else { svcExternalIP[i] = getLBSVCIP(oc, nadNS[i], svc[i].servicename) } e2e.Logf("Got service EXTERNAL-IP %s from namespace %s", svcExternalIP[i], nadNS[i]) o.Expect(svcExternalIP[i]).NotTo(o.BeEmpty()) } exutil.By("7. Curl the service from test runner") for i := 0; i < 2; i++ { svcURL := net.JoinHostPort(svcExternalIP[i], "27017") svcCmd := fmt.Sprintf("curl %s --connect-timeout 30", svcURL) e2e.Logf("svcCmd: %s", svcCmd) err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcCmd).Output() if err1 != nil || !strings.Contains(string(output), "Hello OpenShift") { e2e.Logf("got err: %v, and try next round", err1) return false, nil } e2e.Logf("The service %s access passed!", svcURL) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Fail to curl the service EXTERNAL-IP %s from test runner", svcURL)) } }) g.It("Author:qiowang-ConnectedOnly-PstChkUpgrade-High-79060-Validate UDN LoadBalancer service post upgrade", func() { platform := exutil.CheckPlatform(oc) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") || strings.Contains(platform, "aws") if !acceptedPlatform { g.Skip("Test cases should be run on connected GCP/Azure/AWS, skip for other platforms or disconnected cluster!!") } var ( nadNS = []string{"79060-upgrade-ns1", "79060-upgrade-ns2"} servicename = "test-service" ) exutil.By("1. Check the two namespaces are carried over") for i := 0; i < 2; i++ { nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", nadNS[i]).Execute() if nsErr != nil { g.Skip("Skip the PstChkUpgrade test as namespace " + nadNS[i] + " does not exist, PreChkUpgrade test did not run") } } for i := 0; i < 2; i++ { defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", nadNS[i], "--ignore-not-found=true").Execute() } exutil.By("2. Get LoadBalancer service URL") var svcExternalIP [2]string for i := 0; i < 2; i++ { if platform == "aws" { svcExternalIP[i] = getLBSVCHostname(oc, nadNS[i], servicename) } else { svcExternalIP[i] = getLBSVCIP(oc, nadNS[i], servicename) } e2e.Logf("Got service EXTERNAL-IP %s from namespace %s", svcExternalIP[i], nadNS[i]) o.Expect(svcExternalIP[i]).NotTo(o.BeEmpty()) } exutil.By("3. 
Curl the service from test runner") for i := 0; i < 2; i++ { svcURL := net.JoinHostPort(svcExternalIP[i], "27017") svcCmd := fmt.Sprintf("curl %s --connect-timeout 30", svcURL) e2e.Logf("svcCmd: %s", svcCmd) err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcCmd).Output() if err1 != nil || !strings.Contains(string(output), "Hello OpenShift") { e2e.Logf("got err: %v, and try next round", err1) return false, nil } e2e.Logf("The service %s access passed!", svcURL) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Fail to curl the service EXTERNAL-IP %s from test runner", svcURL)) } }) // author: [email protected] g.It("Author:huirwang-PreChkUpgrade-High-79034-Validate UDN clusterIP/nodePort service post upgrade.", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml") allNS = []string{"79034-upgrade-ns1", "79034-upgrade-ns2", "79034-upgrade-ns3", "79034-upgrade-ns4"} rcPingPodTemplate = filepath.Join(buildPruningBaseDir, "rc-ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) exutil.By("1. create four new namespaces") for i := 0; i < 4; i++ { oc.CreateSpecificNamespaceUDN(allNS[i]) } exutil.By("2. Create CRD for layer3 UDN in namespace ns1, ns2") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipFamilyPolicy = "PreferDualStack" ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } for i := 0; i < 2; i++ { createGeneralUDNCRD(oc, allNS[i], "udn-network-"+allNS[i], ipv4cidr, ipv6cidr, cidr, "layer3") } exutil.By("3. Create CRD for layer2 UDN in namespace ns3,ns4.") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipFamilyPolicy = "PreferDualStack" ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" } } for i := 2; i < 4; i++ { createGeneralUDNCRD(oc, allNS[i], "udn-network-"+allNS[i], ipv4cidr, ipv6cidr, cidr, "layer2") } exutil.By("4. Create test pod in each namespace") podsBackend := make([]replicationControllerPingPodResource, 4) for i := 0; i < 4; i++ { podsBackend[i] = replicationControllerPingPodResource{ name: "hello-pod", replicas: 1, namespace: allNS[i], template: rcPingPodTemplate, } podsBackend[i].createReplicaController(oc) err := waitForPodWithLabelReady(oc, podsBackend[i].namespace, "name="+podsBackend[i].name) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", podsBackend[i].name)) } exutil.By("5. Create ClusterIP service in ns1,ns3,nodePort svc in ns2,ns4") svc := make([]genericServiceResource, 4) var serviceType string for i := 0; i < 4; i++ { if i == 1 || i == 3 { serviceType = "NodePort" } else { serviceType = "ClusterIP" } svc[i] = genericServiceResource{ servicename: "test-service", namespace: allNS[i], protocol: "TCP", selector: "hello-pod", serviceType: serviceType, ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) } exutil.By("6. 
Create udn clients in each namespace") var udnClient []string for i := 0; i < 4; i++ { createResourceFromFile(oc, allNS[i], statefulSetHelloPod) podErr := waitForPodWithLabelReady(oc, allNS[i], "app=hello") exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready") udnClient = append(udnClient, getPodName(oc, allNS[i], "app=hello")[0]) } exutil.By("7. Verify the pod2service connection in ns1 for layer3.") CurlPod2SvcPass(oc, allNS[0], allNS[0], udnClient[0], svc[0].servicename) exutil.By("8. Verify the pod2service connection in ns3 for layer2.") CurlPod2SvcPass(oc, allNS[2], allNS[2], udnClient[2], svc[2].servicename) exutil.By("9. Verify the pod2service isolation from ns2 to ns1 for layer3") CurlPod2SvcFail(oc, allNS[1], allNS[0], udnClient[1], svc[0].servicename) exutil.By("10. Verify the pod2service isolation from ns4 to ns3 for layer2") CurlPod2SvcFail(oc, allNS[3], allNS[2], udnClient[3], svc[2].servicename) exutil.By("11. Verify the nodePort service in ns2 can be accessed for layer3.") nodePortNS2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[1], svc[1].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("There are less than 2 worker nodes and nodePort service validation will be skipped! ") } clientNode := nodeList.Items[0].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS2) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS2) exutil.By("12. Verify the nodePort service in ns4 can be accessed for layer2.") nodePortNS4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[3], svc[3].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS4) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS4) }) // author: [email protected] g.It("Author:huirwang-PstChkUpgrade-High-79034-Validate UDN clusterIP/nodePort service post upgrade.", func() { var ( allNS = []string{"79034-upgrade-ns1", "79034-upgrade-ns2", "79034-upgrade-ns3", "79034-upgrade-ns4"} svcName = "test-service" ) for i := 0; i < 4; i++ { nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", allNS[i]).Execute() if nsErr != nil { g.Skip(fmt.Sprintf("Skip the PstChkUpgrade test as %s namespace does not exist, PreChkUpgrade test did not run", allNS[i])) } } for i := 0; i < 4; i++ { defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", allNS[i], "--ignore-not-found=true").Execute() } exutil.By("1. Get udn clients from preserved four namespaces") var udnClient []string for i := 0; i < 4; i++ { podErr := waitForPodWithLabelReady(oc, allNS[i], "app=hello") exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready") udnClient = append(udnClient, getPodName(oc, allNS[i], "app=hello")[0]) } exutil.By("2. Verify the pod2service connection in ns1 for layer3.") CurlPod2SvcPass(oc, allNS[0], allNS[0], udnClient[0], svcName) exutil.By("3. Verify the pod2service connection in ns3 for layer2.") CurlPod2SvcPass(oc, allNS[2], allNS[2], udnClient[2], svcName) exutil.By("4. Verify the pod2service isolation from ns2 to ns1 for layer3") CurlPod2SvcFail(oc, allNS[1], allNS[0], udnClient[1], svcName) exutil.By("5. 
Verify the pod2service isolation from ns4 to ns3 for layer2") CurlPod2SvcFail(oc, allNS[3], allNS[2], udnClient[3], svcName) exutil.By("6. Verify the nodePort service in ns2 can be accessed for layer3.") nodePortNS2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[1], svcName, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("There are less than 2 worker nodes and nodePort service validation will be skipped! ") } clientNode := nodeList.Items[0].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS2) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS2) exutil.By("7. Verify the nodePort service in ns4 can be accessed for layer2.") nodePortNS4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[3], svcName, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS4) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS4) }) g.It("Author:qiowang-NonHyperShiftHOST-PreChkUpgrade-Medium-44790-High-79163-Validate ExternalIP service for default and UDN pods post upgrade [Disruptive]", func() { nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } ipStackType := checkIPStackType(oc) var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") rcPingPodTemplate = filepath.Join(buildPruningBaseDir, "rc-ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") allNS = []string{"79163-upgrade-ns1", "79163-upgrade-ns2", "79163-upgrade-ns3"} ipFamilyPolicy = "SingleStack" serviceName = "test-service" ) exutil.By("1. Create three namespaces, ns1 and ns2 for udn network testing, ns3 for default network testing") for i := 0; i < 2; i++ { oc.CreateSpecificNamespaceUDN(allNS[i]) } oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", allNS[2]).Execute() exutil.By("2. Find externalIP for testing") var externalIP, externalIPv6 []string for i := 0; i < 3; i++ { nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[i].Name) externalIP = append(externalIP, nodeIP2) if ipStackType == "dualstack" { externalIPv6 = append(externalIPv6, nodeIP1) } } exutil.By("3. Patch network.config to enable externalIP") allowedCIDRs := `"` + externalIP[0] + `","` + externalIP[1] + `","` + externalIP[2] + `"` if ipStackType == "dualstack" { allowedCIDRs = allowedCIDRs + `,"` + externalIPv6[0] + `","` + externalIPv6[1] + `","` + externalIPv6[2] + `"` } patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":["+allowedCIDRs+"]}}}}") exutil.By("4. Create CRD for layer3 UDN in namespace ns1") var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, allNS[0], "udn-network-"+allNS[0], ipv4cidr, ipv6cidr, cidr, "layer3") exutil.By("5. 
Create CRD for layer2 UDN in namespace ns2") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, allNS[1], "udn-network-"+allNS[1], ipv4cidr, ipv6cidr, cidr, "layer2") exutil.By("6. Create pod as backend pod for service in each ns") var podsBackendName []string for i := 0; i < 3; i++ { podsBackend := replicationControllerPingPodResource{ name: "hello-pod-1", replicas: 0, namespace: allNS[i], template: rcPingPodTemplate, } podsBackend.createReplicaController(oc) e2e.Logf("schedule backend pod to " + nodeList.Items[i].Name) patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("rc/"+podsBackend.name, "-n", allNS[i], "-p", "{\"spec\":{\"replicas\":1,\"template\":{\"spec\":{\"nodeSelector\":{\"kubernetes.io/hostname\":\""+nodeList.Items[i].Name+"\"}}}}}", "--type=merge").Execute() o.Expect(patchErr).NotTo(o.HaveOccurred()) err := waitForPodWithLabelReady(oc, podsBackend.namespace, "name="+podsBackend.name) exutil.AssertWaitPollNoErr(err, "The backend pod is not ready") podsBackendName = append(podsBackendName, getPodName(oc, allNS[i], "name="+podsBackend.name)[0]) } exutil.By("7. Create udn client pod on different node in ns1 and ns2") var udnClientName []string for i := 0; i < 2; i++ { udnClient := replicationControllerPingPodResource{ name: "hello-pod-2", replicas: 0, namespace: allNS[i], template: rcPingPodTemplate, } udnClient.createReplicaController(oc) e2e.Logf("schedule udn client pod to " + nodeList.Items[2].Name) patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("rc/"+udnClient.name, "-n", allNS[i], "-p", "{\"spec\":{\"replicas\":1,\"template\":{\"spec\":{\"nodeSelector\":{\"kubernetes.io/hostname\":\""+nodeList.Items[2].Name+"\"}}}}}", "--type=merge").Execute() o.Expect(patchErr).NotTo(o.HaveOccurred()) err := waitForPodWithLabelReady(oc, udnClient.namespace, "name="+udnClient.name) exutil.AssertWaitPollNoErr(err, "The udn client pod is not ready") udnClientName = append(udnClientName, getPodName(oc, allNS[i], "name="+udnClient.name)[0]) } exutil.By("8. Create a ClusterIP service in each ns") for i := 0; i < 3; i++ { svc := genericServiceResource{ servicename: serviceName, namespace: allNS[i], protocol: "TCP", selector: "hello-pod-1", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) e2e.Logf("Patch ExternalIP to service") patchResourceAsAdmin(oc, "svc/"+svc.servicename, fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP[i]), allNS[i]) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[i], svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(externalIP[i])) } exutil.By("9. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("10. Validate the externalIP service for layer3 UDN") } else { exutil.By("11. 
Validate the externalIP service for layer2 UDN") } exutil.By("Validate the externalIP service can be accessed from another udn pod") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } if ipStackType == "dualstack" { exutil.By("Retest it with IPv6 address in dualstack cluster") exutil.By("12. Patch IPv6 ExternalIP to service") for i := 0; i < 3; i++ { patchResourceAsAdmin(oc, "svc/"+serviceName, fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\",\"%s\"]}}", externalIP[i], externalIPv6[i]), allNS[i]) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[i], serviceName).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(serviceName)) } exutil.By("13. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("14. Validate the externalIP service for layer3 UDN - ipv6") } else { exutil.By("15. Validate the externalIP service for layer2 UDN - ipv6") } exutil.By("Validate the externalIP service can be accessed from another udn pod - ipv6") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } } }) g.It("Author:qiowang-NonHyperShiftHOST-PstChkUpgrade-Medium-44790-High-79163-Validate ExternalIP service for default and UDN pods post upgrade [Disruptive]", func() { defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. 
") } ipStackType := checkIPStackType(oc) var ( allNS = []string{"79163-upgrade-ns1", "79163-upgrade-ns2", "79163-upgrade-ns3"} podBackendLabel = "hello-pod-1" udnClientLabel = "hello-pod-2" ) exutil.By("1. Check the three namespaces are carried over") for i := 0; i < 3; i++ { nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", allNS[i]).Execute() if nsErr != nil { g.Skip("Skip the PstChkUpgrade test as namespace " + allNS[i] + " does not exist, PreChkUpgrade test did not run") } } for i := 0; i < 3; i++ { defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", allNS[i], "--ignore-not-found=true").Execute() } exutil.By("2. Get externalIP for testing") var externalIP, externalIPv6 []string for i := 0; i < 3; i++ { nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[i].Name) externalIP = append(externalIP, nodeIP2) if ipStackType == "dualstack" { externalIPv6 = append(externalIPv6, nodeIP1) } } exutil.By("3. Get backend pod from preserved namespaces") var podsBackendName []string for i := 0; i < 3; i++ { err := waitForPodWithLabelReady(oc, allNS[i], "name="+podBackendLabel) exutil.AssertWaitPollNoErr(err, "The backend pod is not ready") podsBackendName = append(podsBackendName, getPodName(oc, allNS[i], "name="+podBackendLabel)[0]) } exutil.By("4. Get udn clients from preserved namespaces") var udnClientName []string for i := 0; i < 2; i++ { err := waitForPodWithLabelReady(oc, allNS[i], "name="+udnClientLabel) exutil.AssertWaitPollNoErr(err, "The udn client pod is not ready") udnClientName = append(udnClientName, getPodName(oc, allNS[i], "name="+udnClientLabel)[0]) } exutil.By("5. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("6. Validate the externalIP service for layer3 UDN") } else { exutil.By("7. Validate the externalIP service for layer2 UDN") } exutil.By("Validate the externalIP service can be accessed from another udn pod") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } if ipStackType == "dualstack" { exutil.By("Retest it with IPv6 address in dualstack cluster") exutil.By("8. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("9. Validate the externalIP service for layer3 UDN - ipv6") } else { exutil.By("10. 
Validate the externalIP service for layer2 UDN - ipv6") } exutil.By("Validate the externalIP service can be accessed from another udn pod - ipv6") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } } }) })
package networking
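The upgrade records above all gate on the same retry idiom: poll a curl against the service endpoint until it answers or a 120-second deadline expires. A standalone sketch of that pattern using only the Go standard library; the helper name, address, and cadence are illustrative, not part of the suite:

```go
// Minimal sketch of the wait.PollUntilContextTimeout-plus-curl pattern:
// keep dialing host:port until the service answers or the deadline passes.
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// waitForTCP returns nil once a TCP connection to addr succeeds,
// retrying every interval until ctx is done.
func waitForTCP(ctx context.Context, addr string, interval time.Duration) error {
	for {
		conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("service %s not reachable: %w (last dial error: %v)", addr, ctx.Err(), err)
		case <-time.After(interval):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()
	// net.JoinHostPort handles IPv6 literals, matching the tests' usage.
	addr := net.JoinHostPort("203.0.113.10", "27017") // illustrative externalIP
	if err := waitForTCP(ctx, addr, 10*time.Second); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("service reachable")
	}
}
```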
test case
openshift/openshift-tests-private
b416e17f-8ab5-4c36-a8b3-a6d0086a5f64
Author:huirwang-High-76017-Service should be able to access for same NAD UDN pods in different namespaces (L3/L2).
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-High-76017-Service should be able to access for same NAD UDN pods in different namespaces (L3/L2).", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") mtu int32 = 1300 ipFamilyPolicy = "SingleStack" ) ipStackType := checkIPStackType(oc) exutil.By("Get first namespace") var nadNS []string = make([]string, 0, 4) exutil.By("Create another 3 namespaces") for i := 0; i < 4; i++ { oc.CreateNamespaceUDN() nadNS = append(nadNS, oc.Namespace()) } nadResourcename := []string{"l3-network-test", "l2-network-test"} topo := []string{"layer3", "layer3", "layer2", "layer2"} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16/24", "10.150.0.0/16/24", "10.152.0.0/16", "10.152.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2010:100:200::0/60", "2012:100:200::0/60", "2012:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"} ipFamilyPolicy = "PreferDualStack" } } exutil.By("5. Create same NAD in ns1 ns2 for layer3") nad := make([]udnNetDefResource, 4) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[0], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[0], namespace: nadNS[i], nad_network_name: nadResourcename[0], // Need to use same nad name topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[0], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("6. Create same NAD in ns3 ns4 for layer 2") for i := 2; i < 4; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[1], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[1], namespace: nadNS[i], nad_network_name: nadResourcename[1], topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[1], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("7. Create one pod in respective namespaces ns1,ns2,ns3,ns4") pod := make([]udnPodResource, 4) for i := 0; i < 4; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) // add a step to check ovn-udn1 created. output, err := e2eoutput.RunHostCmd(pod[i].namespace, pod[i].name, "ip -o link show") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("ovn-udn1")) } exutil.By("8. 
Create service in ns2,ns4") svc1 := genericServiceResource{ servicename: "test-service", namespace: nadNS[1], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc1.createServiceFromParams(oc) svc2 := genericServiceResource{ servicename: "test-service", namespace: nadNS[3], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc2.createServiceFromParams(oc) exutil.By("9. Verify ClusterIP service in ns2 can be accessed from pod in ns1 for layer 3") CurlPod2SvcPass(oc, nadNS[0], nadNS[1], pod[0].name, svc1.servicename) exutil.By("10. Verify ClusterIP service in ns4 can be accessed from pod in ns3 for layer 2") CurlPod2SvcPass(oc, nadNS[2], nadNS[3], pod[2].name, svc2.servicename) })
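What makes 76017 pass is that both NADs carry the same nad_network_name, so pods in different namespaces attach to one logical UDN and can reach each other's services. A minimal standalone sketch of that invariant; the field names mirror the udnNetDefResource literal above, while the type and values here are illustrative:

```go
// Two NADs, two namespaces, one shared network name => one logical network.
package main

import "fmt"

type nadSpec struct {
	Name, Namespace, NetworkName, Topology, Subnet string
}

func main() {
	shared := "l3-network-test" // same value in both namespaces, as in the test
	for _, ns := range []string{"ns1", "ns2"} {
		nad := nadSpec{
			Name:        shared,
			Namespace:   ns,
			NetworkName: shared, // identical nad_network_name joins the namespaces
			Topology:    "layer3",
			Subnet:      "10.150.0.0/16/24",
		}
		// net_attach_def_name is "<namespace>/<name>", as built in the test.
		fmt.Printf("%s/%s joins network %q\n", nad.Namespace, nad.Name, nad.NetworkName)
	}
}
```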
test case
openshift/openshift-tests-private
00a18eb1-0b1c-41f0-8ce0-530c50faafcf
Author:huirwang-Medium-76016-Service exists before NAD is created (L3/L2).
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-Medium-76016-Service exists before NAD is created (L3/L2).", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") mtu int32 = 1300 ipFamilyPolicy = "SingleStack" ) ipStackType := checkIPStackType(oc) exutil.By("1. Create first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadResourcename := []string{"l3-network-" + ns1, "l2-network-" + ns2} nadNS := []string{ns1, ns2} topo := []string{"layer3", "layer2"} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16/24", "10.152.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2012:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"} ipFamilyPolicy = "PreferDualStack" } } exutil.By("3. Create a service without any serving pods") svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) } exutil.By("4. Create NAD in ns1 ns2 for layer3,layer2") nad := make([]udnNetDefResource, 4) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: nadResourcename[i], topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } exutil.By("7. Create 2 pods in ns1,ns2") pod := make([]udnPodResource, 4) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("7. Create another two pods in ns1,ns2") for i := 2; i < 4; i++ { pod[i] = udnPodResource{ name: "hello-pod-test", namespace: nadNS[i-2], label: "hello-pod-test", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("Verify the service can be accessed for layer 3") CurlPod2SvcPass(oc, ns1, ns1, pod[2].name, svc[0].servicename) exutil.By("Verify the service can be accessed for layer 2") CurlPod2SvcPass(oc, ns2, ns2, pod[3].name, svc[1].servicename) })
test case
openshift/openshift-tests-private
3291d8a1-1f26-4aae-9e01-c9efa9d28536
Author:huirwang-High-76796-Idling/Unidling services should work for UDN pods. (L3/L2).
['"context"', '"fmt"', '"net"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-High-76796-Idling/Unidling services should work for UDN pods. (L3/L2).", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") mtu int32 = 1300 ) ipStackType := checkIPStackType(oc) exutil.By("1.Get first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadResourcename := []string{"l3-network-" + ns1, "l2-network-" + ns2} nadNS := []string{ns1, ns2} topo := []string{"layer3", "layer2"} var subnet []string if ipStackType == "ipv4single" { subnet = []string{"10.150.0.0/16/24", "10.152.0.0/16"} } else { if ipStackType == "ipv6single" { subnet = []string{"2010:100:200::0/60", "2012:100:200::0/60"} } else { subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"} } } exutil.By("3. Create NAD in ns1 ns2 for layer3,layer2") nad := make([]udnNetDefResource, 4) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i])) nad[i] = udnNetDefResource{ nadname: nadResourcename[i], namespace: nadNS[i], nad_network_name: nadResourcename[i], topology: topo[i], subnet: subnet[i], mtu: mtu, net_attach_def_name: nadNS[i] + "/" + nadResourcename[i], role: "primary", template: udnNadtemplate, } nad[i].createUdnNad(oc) } for i := 0; i < len(nadNS); i++ { exutil.By(fmt.Sprintf("Create a service in namespace %v.", nadNS[i])) createResourceFromFile(oc, nadNS[i], testSvcFile) waitForPodWithLabelReady(oc, nadNS[i], "name=test-pods") svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } if ipStackType == "dualstack" { svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("Recreate dualstack service in namepsace %v.", nadNS[i])) removeResource(oc, true, true, "service", "test-service", "-n", nadNS[i]) svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "test-pods", serviceType: "ClusterIP", ipFamilyPolicy: "PreferDualStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } } exutil.By("6. idle test-service") idleOutput, idleErr := oc.AsAdmin().WithoutNamespace().Run("idle").Args("-n", ns1, "test-service").Output() o.Expect(idleErr).NotTo(o.HaveOccurred()) o.Expect(idleOutput).To(o.ContainSubstring("The service \"%v/test-service\" has been marked as idled", ns1)) idleOutput, idleErr = oc.AsAdmin().WithoutNamespace().Run("idle").Args("-n", ns2, "test-service").Output() o.Expect(idleErr).NotTo(o.HaveOccurred()) o.Expect(idleOutput).To(o.ContainSubstring("The service \"%v/test-service\" has been marked as idled", ns2)) exutil.By("7. 
check test pod in ns1 terminated") getPodOutput := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { output, getPodErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns1).Output() o.Expect(getPodErr).NotTo(o.HaveOccurred()) e2e.Logf("pods status: %s", output) if strings.Contains(output, "No resources found") { return true, nil } e2e.Logf("pods are not terminated, try again") return false, nil }) exutil.AssertWaitPollNoErr(getPodOutput, fmt.Sprintf("Fail to terminate pods:%s", getPodOutput)) exutil.By("8. check test pod in ns2 terminated") getPodOutput = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { output, getPodErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns2).Output() o.Expect(getPodErr).NotTo(o.HaveOccurred()) e2e.Logf("pods status: %s", output) if strings.Contains(output, "No resources found") { return true, nil } e2e.Logf("pods are not terminated, try again") return false, nil }) exutil.AssertWaitPollNoErr(getPodOutput, fmt.Sprintf("Fail to terminate pods:%s", getPodOutput)) exutil.By("9. Create a test pod in ns1,ns2") pod := make([]udnPodResource, 2) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("10. Verify unidling the service can be accessed for layer 3") svcIP1, svcIP2 := getSvcIP(oc, ns1, "test-service") if svcIP2 != "" { _, err := e2eoutput.RunHostCmdWithRetries(ns1, pod[0].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod[0].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP2, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmdWithRetries(ns1, pod[0].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("11. Verify unidling the service can be accessed for layer 2") svcIP1, svcIP2 = getSvcIP(oc, ns2, "test-service") if svcIP2 != "" { _, err := e2eoutput.RunHostCmdWithRetries(ns2, pod[1].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) _, err = e2eoutput.RunHostCmdWithRetries(ns2, pod[1].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP2, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmdWithRetries(ns2, pod[1].name, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } })
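Steps 7 and 8 above poll `oc get pod` until the namespace prints "No resources found", which is how the test confirms idling scaled the backing pods to zero. A standalone sketch of the same wait loop with a pluggable lister in place of the shell-out; names are illustrative:

```go
// Poll a pod lister until the namespace reports no pods, or fail with the
// last observed status once the deadline passes.
package main

import (
	"context"
	"fmt"
	"strings"
	"time"
)

func waitPodsTerminated(ctx context.Context, list func() (string, error)) error {
	last := ""
	for {
		out, err := list()
		if err != nil {
			return err
		}
		if strings.Contains(out, "No resources found") {
			return nil // idling scaled the service's backing pods to zero
		}
		last = out
		select {
		case <-ctx.Done():
			return fmt.Errorf("pods not terminated, last status: %s", last)
		case <-time.After(5 * time.Second):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Stub lister standing in for `oc get pod -n <ns>`; a real caller would shell out.
	list := func() (string, error) { return "No resources found in ns1 namespace.", nil }
	fmt.Println(waitPodsTerminated(ctx, list)) // prints <nil> on success
}
```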
test case
openshift/openshift-tests-private
2b8f3447-ed74-4673-8c6f-d57440f809fe
Author:huirwang-Critical-76732-Validate pod2Service/nodePortService for UDN(Layer2)
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-Critical-76732-Validate pod2Service/nodePortService for UDN(Layer2)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_dualstack_template.yaml") udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_singlestack_template.yaml") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } exutil.By("Create CRD for UDN") var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-76732", namespace: ns1, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv6cidr: ipv6cidr, template: udnCRDdualStack, } udncrd.createLayer2DualStackUDNCRD(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-76732", namespace: ns1, role: "Primary", mtu: 1400, cidr: cidr, template: udnCRDSingleStack, } udncrd.createLayer2SingleStackUDNCRD(oc) } exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") clientPod1 := pingPodResourceNode{ name: "client-pod-1", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } clientPod1.createPingPodNode(oc) waitPodReady(oc, clientPod1.namespace, clientPod1.name) // Update label for pod2 to a different one err := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", clientPod1.name, "name=client-pod-1", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns1 on same node as pod1") clientPod2 := pingPodResourceNode{ name: "client-pod-2", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } clientPod2.createPingPodNode(oc) waitPodReady(oc, clientPod2.namespace, clientPod2.name) // Update label for pod3 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", clientPod2.name, "name=client-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. create a service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. 
Verify ClusterIP service can be accessed from both clientPod1 and clientPod2") CurlPod2SvcPass(oc, ns1, ns1, clientPod1.name, svc.servicename) CurlPod2SvcPass(oc, ns1, ns1, clientPod2.name, svc.servicename) exutil.By("8. Create a second namespace") oc.SetupProject() ns2 := oc.Namespace() exutil.By("9. Create service and pods which are on default network.") createResourceFromFile(oc, ns2, testPodFile) err = waitForPodWithLabelReady(oc, ns2, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, ns2, "name=test-pods") exutil.By("10. Not be able to access udn service from default network.") CurlPod2SvcFail(oc, ns2, ns1, testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from udn network.") CurlPod2SvcFail(oc, ns1, ns2, clientPod1.name, "test-service") exutil.By("11. Create third namespace for udn pod") oc.CreateNamespaceUDN() ns3 := oc.Namespace() exutil.By("12. Create CRD in third namespace") if ipStackType == "ipv4single" { cidr = "10.160.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:200:200::0/48" } else { ipv4cidr = "10.160.0.0/16" ipv6cidr = "2010:200:200::0/48" } } var udncrdns3 udnCRDResource if ipStackType == "dualstack" { udncrdns3 = udnCRDResource{ crdname: "udn-network-ds-76732-ns3", namespace: ns3, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv6cidr: ipv6cidr, template: udnCRDdualStack, } udncrdns3.createLayer2DualStackUDNCRD(oc) } else { udncrdns3 = udnCRDResource{ crdname: "udn-network-ss-76732-ns3", namespace: ns3, role: "Primary", mtu: 1400, cidr: cidr, template: udnCRDSingleStack, } udncrdns3.createLayer2SingleStackUDNCRD(oc) } err = waitUDNCRDApplied(oc, ns3, udncrdns3.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. Create a udn pod in third namespace") createResourceFromFile(oc, ns3, testPodFile) err = waitForPodWithLabelReady(oc, ns3, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, ns3, "name=test-pods") exutil.By("14. Verify different udn network, service was isolated.") CurlPod2SvcFail(oc, ns3, ns1, testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for udn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, ns1, ns1, clientPod2.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, ns1, ns1, clientPod1.name, svc.servicename) exutil.By("16. 
Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") removeResource(oc, true, true, "service", "test-service", "-n", ns1) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) //Ignore below steps because of bug https://issues.redhat.com/browse/OCPBUGS-43085 exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) })
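Steps 15 and 17 flip the traffic policies with raw JSON-patch strings. A sketch of building the same patch document with encoding/json, should the literal ever need to be constructed programmatically; the type here is illustrative, and the suite itself passes the string directly to `oc patch --type=json`:

```go
// Build the externalTrafficPolicy JSON patch used in step 17.
package main

import (
	"encoding/json"
	"fmt"
)

type jsonPatchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

func main() {
	patch := []jsonPatchOp{{
		Op:    "replace",
		Path:  "/spec/externalTrafficPolicy",
		Value: "Local", // Local: only nodes hosting a backend answer on nodePort, per steps 17.1/17.2
	}}
	b, _ := json.Marshal(patch)
	fmt.Println(string(b)) // [{"op":"replace","path":"/spec/externalTrafficPolicy","value":"Local"}]
}
```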
test case
openshift/openshift-tests-private
b2ef9c6b-e804-4946-bcc8-1b882f6e55e2
Author:huirwang-Critical-75942-Validate pod2Service/nodePortService for UDN(Layer3)
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-Critical-75942-Validate pod2Service/nodePortService for UDN(Layer3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_dualstack2_template.yaml") udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" prefix = 24 } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" prefix = 64 } else { ipv4cidr = "10.150.0.0/16" ipv4prefix = 24 ipv6cidr = "2010:100:200::0/48" ipv6prefix = 64 ipFamilyPolicy = "PreferDualStack" } } var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-ds-75942", namespace: ns1, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr, IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-ss-75942", namespace: ns1, role: "Primary", mtu: 1400, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) } err := waitUDNCRDApplied(oc, ns1, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns1 on same node as pod1") pod3ns1 := pingPodResourceNode{ name: "hello-pod-3", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod3ns1.createPingPodNode(oc) waitPodReady(oc, pod3ns1.namespace, pod3ns1.name) // Update label for pod3 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod3ns1.name, "name=hello-pod-3", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. 
create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. Verify ClusterIP service can be accessed from both pod2 and pod3") CurlPod2SvcPass(oc, ns1, ns1, pod2ns1.name, svc.servicename) CurlPod2SvcPass(oc, ns1, ns1, pod3ns1.name, svc.servicename) exutil.By("8. Create second namespace") oc.SetupProject() ns2 := oc.Namespace() exutil.By("9. Create service and pods which are on default network.") createResourceFromFile(oc, ns2, testPodFile) err = waitForPodWithLabelReady(oc, ns2, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, ns2, "name=test-pods") exutil.By("10. Not be able to access udn service from default network.") CurlPod2SvcFail(oc, ns2, ns1, testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from udn network.") CurlPod2SvcFail(oc, ns1, ns2, pod2ns1.name, "test-service") exutil.By("11. Create third namespace for udn pod") oc.CreateNamespaceUDN() ns3 := oc.Namespace() exutil.By("12. Create CRD in third namespace") if ipStackType == "ipv4single" { cidr = "10.160.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:200:200::0/48" prefix = 64 } else { ipv4cidr = "10.160.0.0/16" ipv4prefix = 24 ipv6cidr = "2010:200:200::0/48" ipv6prefix = 64 } } var udncrdns3 udnCRDResource if ipStackType == "dualstack" { udncrdns3 = udnCRDResource{ crdname: "udn-network-ds-75942-ns3", namespace: ns3, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr, IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrdns3.createUdnCRDDualStack(oc) } else { udncrdns3 = udnCRDResource{ crdname: "udn-network-ss-75942-ns3", namespace: ns3, role: "Primary", mtu: 1400, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrdns3.createUdnCRDSingleStack(oc) } err = waitUDNCRDApplied(oc, ns3, udncrdns3.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. Create a udn pod in third namespace") createResourceFromFile(oc, ns3, testPodFile) err = waitForPodWithLabelReady(oc, ns3, "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, ns3, "name=test-pods") exutil.By("14. Verify different udn network, service was isolated.") CurlPod2SvcFail(oc, ns3, ns1, testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for udn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, ns1, ns1, pod3ns1.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, ns1, ns1, pod2ns1.name, svc.servicename) exutil.By("16. 
Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) //Ignore below steps because of bug https://issues.redhat.com/browse/OCPBUGS-43085 exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", ns1, "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) })
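The test above flips a Service's internalTrafficPolicy/externalTrafficPolicy with `oc patch --type=json`. A minimal stand-alone sketch (not framework code; the type and function names here are illustrative) of building and sanity-checking those RFC 6902 patch bodies with Go's encoding/json:

package main

import (
	"encoding/json"
	"fmt"
)

// jsonPatchOp mirrors one RFC 6902 operation, the format `oc patch --type=json` expects.
type jsonPatchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

// trafficPolicyPatch renders the one-op patch used by the test, e.g.
// [{"op":"replace","path":"/spec/internalTrafficPolicy","value":"Local"}].
func trafficPolicyPatch(field, value string) (string, error) {
	b, err := json.Marshal([]jsonPatchOp{{Op: "replace", Path: "/spec/" + field, Value: value}})
	return string(b), err
}

func main() {
	for _, field := range []string{"internalTrafficPolicy", "externalTrafficPolicy"} {
		patch, err := trafficPolicyPatch(field, "Local")
		if err != nil {
			panic(err)
		}
		fmt.Println(patch)
	}
}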
test case
openshift/openshift-tests-private
5f47616a-33f1-4de6-8daa-6372209e6a92
Author:meinli-Critical-78238-Validate host/pod to nodeport with externalTrafficPolicy is local/cluster on same/diff workers (UDN layer3 and default network)
['"context"', '"fmt"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:meinli-Critical-78238-Validate host/pod to nodeport with externalTrafficPolicy is local/cluster on same/diff workers (UDN layer3 and default network)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml") udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) exutil.By("0. Get three worker nodes") nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This case requires 3 nodes, but the cluster has less than three nodes") } exutil.By("1. Create two namespaces, first one is for default network and second is for UDN and then label namespaces") ns1 := oc.Namespace() oc.CreateNamespaceUDN() ns2 := oc.Namespace() ns := []string{ns1, ns2} for _, namespace := range ns { err = exutil.SetNamespacePrivileged(oc, namespace) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("2. Create UDN CRD in ns2") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string var prefix, ipv4prefix, ipv6prefix int32 if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" prefix = 24 } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" prefix = 64 } else { ipv4cidr = "10.150.0.0/16" ipv4prefix = 24 ipv6cidr = "2010:100:200::0/48" ipv6prefix = 64 ipFamilyPolicy = "PreferDualStack" } } var udncrd udnCRDResource if ipStackType == "dualstack" { udncrd = udnCRDResource{ crdname: "udn-network-ds-78238", namespace: ns2, role: "Primary", mtu: 1400, IPv4cidr: ipv4cidr, IPv4prefix: ipv4prefix, IPv6cidr: ipv6cidr, IPv6prefix: ipv6prefix, template: udnCRDdualStack, } udncrd.createUdnCRDDualStack(oc) } else { udncrd = udnCRDResource{ crdname: "udn-network-ss-78238", namespace: ns2, role: "Primary", mtu: 1400, cidr: cidr, prefix: prefix, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) } err = waitUDNCRDApplied(oc, ns2, udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create two pods and nodeport service with externalTrafficPolicy=Local in ns1 and ns2") nodeportsLocal := []string{} pods := make([]pingPodResourceNode, 2) svcs := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("3.%d Create pod and nodeport service with externalTrafficPolicy=Local in %s", i, ns[i])) pods[i] = pingPodResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns[i], nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pods[i].createPingPodNode(oc) waitPodReady(oc, ns[i], pods[i].name) svcs[i] = genericServiceResource{ servicename: "test-service" + strconv.Itoa(i), namespace: ns[i], protocol: "TCP", selector: "hello-pod", serviceType: "NodePort", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Local", template: genericServiceTemplate, } svcs[i].createServiceFromParams(oc) nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns[i], svcs[i].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeportsLocal = append(nodeportsLocal, nodePort) } exutil.By("4. 
Validate pod/host to nodeport service with externalTrafficPolicy=Local traffic") for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("4.1.%d Validate pod to nodeport service with externalTrafficPolicy=Local traffic in %s", i, ns[i])) CurlPod2NodePortPass(oc, ns[i], pods[i].name, nodeList.Items[0].Name, nodeportsLocal[i]) CurlPod2NodePortFail(oc, ns[i], pods[i].name, nodeList.Items[1].Name, nodeportsLocal[i]) } exutil.By("4.2 Validate host to nodeport service with externalTrafficPolicy=Local traffic on default network") CurlNodePortPass(oc, nodeList.Items[2].Name, nodeList.Items[0].Name, nodeportsLocal[0]) CurlNodePortFail(oc, nodeList.Items[2].Name, nodeList.Items[1].Name, nodeportsLocal[0]) exutil.By("4.3 Validate UDN pod to default network nodeport service with externalTrafficPolicy=Local traffic") CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[0].Name, nodeportsLocal[0]) CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[1].Name, nodeportsLocal[0]) exutil.By("5. Create nodeport service with externalTrafficPolicy=Cluster in ns1 and ns2") nodeportsCluster := []string{} for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("5.%d Create pod and nodeport service with externalTrafficPolicy=Cluster in %s", i, ns[i])) removeResource(oc, true, true, "svc", "test-service"+strconv.Itoa(i), "-n", ns[i]) svcs[i].externalTrafficPolicy = "Cluster" svcs[i].createServiceFromParams(oc) nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns[i], svcs[i].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeportsCluster = append(nodeportsCluster, nodePort) } exutil.By("6. Validate pod/host to nodeport service with externalTrafficPolicy=Cluster traffic") for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("6.1.%d Validate pod to nodeport service with externalTrafficPolicy=Cluster traffic in %s", i, ns[i])) CurlPod2NodePortPass(oc, ns[i], pods[i].name, nodeList.Items[0].Name, nodeportsCluster[i]) CurlPod2NodePortPass(oc, ns[i], pods[i].name, nodeList.Items[1].Name, nodeportsCluster[i]) } exutil.By("6.2 Validate host to nodeport service with externalTrafficPolicy=Cluster traffic on default network") CurlNodePortPass(oc, nodeList.Items[2].Name, nodeList.Items[0].Name, nodeportsCluster[0]) CurlNodePortPass(oc, nodeList.Items[2].Name, nodeList.Items[1].Name, nodeportsCluster[0]) exutil.By("6.3 Validate UDN pod to default network nodeport service with externalTrafficPolicy=Cluster traffic") CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[0].Name, nodeportsLocal[0]) CurlPod2NodePortFail(oc, ns[1], pods[1].name, nodeList.Items[1].Name, nodeportsLocal[0]) })
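The pass/fail assertions above follow the standard NodePort reachability rules: externalTrafficPolicy=Local only answers on nodes hosting a backend endpoint, Cluster answers on every node, and cross-network access (UDN pod to a default-network NodePort) fails regardless of policy. A tiny stand-alone sketch (illustrative names, not framework code) encoding the same-network expectation table:

package main

import "fmt"

// reachable reports whether a curl to node:nodePort should succeed for a
// client on the same network as the service.
func reachable(policy string, targetIsBackendNode bool) bool {
	// Local only answers on nodes with a local endpoint; Cluster answers
	// everywhere and may forward to an endpoint on another node.
	return policy == "Cluster" || targetIsBackendNode
}

func main() {
	for _, policy := range []string{"Local", "Cluster"} {
		for _, onBackend := range []bool{true, false} {
			fmt.Printf("policy=%-7s targetIsBackendNode=%-5v -> reachable=%v\n",
				policy, onBackend, reachable(policy, onBackend))
		}
	}
}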
test case
openshift/openshift-tests-private
1684a4a6-131c-47af-b7ad-f65e15e784aa
Author:huirwang-High-76014-Validate LoadBalancer service for UDN pods (Layer3/Layer2)
['"context"', '"fmt"', '"net"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-High-76014-Validate LoadBalancer service for UDN pods (Layer3/Layer2)", func() { buildPruningBaseDir := exutil.FixturePath("testdata", "networking") udnPodTemplate := filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") udnCRDSingleStack := filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml") udnL2CRDSingleStack := filepath.Join(testDataDirUDN, "udn_crd_layer2_singlestack_template.yaml") platform := exutil.CheckPlatform(oc) e2e.Logf("platform %s", platform) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") || strings.Contains(platform, "aws") if !acceptedPlatform { g.Skip("Test cases should be run on connected AWS,GCP, Azure, skip for other platforms or disconnected cluster!!") } exutil.By("1. Get namespaces and create a new namespace ") oc.CreateNamespaceUDN() ns1 := oc.Namespace() oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadNS := []string{ns1, ns2} exutil.By("2. Create CRD for UDN for layer 3") udncrd := udnCRDResource{ crdname: "udn-network-l3-76014", namespace: nadNS[0], role: "Primary", mtu: 1400, cidr: "10.200.0.0/16", prefix: 24, template: udnCRDSingleStack, } udncrd.createUdnCRDSingleStack(oc) err := waitUDNCRDApplied(oc, nadNS[0], udncrd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("3. Create CRD for UDN for layer 2") udnl2crd := udnCRDResource{ crdname: "udn-network-l2-76014", namespace: nadNS[1], role: "Primary", mtu: 1400, cidr: "10.210.0.0/16", template: udnL2CRDSingleStack, } udnl2crd.createLayer2SingleStackUDNCRD(oc) err = waitUDNCRDApplied(oc, nadNS[1], udnl2crd.crdname) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("4. Create a pod for service per namespace.") pod := make([]udnPodResource, 2) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("5. Create LoadBalancer service.") svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "hello-pod", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i], svc[i].servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc[i].servicename)) } exutil.By("6. 
Get LoadBalancer service URL.") var svcExternalIP [2]string for i := 0; i < 2; i++ { if platform == "aws" { svcExternalIP[i] = getLBSVCHostname(oc, nadNS[i], svc[i].servicename) } else { svcExternalIP[i] = getLBSVCIP(oc, nadNS[i], svc[i].servicename) } e2e.Logf("Got externalIP service IP: %v from namespace %s", svcExternalIP[i], nadNS[i]) o.Expect(svcExternalIP[i]).NotTo(o.BeEmpty()) } exutil.By("7.Curl the service from test runner\n") var svcURL, svcCmd [2]string for i := 0; i < 2; i++ { svcURL[i] = net.JoinHostPort(svcExternalIP[i], "27017") svcCmd[i] = fmt.Sprintf("curl %s --connect-timeout 30", svcURL[i]) e2e.Logf("\n svcCmd: %v\n", svcCmd[i]) err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcCmd[i]).Output() if err1 != nil || !strings.Contains(string(output), "Hello OpenShift") { e2e.Logf("got err:%v, and try next round", err1) return false, nil } e2e.Logf("The external service %v access passed!", svcURL[i]) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Fail to curl the externalIP service from test runner %s", svcURL[i])) } })
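The test polls the LoadBalancer endpoint by exec'ing curl inside a wait loop. A self-contained sketch of the same readiness loop using only the Go standard library (the host value is a placeholder for the EXTERNAL-IP/hostname read from the service; this is illustrative, not framework code):

package main

import (
	"fmt"
	"io"
	"net"
	"net/http"
	"strings"
	"time"
)

// waitForHello polls http://host:27017 until the hello-pod banner appears or
// the deadline passes, mirroring the PollUntilContextTimeout loop above.
func waitForHello(host string, timeout time.Duration) error {
	url := "http://" + net.JoinHostPort(host, "27017")
	client := &http.Client{Timeout: 30 * time.Second}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if strings.Contains(string(body), "Hello OpenShift") {
				return nil
			}
		}
		time.Sleep(10 * time.Second) // same cadence as the test's poll interval
	}
	return fmt.Errorf("service at %s never returned Hello OpenShift", url)
}

func main() {
	// Placeholder external IP; a real caller would pass the LB address.
	fmt.Println(waitForHello("203.0.113.10", 2*time.Minute))
}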
test case
openshift/openshift-tests-private
70424adc-2e15-4435-a282-546361873474
Author:huirwang-NonHyperShiftHOST-High-76019-Validate ExternalIP service for UDN pods (Layer3), [Disruptive]
['"context"', '"fmt"', '"net"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-NonHyperShiftHOST-High-76019-Validate ExternalIP service for UDN pods (Layer3), [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, ns1, "udn-network-76019-ns1", ipv4cidr, ipv6cidr, cidr, "layer3") exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("6. Find externalIP") nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[0].Name) externalIP := nodeIP2 exutil.By("7.Patch update network.config to enable externalIP \n") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") if ipStackType == "dualstack" { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\",\""+nodeIP1+"\"]}}}}") } else { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\"]}}}}") } exutil.By("8.Patch ExternalIP to service\n") patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(externalIP)) exutil.By("9.Validate the externalIP service can be accessed from another udn pod. 
\n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("10.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) if ipStackType == "dualstack" { exutil.By("10.Retest it with IPv6 address in dualstack cluster\n") exutil.By("11.Patch IPv6 ExternalIP to service\n") externalIP := nodeIP1 patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("12.Validate the externalIP service can be accessed from another udn pod. \n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("14.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } })
test case
openshift/openshift-tests-private
b4238c33-b76c-41f0-be7d-eba1f9c5007d
Author:huirwang-High-77827-Restarting ovn pods should not break service. [Disruptive]
['"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-High-77827-Restarting ovn pods should not break service. [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml") ) exutil.By("1.Get first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create 2nd namespace") oc.CreateNamespaceUDN() ns2 := oc.Namespace() nadNS := []string{ns1, ns2} exutil.By("3. Create CRD for layer3 UDN in first namespace.") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } createGeneralUDNCRD(oc, nadNS[0], "udn-network-77827-ns1", ipv4cidr, ipv6cidr, cidr, "layer3") exutil.By("4. Create CRD for layer2 UDN in second namespace.") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" } } createGeneralUDNCRD(oc, nadNS[1], "udn-network-77827-ns2", ipv4cidr, ipv6cidr, cidr, "layer2") exutil.By("5. Create service and test pods in both namespaces.") for i := 0; i < len(nadNS); i++ { exutil.By(fmt.Sprintf("Create a service in namespace %v.", nadNS[i])) createResourceFromFile(oc, nadNS[i], testSvcFile) waitForPodWithLabelReady(oc, nadNS[i], "name=test-pods") svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } if ipStackType == "dualstack" { svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { exutil.By(fmt.Sprintf("Recreate dualstack service in namepsace %v.", nadNS[i])) removeResource(oc, true, true, "service", "test-service", "-n", nadNS[i]) svc[i] = genericServiceResource{ servicename: "test-service", namespace: nadNS[i], protocol: "TCP", selector: "test-pods", serviceType: "ClusterIP", ipFamilyPolicy: "PreferDualStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i]).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring("test-service")) } } exutil.By("6. Create a client test pod in ns1,ns2") pod := make([]udnPodResource, 2) for i := 0; i < 2; i++ { pod[i] = udnPodResource{ name: "hello-pod", namespace: nadNS[i], label: "hello-pod", template: udnPodTemplate, } pod[i].createUdnPod(oc) waitPodReady(oc, pod[i].namespace, pod[i].name) } exutil.By("7. Restart ovn pods") err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", "openshift-ovn-kubernetes").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.AssertAllPodsToBeReady(oc, "openshift-ovn-kubernetes") exutil.By("8. Verify the service can be accessed for layer2.") for i := 0; i < 3; i++ { CurlPod2SvcPass(oc, nadNS[1], nadNS[1], pod[1].name, "test-service") } exutil.By("9. 
Verify the service can be accessed for layer3.") /* https://issues.redhat.com/browse/OCPBUGS-44174 for i := 0; i < 3; i++ { CurlPod2SvcPass(oc, nadNS[0], nadNS[0], pod[0].name, "test-service") }*/ })
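Step 8 re-checks the connection several times rather than once, since a single successful curl right after the OVN pods restart can mask intermittent failures. A generic stand-alone sketch of that "restart, then re-verify N times" pattern (illustrative names; the check func stands in for a curl-based probe like CurlPod2SvcPass):

package main

import (
	"fmt"
	"time"
)

// verifyRepeatedly runs check n times with a pause in between and fails fast
// on the first error, mirroring the test's repeated post-restart curls.
func verifyRepeatedly(n int, interval time.Duration, check func() error) error {
	for i := 0; i < n; i++ {
		if err := check(); err != nil {
			return fmt.Errorf("attempt %d/%d failed: %w", i+1, n, err)
		}
		time.Sleep(interval)
	}
	return nil
}

func main() {
	attempts := 0
	err := verifyRepeatedly(3, 0, func() error {
		attempts++ // a real probe would curl the service here
		return nil
	})
	fmt.Println("err:", err, "attempts:", attempts)
}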
test case
openshift/openshift-tests-private
b310e863-1fa5-4a7c-a23f-adc85330807e
Author:huirwang-NonHyperShiftHOST-High-76731-Validate ExternalIP service for UDN pods (Layer2), [Disruptive]
['"context"', '"fmt"', '"net"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-NonHyperShiftHOST-High-76731-Validate ExternalIP service for UDN pods (Layer2), [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ") } exutil.By("1. Obtain first namespace") oc.CreateNamespaceUDN() ns1 := oc.Namespace() exutil.By("2. Create CRD for UDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, ns1, "udn-network-76731-ns1", ipv4cidr, ipv6cidr, cidr, "layer2") exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodTemplate, } pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1 on different node as pod1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: ns1, nodename: nodeList.Items[1].Name, template: pingPodTemplate, } pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns1, "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("6. Find externalIP") nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[0].Name) externalIP := nodeIP2 exutil.By("7.Patch update network.config to enable externalIP \n") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") if ipStackType == "dualstack" { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\",\""+nodeIP1+"\"]}}}}") } else { patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+nodeIP2+"\"]}}}}") } exutil.By("8.Patch ExternalIP to service\n") patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(externalIP)) exutil.By("9.Validate the externalIP service can be accessed from another udn pod. 
\n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("10.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) if ipStackType == "dualstack" { exutil.By("10.Retest it with IPv6 address in dualstack cluster\n") exutil.By("11.Patch IPv6 ExternalIP to service\n") externalIP := nodeIP1 patchResourceAsAdmin(oc, "svc/test-service", fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP), ns1) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns1, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("12.Validate the externalIP service can be accessed from another udn pod. \n") _, err = e2eoutput.RunHostCmdWithRetries(ns1, pod2ns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP, "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("14.Validate the externalIP service can be accessed from same node as service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.Validate the externalIP service can be accessed from different node than service backend pod \n") _, err = exutil.DebugNode(oc, nodeList.Items[1].Name, "curl", net.JoinHostPort(externalIP, "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } })
test case
openshift/openshift-tests-private
4f61cb4b-07a1-42b9-8529-2b2c73d08e4d
Author:huirwang-High-78767-Validate service for CUDN(Layer3)
['"context"', '"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-High-78767-Validate service for CUDN(Layer3)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" key = "test.cudn.layer3" crdName = "cudn-network-78767" crdName2 = "cudn-network-78767-2" values = []string{"value-78767-1", "value-78767-2"} values2 = []string{"value2-78767-1", "value2-78767-2"} cudnNS = []string{} ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Create CRD for CUDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer3", values) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2. Create 2 namespaces and add related values.") for i := 0; i < 2; i++ { oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s-", key)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: cudnNS[0], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns1.name, "-n", pod1ns1.namespace) pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: cudnNS[0], nodename: nodeList.Items[1].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod2ns1.name, "-n", pod2ns1.namespace) pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", cudnNS[0], "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns2. ") pod1ns2 := pingPodResourceNode{ name: "hello-pod-3", namespace: cudnNS[1], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns2.name, "-n", pod1ns2.namespace) pod1ns2.createPingPodNode(oc) waitPodReady(oc, pod1ns2.namespace, pod1ns2.name) exutil.By("6. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: cudnNS[0], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. 
Verify ClusterIP service can be accessed from both pod2 in ns1 and pod3 in ns2") CurlPod2SvcPass(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("8. Create third namespace") oc.SetupProject() cudnNS = append(cudnNS, oc.Namespace()) exutil.By("9. Create service and pods which are on default network.") createResourceFromFile(oc, cudnNS[2], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[2], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, cudnNS[2], "name=test-pods") exutil.By("10. Not be able to access cudn service from default network.") CurlPod2SvcFail(oc, cudnNS[2], cudnNS[0], testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from cudn network.") CurlPod2SvcFail(oc, cudnNS[1], cudnNS[2], pod2ns1.name, "test-service") exutil.By("11. Create fourth namespace for cudn pod") oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s=%s", key, values2[0])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("12. Create CRD in fourth namespace") if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer func() { oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s-", key)).Execute() removeResource(oc, true, true, "namespace", cudnNS[3]) removeResource(oc, true, true, "clusteruserdefinednetwork", crdName2) }() _, err = createCUDNCRD(oc, key, crdName2, ipv4cidr, ipv6cidr, cidr, "layer3", values2) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. Create a udn pod in fourth namespace") createResourceFromFile(oc, cudnNS[3], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[3], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, cudnNS[3], "name=test-pods") exutil.By("14. Verify different cudn network, service was isolated.") CurlPod2SvcFail(oc, cudnNS[3], cudnNS[0], testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for cudn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) exutil.By("16. 
Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", cudnNS[0]).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", cudnNS[0], svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) })
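A ClusterUserDefinedNetwork picks up namespaces by label: here, namespaces labeled with key=values[i] join the first CUDN, while values2 selects the second, isolated one. A stand-alone sketch (illustrative names; it re-implements only the "In" operator check, not the real selector library) of that matching logic:

package main

import "fmt"

// selectorMatches reports whether a namespace's labels satisfy a
// matchExpressions {key, operator: In, values} clause.
func selectorMatches(nsLabels map[string]string, key string, values []string) bool {
	v, ok := nsLabels[key]
	if !ok {
		return false
	}
	for _, want := range values {
		if v == want {
			return true
		}
	}
	return false
}

func main() {
	key := "test.cudn.layer3"
	values := []string{"value-78767-1", "value-78767-2"}
	for _, labels := range []map[string]string{
		{key: "value-78767-1"},  // selected: joins the first CUDN
		{key: "value2-78767-1"}, // not selected: belongs to the second CUDN
		{"unrelated": "x"},      // not selected: stays on the default network
	} {
		fmt.Println(labels, "->", selectorMatches(labels, key, values))
	}
}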
test case
openshift/openshift-tests-private
931b1bf8-6512-4c54-8cb5-24af8ad2b29e
Author:huirwang-High-78768-Validate service for CUDN(Layer2)
['"context"', '"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-High-78768-Validate service for CUDN(Layer2)", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml") ipFamilyPolicy = "SingleStack" key = "test.cudn.layer2" crdName = "cudn-network-78768" crdName2 = "cudn-network-78768-2" values = []string{"value-78768-1", "value-78768-2"} values2 = []string{"value2-78768-1", "value2-78768-2"} cudnNS = []string{} ) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } exutil.By("1. Create CRD for CUDN") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName) _, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer2", values) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2. Create 2 namespaces and add related values.") for i := 0; i < 2; i++ { oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s-", key)).Execute() err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("3. Create a pod deployed on node0 as backend pod for service.") pod1ns1 := pingPodResourceNode{ name: "hello-pod-1", namespace: cudnNS[0], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns1.name, "-n", pod1ns1.namespace) pod1ns1.createPingPodNode(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) g.By("4. create a udn client pod in ns1") pod2ns1 := pingPodResourceNode{ name: "hello-pod-2", namespace: cudnNS[0], nodename: nodeList.Items[1].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod2ns1.name, "-n", pod2ns1.namespace) pod2ns1.createPingPodNode(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) // Update label for pod2 to a different one err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", cudnNS[0], "pod", pod2ns1.name, "name=hello-pod-2", "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("5. create a udn client pod in ns2. ") pod1ns2 := pingPodResourceNode{ name: "hello-pod-3", namespace: cudnNS[1], nodename: nodeList.Items[0].Name, template: pingPodTemplate, } defer removeResource(oc, true, true, "pod", pod1ns2.name, "-n", pod1ns2.namespace) pod1ns2.createPingPodNode(oc) waitPodReady(oc, pod1ns2.namespace, pod1ns2.name) exutil.By("6. create a ClusterIP service in ns1") svc := genericServiceResource{ servicename: "test-service", namespace: cudnNS[0], protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("7. 
Verify ClusterIP service can be accessed from both pod2 in ns1 and pod3 in ns2") CurlPod2SvcPass(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("8. Create third namespace") oc.SetupProject() cudnNS = append(cudnNS, oc.Namespace()) exutil.By("9. Create service and pods which are on default network.") createResourceFromFile(oc, cudnNS[2], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[2], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodName := getPodName(oc, cudnNS[2], "name=test-pods") exutil.By("10. Not be able to access cudn service from default network.") CurlPod2SvcFail(oc, cudnNS[2], cudnNS[0], testPodName[0], svc.servicename) exutil.By("11. Not be able to access default network service from cudn network.") CurlPod2SvcFail(oc, cudnNS[1], cudnNS[2], pod2ns1.name, "test-service") exutil.By("11. Create fourth namespace for cudn pod") oc.CreateNamespaceUDN() cudnNS = append(cudnNS, oc.Namespace()) err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s=%s", key, values2[0])).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("12. Create CRD in fourth namespace") if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/60" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/60" } } defer func() { oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[3], fmt.Sprintf("%s-", key)).Execute() removeResource(oc, true, true, "namespace", cudnNS[3]) removeResource(oc, true, true, "clusteruserdefinednetwork", crdName2) }() _, err = createCUDNCRD(oc, key, crdName2, ipv4cidr, ipv6cidr, cidr, "layer2", values2) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("13. Create a udn pod in fourth namespace") createResourceFromFile(oc, cudnNS[3], testPodFile) err = waitForPodWithLabelReady(oc, cudnNS[3], "name=test-pods") exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready") testPodNameNS3 := getPodName(oc, cudnNS[3], "name=test-pods") exutil.By("14. Verify different cudn network, service was isolated.") CurlPod2SvcFail(oc, cudnNS[3], cudnNS[0], testPodNameNS3[0], svc.servicename) exutil.By("15.Update internalTrafficPolicy as Local for cudn service in ns1.") patch := `[{"op": "replace", "path": "/spec/internalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("15.1. Verify ClusterIP service can be accessed from pod3 which is deployed same node as service back-end pod.") CurlPod2SvcPass(oc, cudnNS[1], cudnNS[0], pod1ns2.name, svc.servicename) exutil.By("15.2. Verify ClusterIP service can NOT be accessed from pod2 which is deployed different node as service back-end pod.") CurlPod2SvcFail(oc, cudnNS[0], cudnNS[0], pod2ns1.name, svc.servicename) exutil.By("16. 
Verify nodePort service can be accessed.") exutil.By("16.1 Delete testservice from ns1") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", cudnNS[0]).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("16.2 Create testservice with NodePort in ns1") svc.serviceType = "NodePort" svc.createServiceFromParams(oc) exutil.By("16.3 From a third node, be able to access node0:nodePort") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", cudnNS[0], svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) thirdNode := nodeList.Items[2].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("16.4 From a third node, be able to access node1:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[1].Name, nodePort) exutil.By("16.5 From pod node, be able to access nodePort service") CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, nodeList.Items[0].Name, nodeList.Items[1].Name, nodePort) exutil.By("17.Update externalTrafficPolicy as Local for udn service in ns1.") patch = `[{"op": "replace", "path": "/spec/externalTrafficPolicy", "value": "Local"}]` err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("service/test-service", "-n", cudnNS[0], "-p", patch, "--type=json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("17.1 From a third node, be able to access node0:nodePort") CurlNodePortPass(oc, thirdNode, nodeList.Items[0].Name, nodePort) exutil.By("17.2 From a third node, NOT be able to access node1:nodePort") CurlNodePortFail(oc, thirdNode, nodeList.Items[1].Name, nodePort) })
test case
openshift/openshift-tests-private
a2bb7cff-b6a8-4cd7-8790-b5ba84439fd8
Author:qiowang-ConnectedOnly-PreChkUpgrade-High-79060-Validate UDN LoadBalancer service post upgrade
['"context"', '"fmt"', '"net"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:qiowang-ConnectedOnly-PreChkUpgrade-High-79060-Validate UDN LoadBalancer service post upgrade", func() { platform := exutil.CheckPlatform(oc) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") || strings.Contains(platform, "aws") if !acceptedPlatform { g.Skip("Test cases should be run on connected GCP/Azure/AWS, skip for other platforms or disconnected cluster!!") } var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") rcPingPodTemplate = filepath.Join(buildPruningBaseDir, "rc-ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") nadNS = []string{"79060-upgrade-ns1", "79060-upgrade-ns2"} servicename = "test-service" ) exutil.By("1. Create two namespaces") for i := 0; i < 2; i++ { oc.CreateSpecificNamespaceUDN(nadNS[i]) } exutil.By("2. Create CRD for layer3 UDN in namespace ns1") createGeneralUDNCRD(oc, nadNS[0], "udn-network-"+nadNS[0], "", "", "10.200.0.0/16", "layer3") exutil.By("3. Create CRD for layer2 UDN in namespace ns2") createGeneralUDNCRD(oc, nadNS[1], "udn-network-"+nadNS[1], "", "", "10.151.0.0/16", "layer2") exutil.By("4. Create pod for service per namespace") pods := make([]replicationControllerPingPodResource, 2) for i := 0; i < 2; i++ { pods[i] = replicationControllerPingPodResource{ name: "hello-pod", replicas: 1, namespace: nadNS[i], template: rcPingPodTemplate, } pods[i].createReplicaController(oc) err := waitForPodWithLabelReady(oc, pods[i].namespace, "name="+pods[i].name) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", pods[i].name)) } exutil.By("5. Create LoadBalancer service per namespace") svc := make([]genericServiceResource, 2) for i := 0; i < 2; i++ { svc[i] = genericServiceResource{ servicename: servicename, namespace: nadNS[i], protocol: "TCP", selector: pods[i].name, serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", nadNS[i], svc[i].servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc[i].servicename)) } exutil.By("6. Get LoadBalancer service URL") var svcExternalIP [2]string for i := 0; i < 2; i++ { if platform == "aws" { svcExternalIP[i] = getLBSVCHostname(oc, nadNS[i], svc[i].servicename) } else { svcExternalIP[i] = getLBSVCIP(oc, nadNS[i], svc[i].servicename) } e2e.Logf("Got service EXTERNAL-IP %s from namespace %s", svcExternalIP[i], nadNS[i]) o.Expect(svcExternalIP[i]).NotTo(o.BeEmpty()) } exutil.By("7. Curl the service from test runner") for i := 0; i < 2; i++ { svcURL := net.JoinHostPort(svcExternalIP[i], "27017") svcCmd := fmt.Sprintf("curl %s --connect-timeout 30", svcURL) e2e.Logf("svcCmd: %s", svcCmd) err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcCmd).Output() if err1 != nil || !strings.Contains(string(output), "Hello OpenShift") { e2e.Logf("got err: %v, and try next round", err1) return false, nil } e2e.Logf("The service %s access passed!", svcURL) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Fail to curl the service EXTERNAL-IP %s from test runner", svcURL)) } })
test case
openshift/openshift-tests-private
81cd2ab6-b6d2-471e-91b2-c7ed7d6a6660
Author:qiowang-ConnectedOnly-PstChkUpgrade-High-79060-Validate UDN LoadBalancer service post upgrade
['"context"', '"fmt"', '"net"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:qiowang-ConnectedOnly-PstChkUpgrade-High-79060-Validate UDN LoadBalancer service post upgrade", func() { platform := exutil.CheckPlatform(oc) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") || strings.Contains(platform, "aws") if !acceptedPlatform { g.Skip("Test cases should be run on connected GCP/Azure/AWS, skip for other platforms or disconnected cluster!!") } var ( nadNS = []string{"79060-upgrade-ns1", "79060-upgrade-ns2"} servicename = "test-service" ) exutil.By("1. Check the two namespaces are carried over") for i := 0; i < 2; i++ { nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", nadNS[i]).Execute() if nsErr != nil { g.Skip("Skip the PstChkUpgrade test as namespace " + nadNS[i] + " does not exist, PreChkUpgrade test did not run") } } for i := 0; i < 2; i++ { defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", nadNS[i], "--ignore-not-found=true").Execute() } exutil.By("2. Get LoadBalancer service URL") var svcExternalIP [2]string for i := 0; i < 2; i++ { if platform == "aws" { svcExternalIP[i] = getLBSVCHostname(oc, nadNS[i], servicename) } else { svcExternalIP[i] = getLBSVCIP(oc, nadNS[i], servicename) } e2e.Logf("Got service EXTERNAL-IP %s from namespace %s", svcExternalIP[i], nadNS[i]) o.Expect(svcExternalIP[i]).NotTo(o.BeEmpty()) } exutil.By("3. Curl the service from test runner") for i := 0; i < 2; i++ { svcURL := net.JoinHostPort(svcExternalIP[i], "27017") svcCmd := fmt.Sprintf("curl %s --connect-timeout 30", svcURL) e2e.Logf("svcCmd: %s", svcCmd) err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 120*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcCmd).Output() if err1 != nil || !strings.Contains(string(output), "Hello OpenShift") { e2e.Logf("got err: %v, and try next round", err1) return false, nil } e2e.Logf("The service %s access passed!", svcURL) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Fail to curl the service EXTERNAL-IP %s from test runner", svcURL)) } })
test case
openshift/openshift-tests-private
9dfe6d1c-ee7d-4305-9a76-cd878aa659e6
Author:huirwang-PreChkUpgrade-High-79034-Validate UDN clusterIP/nodePort service post upgrade.
['"context"', '"fmt"', '"path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-PreChkUpgrade-High-79034-Validate UDN clusterIP/nodePort service post upgrade.", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml") allNS = []string{"79034-upgrade-ns1", "79034-upgrade-ns2", "79034-upgrade-ns3", "79034-upgrade-ns4"} rcPingPodTemplate = filepath.Join(buildPruningBaseDir, "rc-ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) exutil.By("1. create four new namespaces") for i := 0; i < 4; i++ { oc.CreateSpecificNamespaceUDN(allNS[i]) } exutil.By("2. Create CRD for layer3 UDN in namespace ns1, ns2") ipStackType := checkIPStackType(oc) var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipFamilyPolicy = "PreferDualStack" ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" } } for i := 0; i < 2; i++ { createGeneralUDNCRD(oc, allNS[i], "udn-network-"+allNS[i], ipv4cidr, ipv6cidr, cidr, "layer3") } exutil.By("3. Create CRD for layer2 UDN in namespace ns3,ns4.") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipFamilyPolicy = "PreferDualStack" ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" } } for i := 2; i < 4; i++ { createGeneralUDNCRD(oc, allNS[i], "udn-network-"+allNS[i], ipv4cidr, ipv6cidr, cidr, "layer2") } exutil.By("4. Create test pod in each namespace") podsBackend := make([]replicationControllerPingPodResource, 4) for i := 0; i < 4; i++ { podsBackend[i] = replicationControllerPingPodResource{ name: "hello-pod", replicas: 1, namespace: allNS[i], template: rcPingPodTemplate, } podsBackend[i].createReplicaController(oc) err := waitForPodWithLabelReady(oc, podsBackend[i].namespace, "name="+podsBackend[i].name) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pods with label name=%s not ready", podsBackend[i].name)) } exutil.By("5. Create ClusterIP service in ns1,ns3,nodePort svc in ns2,ns4") svc := make([]genericServiceResource, 4) var serviceType string for i := 0; i < 4; i++ { if i == 1 || i == 3 { serviceType = "NodePort" } else { serviceType = "ClusterIP" } svc[i] = genericServiceResource{ servicename: "test-service", namespace: allNS[i], protocol: "TCP", selector: "hello-pod", serviceType: serviceType, ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc[i].createServiceFromParams(oc) } exutil.By("6. Create udn clients in each namespace") var udnClient []string for i := 0; i < 4; i++ { createResourceFromFile(oc, allNS[i], statefulSetHelloPod) podErr := waitForPodWithLabelReady(oc, allNS[i], "app=hello") exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready") udnClient = append(udnClient, getPodName(oc, allNS[i], "app=hello")[0]) } exutil.By("7. Verify the pod2service connection in ns1 for layer3.") CurlPod2SvcPass(oc, allNS[0], allNS[0], udnClient[0], svc[0].servicename) exutil.By("8. Verify the pod2service connection in ns3 for layer2.") CurlPod2SvcPass(oc, allNS[2], allNS[2], udnClient[2], svc[2].servicename) exutil.By("9. Verify the pod2service isolation from ns2 to ns1 for layer3") CurlPod2SvcFail(oc, allNS[1], allNS[0], udnClient[1], svc[0].servicename) exutil.By("10. 
Verify the pod2service isolation from ns4 to ns3 for layer2") CurlPod2SvcFail(oc, allNS[3], allNS[2], udnClient[3], svc[2].servicename) exutil.By("11. Verify the nodePort service in ns2 can be accessed for layer3.") nodePortNS2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[1], svc[1].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("There are less than 2 worker nodes and nodePort service validation will be skipped! ") } clientNode := nodeList.Items[0].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS2) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS2) exutil.By("12. Verify the nodePort service in ns4 can be accessed for layer2.") nodePortNS4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[3], svc[3].servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS4) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS4) })
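Several steps here read a service's NodePort with the jsonpath query {.spec.ports[*].nodePort}. A stand-alone sketch of the plain-Go equivalent (the JSON document is a trimmed stand-in for `oc get service -o json`; struct names are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// service models just the fields the jsonpath query touches.
type service struct {
	Spec struct {
		Ports []struct {
			NodePort int `json:"nodePort"`
		} `json:"ports"`
	} `json:"spec"`
}

func main() {
	raw := []byte(`{"spec":{"ports":[{"port":27017,"nodePort":31234}]}}`)
	var svc service
	if err := json.Unmarshal(raw, &svc); err != nil {
		panic(err)
	}
	for _, p := range svc.Spec.Ports {
		fmt.Println("nodePort:", p.NodePort) // prints 31234
	}
}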
test case
openshift/openshift-tests-private
bb6e80d6-d373-4d63-9701-dbc1a4ff2b07
Author:huirwang-PstChkUpgrade-High-79034-Validate UDN clusterIP/nodePort service post upgrade.
['"context"', '"fmt"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:huirwang-PstChkUpgrade-High-79034-Validate UDN clusterIP/nodePort service post upgrade.", func() { var ( allNS = []string{"79034-upgrade-ns1", "79034-upgrade-ns2", "79034-upgrade-ns3", "79034-upgrade-ns4"} svcName = "test-service" ) for i := 0; i < 4; i++ { nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", allNS[i]).Execute() if nsErr != nil { g.Skip(fmt.Sprintf("Skip the PstChkUpgrade test as %s namespace does not exist, PreChkUpgrade test did not run", allNS[i])) } } for i := 0; i < 4; i++ { defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", allNS[i], "--ignore-not-found=true").Execute() } exutil.By("1. Get udn clients from preserved four namespaces") var udnClient []string for i := 0; i < 4; i++ { podErr := waitForPodWithLabelReady(oc, allNS[i], "app=hello") exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready") udnClient = append(udnClient, getPodName(oc, allNS[i], "app=hello")[0]) } exutil.By("2. Verify the pod2service connection in ns1 for layer3.") CurlPod2SvcPass(oc, allNS[0], allNS[0], udnClient[0], svcName) exutil.By("3. Verify the pod2service connection in ns3 for layer2.") CurlPod2SvcPass(oc, allNS[2], allNS[2], udnClient[2], svcName) exutil.By("4. Verify the pod2service isolation from ns2 to ns1 for layer3") CurlPod2SvcFail(oc, allNS[1], allNS[0], udnClient[1], svcName) exutil.By("5. Verify the pod2service isolation from ns4 to ns3 for layer2") CurlPod2SvcFail(oc, allNS[3], allNS[2], udnClient[3], svcName) exutil.By("6. Verify the nodePort service in ns2 can be accessed for layer3.") nodePortNS2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[1], svcName, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("There are less than 2 worker nodes and nodePort service validation will be skipped! ") } clientNode := nodeList.Items[0].Name o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS2) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS2) exutil.By("7. Verify the nodePort service in ns4 can be accessed for layer2.") nodePortNS4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[3], svcName, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) CurlNodePortPass(oc, clientNode, nodeList.Items[0].Name, nodePortNS4) CurlNodePortPass(oc, clientNode, nodeList.Items[1].Name, nodePortNS4) })
test case
openshift/openshift-tests-private
7b41afe9-f5af-4f27-a667-51e361f5e6ba
Author:qiowang-NonHyperShiftHOST-PreChkUpgrade-Medium-44790-High-79163-Validate ExternalIP service for default and UDN pods post upgrade [Disruptive]
['"context"', '"fmt"', '"net"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:qiowang-NonHyperShiftHOST-PreChkUpgrade-Medium-44790-High-79163-Validate ExternalIP service for default and UDN pods post upgrade [Disruptive]", func() { nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } ipStackType := checkIPStackType(oc) var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") rcPingPodTemplate = filepath.Join(buildPruningBaseDir, "rc-ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") allNS = []string{"79163-upgrade-ns1", "79163-upgrade-ns2", "79163-upgrade-ns3"} ipFamilyPolicy = "SingleStack" serviceName = "test-service" ) exutil.By("1. Create three namespaces, ns1 and ns2 for udn network testing, ns3 for default network testing") for i := 0; i < 2; i++ { oc.CreateSpecificNamespaceUDN(allNS[i]) } oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", allNS[2]).Execute() exutil.By("2. Find externalIP for testing") var externalIP, externalIPv6 []string for i := 0; i < 3; i++ { nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[i].Name) externalIP = append(externalIP, nodeIP2) if ipStackType == "dualstack" { externalIPv6 = append(externalIPv6, nodeIP1) } } exutil.By("3. Patch network.config to enable externalIP") allowedCIDRs := `"` + externalIP[0] + `","` + externalIP[1] + `","` + externalIP[2] + `"` if ipStackType == "dualstack" { allowedCIDRs = allowedCIDRs + `,"` + externalIPv6[0] + `","` + externalIPv6[1] + `","` + externalIPv6[2] + `"` } patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":["+allowedCIDRs+"]}}}}") exutil.By("4. Create CRD for layer3 UDN in namespace ns1") var cidr, ipv4cidr, ipv6cidr string if ipStackType == "ipv4single" { cidr = "10.150.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2010:100:200::0/48" } else { ipv4cidr = "10.150.0.0/16" ipv6cidr = "2010:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, allNS[0], "udn-network-"+allNS[0], ipv4cidr, ipv6cidr, cidr, "layer3") exutil.By("5. Create CRD for layer2 UDN in namespace ns2") if ipStackType == "ipv4single" { cidr = "10.151.0.0/16" } else { if ipStackType == "ipv6single" { cidr = "2011:100:200::0/48" } else { ipv4cidr = "10.151.0.0/16" ipv6cidr = "2011:100:200::0/48" ipFamilyPolicy = "PreferDualStack" } } createGeneralUDNCRD(oc, allNS[1], "udn-network-"+allNS[1], ipv4cidr, ipv6cidr, cidr, "layer2") exutil.By("6. 
Create pod as backend pod for service in each ns") var podsBackendName []string for i := 0; i < 3; i++ { podsBackend := replicationControllerPingPodResource{ name: "hello-pod-1", replicas: 0, namespace: allNS[i], template: rcPingPodTemplate, } podsBackend.createReplicaController(oc) e2e.Logf("schedual backend pod to " + nodeList.Items[i].Name) patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("rc/"+podsBackend.name, "-n", allNS[i], "-p", "{\"spec\":{\"replicas\":1,\"template\":{\"spec\":{\"nodeSelector\":{\"kubernetes.io/hostname\":\""+nodeList.Items[i].Name+"\"}}}}}", "--type=merge").Execute() o.Expect(patchErr).NotTo(o.HaveOccurred()) err := waitForPodWithLabelReady(oc, podsBackend.namespace, "name="+podsBackend.name) exutil.AssertWaitPollNoErr(err, "The backend pod is not ready") podsBackendName = append(podsBackendName, getPodName(oc, allNS[i], "name="+podsBackend.name)[0]) } exutil.By("7. Create udn client pod on different node in ns1 and ns2") var udnClientName []string for i := 0; i < 2; i++ { udnClient := replicationControllerPingPodResource{ name: "hello-pod-2", replicas: 0, namespace: allNS[i], template: rcPingPodTemplate, } udnClient.createReplicaController(oc) e2e.Logf("schedual udn client pod to " + nodeList.Items[2].Name) patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("rc/"+udnClient.name, "-n", allNS[i], "-p", "{\"spec\":{\"replicas\":1,\"template\":{\"spec\":{\"nodeSelector\":{\"kubernetes.io/hostname\":\""+nodeList.Items[2].Name+"\"}}}}}", "--type=merge").Execute() o.Expect(patchErr).NotTo(o.HaveOccurred()) err := waitForPodWithLabelReady(oc, udnClient.namespace, "name="+udnClient.name) exutil.AssertWaitPollNoErr(err, "The udn client pod is not ready") udnClientName = append(udnClientName, getPodName(oc, allNS[i], "name="+udnClient.name)[0]) } exutil.By("8. Create a ClusterIP service in each ns") for i := 0; i < 3; i++ { svc := genericServiceResource{ servicename: serviceName, namespace: allNS[i], protocol: "TCP", selector: "hello-pod-1", serviceType: "ClusterIP", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", template: genericServiceTemplate, } svc.createServiceFromParams(oc) e2e.Logf("Patch ExternalIP to service") patchResourceAsAdmin(oc, "svc/"+svc.servicename, fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\"]}}", externalIP[i]), allNS[i]) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[i], svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(externalIP[i])) } exutil.By("9. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("10. Validate the externalIP service for layer3 UDN") } else { exutil.By("11. 
Validate the externalIP service for layer2 UDN") } exutil.By("Validate the externalIP service can be accessed from another udn pod") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } if ipStackType == "dualstack" { exutil.By("Retest it with IPv6 address in dualstack cluster") exutil.By("12. Patch IPv6 ExternalIP to service") for i := 0; i < 3; i++ { patchResourceAsAdmin(oc, "svc/"+serviceName, fmt.Sprintf("{\"spec\":{\"externalIPs\": [\"%s\",\"%s\"]}}", externalIP[i], externalIPv6[i]), allNS[i]) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", allNS[i], serviceName).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(serviceName)) } exutil.By("13. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("14. Validate the externalIP service for layer3 UDN - ipv6") } else { exutil.By("15. Validate the externalIP service for layer2 UDN - ipv6") } exutil.By("Validate the externalIP service can be accessed from another udn pod - ipv6") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } } })
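Step 3 above is the gatekeeper for everything that follows: an ExternalIP may only be attached to a Service once it is allowlisted in the cluster network config. A minimal sketch of that patch with placeholder addresses; patchResourceAsAdmin and both JSON payloads are used exactly as in the tests here, while the 192.0.2.x values are documentation addresses standing in for the dynamically discovered node IPs.

// Illustrative only: allowlist two placeholder node IPs for ExternalIP use,
// then restore the empty policy afterwards, mirroring the deferred cleanup
// in the PstChkUpgrade test below.
allowed := `{"spec":{"externalIP":{"policy":{"allowedCIDRs":["192.0.2.10","192.0.2.11"]}}}}`
patchResourceAsAdmin(oc, "network/cluster", allowed)
defer patchResourceAsAdmin(oc, "network/cluster", `{"spec":{"externalIP":{"policy":{}}}}`)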
test case
openshift/openshift-tests-private
c4051f61-d064-4926-b4a3-5e5310763774
Author:qiowang-NonHyperShiftHOST-PstChkUpgrade-Medium-44790-High-79163-Validate ExternalIP service for default and UDN pods post upgrade [Disruptive]
['"context"', '"net"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/service_udn.go
g.It("Author:qiowang-NonHyperShiftHOST-PstChkUpgrade-Medium-44790-High-79163-Validate ExternalIP service for default and UDN pods post upgrade [Disruptive]", func() { defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 3 { g.Skip("This test requires at least 3 worker nodes which is not fulfilled. ") } ipStackType := checkIPStackType(oc) var ( allNS = []string{"79163-upgrade-ns1", "79163-upgrade-ns2", "79163-upgrade-ns3"} podBackendLabel = "hello-pod-1" udnClientLabel = "hello-pod-2" ) exutil.By("1. Check the three namespaces are carried over") for i := 0; i < 3; i++ { nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", allNS[i]).Execute() if nsErr != nil { g.Skip("Skip the PstChkUpgrade test as namespace " + allNS[i] + " does not exist, PreChkUpgrade test did not run") } } for i := 0; i < 3; i++ { defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", allNS[i], "--ignore-not-found=true").Execute() } exutil.By("2. Get externalIP for testing") var externalIP, externalIPv6 []string for i := 0; i < 3; i++ { nodeIP1, nodeIP2 := getNodeIP(oc, nodeList.Items[i].Name) externalIP = append(externalIP, nodeIP2) if ipStackType == "dualstack" { externalIPv6 = append(externalIPv6, nodeIP1) } } exutil.By("3. Get backend pod from preserved namespaces") var podsBackendName []string for i := 0; i < 3; i++ { err := waitForPodWithLabelReady(oc, allNS[i], "name="+podBackendLabel) exutil.AssertWaitPollNoErr(err, "The backend pod is not ready") podsBackendName = append(podsBackendName, getPodName(oc, allNS[i], "name="+podBackendLabel)[0]) } exutil.By("4. Get udn clients from preserved namespaces") var udnClientName []string for i := 0; i < 2; i++ { err := waitForPodWithLabelReady(oc, allNS[i], "name="+udnClientLabel) exutil.AssertWaitPollNoErr(err, "The udn client pod is not ready") udnClientName = append(udnClientName, getPodName(oc, allNS[i], "name="+udnClientLabel)[0]) } exutil.By("5. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("6. Validate the externalIP service for layer3 UDN") } else { exutil.By("7. 
Validate the externalIP service for layer2 UDN") } exutil.By("Validate the externalIP service can be accessed from another udn pod") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIP[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIP[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } if ipStackType == "dualstack" { exutil.By("Retest it with IPv6 address in dualstack cluster") exutil.By("8. Validate the externalIP service for default network") _, err := e2eoutput.RunHostCmdWithRetries(allNS[2], podsBackendName[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[2], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) for i := 0; i < 2; i++ { if i == 0 { exutil.By("9. Validate the externalIP service for layer3 UDN - ipv6") } else { exutil.By("10. Validate the externalIP service for layer2 UDN - ipv6") } exutil.By("Validate the externalIP service can be accessed from another udn pod - ipv6") _, err := e2eoutput.RunHostCmdWithRetries(allNS[i], udnClientName[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(externalIPv6[i], "27017"), 5*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from same node as service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[i].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the externalIP service can be accessed from different node than service backend pod - ipv6") _, err = exutil.DebugNode(oc, nodeList.Items[2].Name, "curl", net.JoinHostPort(externalIPv6[i], "27017"), "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } } })
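Both ExternalIP tests build their curl targets with net.JoinHostPort, which is what lets one code path serve IPv4 and IPv6 alike: the Go standard library brackets IPv6 literals automatically. A two-line illustration with documentation addresses:

// net.JoinHostPort from the Go standard library "net" package:
net.JoinHostPort("192.0.2.10", "27017")  // "192.0.2.10:27017"
net.JoinHostPort("2001:db8::10", "27017") // "[2001:db8::10]:27017"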
test
openshift/openshift-tests-private
31cf6749-951f-4afe-8d4a-815aef4939f4
services
import ( "context" "fmt" "net" "os/exec" "path/filepath" "strconv" "strings" "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" exutil "github.com/openshift/openshift-tests-private/test/extended/util" clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra" "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" )
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
package networking import ( "context" "fmt" "net" "os/exec" "path/filepath" "strconv" "strings" "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" exutil "github.com/openshift/openshift-tests-private/test/extended/util" clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra" "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" ) var _ = g.Describe("[sig-networking] SDN service", func() { defer g.GinkgoRecover() var oc = exutil.NewCLI("networking-services", exutil.KubeConfigPath()) g.BeforeEach(func() { networkType := checkNetworkType(oc) if !strings.Contains(networkType, "ovn") { g.Skip("Skip testing on non-ovn cluster!!!") } }) // author: [email protected] g.It("Author:huirwang-WRS-High-50347-V-ACS.04-[FdpOvnOvs] internalTrafficPolicy set Local for pod/node to service access", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } g.By("Create a namespace") oc.SetupProject() ns1 := oc.Namespace() g.By("create 1st hello pod in ns1") pod1 := pingPodResourceNode{ name: "hello-pod1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod1.createPingPodNode(oc) waitPodReady(oc, ns1, pod1.name) g.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Local", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } svc.ipFamilyPolicy = "SingleStack" svc.createServiceFromParams(oc) g.By("Create second namespace") oc.SetupProject() ns2 := oc.Namespace() g.By("Create a pod hello-pod2 in second namespace, pod located the same node") pod2 := pingPodResourceNode{ name: "hello-pod2", namespace: ns2, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod2.createPingPodNode(oc) waitPodReady(oc, ns2, pod2.name) g.By("Create second pod hello-pod3 in second namespace, pod located on the different node") pod3 := pingPodResourceNode{ name: "hello-pod3", namespace: ns2, nodename: nodeList.Items[1].Name, template: pingPodNodeTemplate, } pod3.createPingPodNode(oc) waitPodReady(oc, ns2, pod3.name) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failing") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node0 to service:port") CurlNode2SvcPass(oc, pod1.nodename, ns1, "test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" { g.By("Delete testservice from ns") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Checking pod to svc:port behavior with PreferDualStack Service") svc.ipFamilyPolicy = "PreferDualStack" svc.createServiceFromParams(oc) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failing") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node0 to service:port") //Due to bug 2078691,skip below step for now. //CurlNode2SvcPass(oc, pod1.nodename, ns1,"test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") } }) // author: [email protected] g.It("Author:huirwang-WRS-High-50348-V-ACS.04-[FdpOvnOvs] internalTrafficPolicy set Local for pod/node to service access with hostnetwork pod backend. [Serial]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } g.By("Create a namespace") oc.SetupProject() ns1 := oc.Namespace() //Required for hostnetwork pod err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("create 1st hello pod in ns1") pod1 := pingPodResourceNode{ name: "hello-pod1", namespace: ns1, nodename: nodeList.Items[0].Name, template: hostNetworkPodTemplate, } pod1.createPingPodNode(oc) waitPodReady(oc, ns1, pod1.name) g.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Local", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } svc.ipFamilyPolicy = "SingleStack" svc.createServiceFromParams(oc) g.By("Create second namespace") oc.SetupProject() ns2 := oc.Namespace() g.By("Create a pod hello-pod2 in second namespace, pod located the same node") pod2 := pingPodResourceNode{ name: "hello-pod2", namespace: ns2, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod2.createPingPodNode(oc) waitPodReady(oc, ns2, pod2.name) g.By("Create second pod hello-pod3 in second namespace, pod located on the different node") pod3 := pingPodResourceNode{ name: "hello-pod3", namespace: ns2, nodename: nodeList.Items[1].Name, template: pingPodNodeTemplate, } pod3.createPingPodNode(oc) waitPodReady(oc, ns2, pod3.name) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failing") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") g.By("Curl from node0 to service:port") CurlNode2SvcPass(oc, nodeList.Items[0].Name, ns1, "test-service") ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" { g.By("Delete testservice from ns") err =
oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Checking pod to svc:port behavior now on with PreferDualStack Service") svc.ipFamilyPolicy = "PreferDualStack" svc.createServiceFromParams(oc) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failing") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") } }) // author: [email protected] g.It("Author:weliang-Medium-57344-Add support for service session affinity timeout", func() { //Bug: https://issues.redhat.com/browse/OCPBUGS-4502 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") servicesBaseDir = exutil.FixturePath("testdata", "networking/services") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml") sessionAffinitySvcv4 = filepath.Join(servicesBaseDir, "sessionaffinity-svcv4.yaml") sessionAffinitySvcdualstack = filepath.Join(servicesBaseDir, "sessionaffinity-svcdualstack.yaml") sessionAffinityPod1 = filepath.Join(servicesBaseDir, "sessionaffinity-pod1.yaml") sessionAffinityPod2 = filepath.Join(servicesBaseDir, "sessionaffinity-pod2.yaml") ) ns1 := oc.Namespace() g.By("create two pods which will be the endpoints for sessionaffinity service in ns1") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinityPod1, "-n", ns1).Execute() defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinityPod2, "-n", ns1).Execute() createResourceFromFile(oc, ns1, sessionAffinityPod1) waitPodReady(oc, ns1, "blue-pod-1") createResourceFromFile(oc, ns1, sessionAffinityPod2) waitPodReady(oc, ns1, "blue-pod-2") g.By("create a testing pod in ns1") pod1 := pingPodResource{ name: "hello-pod1", namespace: ns1, template: pingPodTemplate, } defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1.name, "-n", pod1.namespace).Execute() pod1.createPingPod(oc) waitPodReady(oc, ns1, pod1.name) ipStackType := checkIPStackType(oc) if ipStackType == "ipv4single" { g.By("test ipv4 singlestack cluster") g.By("create a sessionaffinity service in ns1") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinitySvcv4, "-n", ns1).Execute() createsvcerr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", sessionAffinitySvcv4, "-n", ns1).Execute() o.Expect(createsvcerr).NotTo(o.HaveOccurred()) svcoutput, svcerr := oc.AsAdmin().Run("get").Args("service", "-n", ns1).Output() o.Expect(svcerr).NotTo(o.HaveOccurred()) o.Expect(svcoutput).To(o.ContainSubstring("sessionaffinitysvcv4")) serviceIPv4 := getSvcIPv4(oc, ns1, "sessionaffinitysvcv4") // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will LB after curl sleep more than 10s g.By("Traffic will LB to two endpoints with sleep 15s in curl") trafficoutput, trafficerr := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 11; done") o.Expect(trafficerr).NotTo(o.HaveOccurred()) if strings.Contains(trafficoutput, "Hello Blue Pod-1") && strings.Contains(trafficoutput, "Hello Blue Pod-2") { e2e.Logf("Pass : Traffic LB to two endpoints when curl sleep more than 10s") } else { e2e.Failf("Fail: Traffic does not LB to two endpoints when curl sleep more than 10s") } // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will not LB after 
curl sleep less than 10s g.By("Traffic will not LB to two endpoints without sleep 15s in curl") trafficoutput1, trafficerr1 := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 9; done") o.Expect(trafficerr1).NotTo(o.HaveOccurred()) if (strings.Contains(trafficoutput1, "Hello Blue Pod-1") && !strings.Contains(trafficoutput1, "Hello Blue Pod-2")) || (strings.Contains(trafficoutput1, "Hello Blue Pod-2") && !strings.Contains(trafficoutput1, "Hello Blue Pod-1")) { e2e.Logf("Pass : Traffic does not LB to two endpoints when curl sleep less than 10s") } else { e2e.Failf("Fail: Traffic LB to two endpoints when curl sleep less than 10s") } } if ipStackType == "dualstack" { g.By("test dualstack cluster") g.By("create a sessionaffinity service in ns1") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinitySvcdualstack, "-n", ns1).Execute() createsvcerr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", sessionAffinitySvcdualstack, "-n", ns1).Execute() o.Expect(createsvcerr).NotTo(o.HaveOccurred()) svcoutput, svcerr := oc.AsAdmin().Run("get").Args("service", "-n", ns1).Output() o.Expect(svcerr).NotTo(o.HaveOccurred()) o.Expect(svcoutput).To(o.ContainSubstring("sessionaffinitysvcdualstack")) serviceIPv4 := getSvcIPv4(oc, ns1, "sessionaffinitysvcdualstack") serviceIPv6 := getSvcIPv6(oc, ns1, "sessionaffinitysvcdualstack") // Test ipv4 traffic in dualstack cluster // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will LB after curl sleep more than 10s g.By("Traffic will LB to two endpoints with sleep 15s in curl") trafficoutput, trafficerr := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 11; done") o.Expect(trafficerr).NotTo(o.HaveOccurred()) if strings.Contains(trafficoutput, "Hello Blue Pod-1") && strings.Contains(trafficoutput, "Hello Blue Pod-2") { e2e.Logf("Pass : Traffic LB to two endpoints when curl sleep more than 10s") } else { e2e.Failf("Fail: Traffic does not LB to two endpoints when curl sleep more than 10s") } // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will not LB after curl sleep less than 10s g.By("Traffic will not LB to two endpoints without sleep 15s in curl") trafficoutput1, trafficerr1 := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 9; done") o.Expect(trafficerr1).NotTo(o.HaveOccurred()) if (strings.Contains(trafficoutput1, "Hello Blue Pod-1") && !strings.Contains(trafficoutput1, "Hello Blue Pod-2")) || (strings.Contains(trafficoutput1, "Hello Blue Pod-2") && !strings.Contains(trafficoutput1, "Hello Blue Pod-1")) { e2e.Logf("Pass : Traffic does not LB to two endpoints when curl sleep less than 10s") } else { e2e.Failf("Fail: Traffic LB to two endpoints when curl sleep less than 10s") } // Tes ipv6 traffic in dualstack cluster // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will LB after curl sleep more than 10s g.By("Traffic will LB to two endpoints with sleep 15s in curl") v6trafficoutput, v6trafficerr := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl -g -6 ["+serviceIPv6+"]:8080; sleep 11; done") o.Expect(v6trafficerr).NotTo(o.HaveOccurred()) if strings.Contains(v6trafficoutput, "Hello Blue Pod-1") && strings.Contains(v6trafficoutput, "Hello Blue Pod-2") { e2e.Logf("Pass : Traffic LB to two endpoints when curl sleep more than 10s") } else { e2e.Failf("Fail: Traffic does not LB to two endpoints when curl 
sleep more than 10s") } // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will not LB after curl sleep less than 10s g.By("Traffic will not LB to two endpoints without sleep 15s in curl") v6trafficoutput1, v6trafficerr1 := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl -g -6 ["+serviceIPv6+"]:8080; sleep 9; done") o.Expect(v6trafficerr1).NotTo(o.HaveOccurred()) if (strings.Contains(v6trafficoutput1, "Hello Blue Pod-1") && !strings.Contains(v6trafficoutput1, "Hello Blue Pod-2")) || (strings.Contains(v6trafficoutput1, "Hello Blue Pod-2") && !strings.Contains(v6trafficoutput1, "Hello Blue Pod-1")) { e2e.Logf("Pass : Traffic does not LB to two endpoints when curl sleep less than 10s") } else { e2e.Failf("Fail: Traffic LB to two endpoints when curl sleep less than 10s") } } }) // author: [email protected] g.It("Longduration-NonPreRelease-Author:asood-High-62293-[FdpOvnOvs] Validate all the constructs are created on logical routers and logical switches for a service type loadbalancer. [Disruptive]", func() { // Bug: https://issues.redhat.com/browse/OCPBUGS-5930 (Duplicate bug https://issues.redhat.com/browse/OCPBUGS-7000) var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") svcEndpoints []svcEndpontDetails lsConstruct string lrConstruct string ) platform := exutil.CheckPlatform(oc) //vSphere does not have LB service support yet e2e.Logf("platform %s", platform) if !(strings.Contains(platform, "gcp") || strings.Contains(platform, "aws") || strings.Contains(platform, "azure")) { g.Skip("Skip for non-supported auto scaling machineset platforms!!") } workerNodes, err := exutil.GetClusterNodesBy(oc, "worker") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Get namespace") ns := oc.Namespace() exutil.By(fmt.Sprintf("create 1st hello pod in %s", ns)) pod := pingPodResourceNode{ name: "hello-pod1", namespace: ns, nodename: workerNodes[0], template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, ns, pod.name) exutil.By("Create a test service which is in front of the above pod") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("Create a new machineset to add new nodes") clusterinfra.SkipConditionally(oc) infrastructureName := clusterinfra.GetInfrastructureName(oc) machinesetName := infrastructureName + "-62293" ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2} defer ms.DeleteMachineSet(oc) ms.CreateMachineSet(oc) clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName) machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName) nodeName0 := clusterinfra.GetNodeNameFromMachine(oc, machineName[0]) nodeName1 := clusterinfra.GetNodeNameFromMachine(oc, machineName[1]) e2e.Logf("The nodes %s and %s added successfully", nodeName0, nodeName1) exutil.By(fmt.Sprintf("create 2nd hello pod in %s on newly created node %s", ns, nodeName0)) pod = pingPodResourceNode{ name: "hello-pod2", namespace: ns, nodename: nodeName0, template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, ns, pod.name) exutil.By("Get backend pod 
details of user service") allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=hello-pod") o.Expect(getPodErr).NotTo(o.HaveOccurred()) o.Expect(len(allPods)).NotTo(o.BeEquivalentTo(0)) for _, eachPod := range allPods { nodeName, nodeNameErr := exutil.GetPodNodeName(oc, ns, eachPod) o.Expect(nodeNameErr).NotTo(o.HaveOccurred()) podIP := getPodIPv4(oc, ns, eachPod) ovnkubeNodePod, ovnKubeNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(ovnKubeNodePodErr).NotTo(o.HaveOccurred()) svcEndpoint := svcEndpontDetails{ ovnKubeNodePod: ovnkubeNodePod, nodeName: nodeName, podIP: podIP, } svcEndpoints = append(svcEndpoints, svcEndpoint) } exutil.By("Get logical route and switch on node for endpoints of both services to validate they exist on both new and old node") for _, eachEndpoint := range svcEndpoints { lsConstruct = eachEndpoint.getOVNConstruct(oc, "ls-list") o.Expect(lsConstruct).NotTo(o.BeEmpty()) e2e.Logf("Logical Switch %s on node %s", lsConstruct, eachEndpoint.nodeName) o.Expect(eachEndpoint.getOVNLBContruct(oc, "ls-lb-list", lsConstruct)).To(o.BeTrue()) lrConstruct = eachEndpoint.getOVNConstruct(oc, "lr-list") o.Expect(lrConstruct).NotTo(o.BeEmpty()) e2e.Logf("Logical Router %s on node %s", lrConstruct, eachEndpoint.nodeName) o.Expect(eachEndpoint.getOVNLBContruct(oc, "lr-lb-list", lrConstruct)).To(o.BeTrue()) } exutil.By("Validate kubernetes service is reachable from all nodes including new nodes") allNodes, nodeErr := exutil.GetAllNodes(oc) o.Expect(nodeErr).NotTo(o.HaveOccurred()) o.Expect(len(allNodes)).NotTo(o.BeEquivalentTo(0)) for i := 0; i < len(allNodes); i++ { output, err := exutil.DebugNodeWithChroot(oc, allNodes[i], "bash", "-c", "curl -s -k https://172.30.0.1/healthz --connect-timeout 5") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(output, "ok")).To(o.BeTrue()) } }) // author: [email protected] g.It("Longduration-NonPreRelease-Author:asood-High-63156-Verify the nodeport is not allocated to VIP based LoadBalancer service type. 
[Disruptive]", func() { // LoadBalancer service implementation are different on cloud provider and bare metal platform // https://issues.redhat.com/browse/OCPBUGS-10874 (aws and azure pending support) var ( testDataDir = exutil.FixturePath("testdata", "networking/metallb") loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml") serviceLabelKey = "environ" serviceLabelValue = "Test" svc_names = [2]string{"hello-world-cluster", "hello-world-local"} svc_etp = [2]string{"Cluster", "Local"} namespaces []string ) platform := exutil.CheckPlatform(oc) e2e.Logf("platform %s", platform) if !(strings.Contains(platform, "gcp")) { g.Skip("Skip for non-supported platorms!") } masterNodes, err := exutil.GetClusterNodesBy(oc, "master") o.Expect(err).NotTo(o.HaveOccurred()) g.By("Get first namespace and create another") ns := oc.Namespace() namespaces = append(namespaces, ns) oc.SetupProject() ns = oc.Namespace() namespaces = append(namespaces, ns) var desiredMode string origMode := getOVNGatewayMode(oc) defer switchOVNGatewayMode(oc, origMode) g.By("Validate services in original gateway mode " + origMode) for j := 0; j < 2; j++ { for i := 0; i < 2; i++ { svcName := svc_names[i] + "-" + strconv.Itoa(j) g.By("Create a service " + svc_names[i] + " with ExternalTrafficPolicy " + svc_etp[i]) svc := loadBalancerServiceResource{ name: svcName, namespace: namespaces[i], externaltrafficpolicy: svc_etp[i], labelKey: serviceLabelKey, labelValue: serviceLabelValue, allocateLoadBalancerNodePorts: false, template: loadBalancerServiceTemplate, } result := createLoadBalancerService(oc, svc, loadBalancerServiceTemplate) o.Expect(result).To(o.BeTrue()) g.By("Check LoadBalancer service status") err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name) o.Expect(err).NotTo(o.HaveOccurred()) g.By("Get LoadBalancer service IP") svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name) g.By("Validate service") result = validateService(oc, masterNodes[0], svcIP) o.Expect(result).To(o.BeTrue()) g.By("Check nodePort is not assigned to service") nodePort := getLoadBalancerSvcNodePort(oc, svc.namespace, svc.name) o.Expect(nodePort).To(o.BeEmpty()) } if j == 0 { g.By("Change the shared gateway mode to local gateway mode") if origMode == "local" { desiredMode = "shared" } else { desiredMode = "local" } e2e.Logf("Cluster is currently on gateway mode %s", origMode) e2e.Logf("Desired mode is %s", desiredMode) switchOVNGatewayMode(oc, desiredMode) g.By("Validate services in modified gateway mode " + desiredMode) } } }) // author: [email protected] g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:huirwang-Medium-65796-Recreated service should have correct load_balancer nb entries for same name load_balancer. 
[Serial]", func() { // From customer bug https://issues.redhat.com/browse/OCPBUGS-11716 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) exutil.By("Get namespace ") ns := oc.Namespace() exutil.By("create hello pod in namespace") pod1 := pingPodResource{ name: "hello-pod", namespace: ns, template: pingPodTemplate, } pod1.createPingPod(oc) waitPodReady(oc, ns, pod1.name) ipStack := checkIPStackType(oc) var podIPv6, podIPv4 string if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, pod1.name) } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, pod1.name) } else { podIPv4, _ = getPodIP(oc, ns, pod1.name) } exutil.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } if ipStack == "dualstack" { svc.ipFamilyPolicy = "PreferDualStack" } else { svc.ipFamilyPolicy = "SingleStack" } svc.createServiceFromParams(oc) exutil.By("Check service status") svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("Get service IP") var svcIP6, svcIP4, clusterVIP string if ipStack == "dualstack" || ipStack == "ipv6single" { svcIP6, svcIP4 = getSvcIP(oc, svc.namespace, svc.servicename) } else { svcIP4, _ = getSvcIP(oc, svc.namespace, svc.servicename) } e2e.Logf("ipstack type: %s, SVC's IPv4: %s, SVC's IPv6: %s", ipStack, svcIP4, svcIP6) exutil.By("Check nb loadbalancer entries") ovnPod := getOVNKMasterOVNkubeNode(oc) o.Expect(ovnPod).ShouldNot(o.BeEmpty()) e2e.Logf("\n ovnKMasterPod: %v\n", ovnPod) lbCmd := fmt.Sprintf("ovn-nbctl --column vip find load_balancer name=Service_%s/%s_TCP_cluster", ns, svc.servicename) lbOutput, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmd) e2e.Logf("\nlbOutput: %s\n", lbOutput) o.Expect(err).NotTo(o.HaveOccurred()) if ipStack == "dualstack" || ipStack == "ipv6single" { clusterVIP = fmt.Sprintf("\"[%s]:%s\"=\"[%s]:%s\"", svcIP6, "27017", podIPv6, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } if ipStack == "dualstack" || ipStack == "ipv4single" { clusterVIP = fmt.Sprintf("\"%s:%s\"=\"%s:%s\"", svcIP4, "27017", podIPv4, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } exutil.By("Delete svc") removeResource(oc, true, true, "service", svc.servicename, "-n", ns) exutil.By("Manually add load_balancer entry in nb with same name as previous one.") // no need to defer to remove, as this will be overrided by following service recreated. 
var lbCmdAdd string if ipStack == "dualstack" || ipStack == "ipv6single" { lbCmdAdd = fmt.Sprintf("ovn-nbctl lb-add \"Service_%s/%s_TCP_cluster\" [%s]:%s [%s]:%s", ns, svc.servicename, svcIP6, "27017", podIPv6, "8080") _, err = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmdAdd) o.Expect(err).NotTo(o.HaveOccurred()) } if ipStack == "dualstack" || ipStack == "ipv4single" { lbCmdAdd = fmt.Sprintf("ovn-nbctl lb-add \"Service_%s/%s_TCP_cluster\" %s:%s %s:%s", ns, svc.servicename, svcIP4, "27017", podIPv4, "8080") _, err = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmdAdd) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("Recreate svc") svc.createServiceFromParams(oc) svcOutput, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("Get service IP again") if ipStack == "dualstack" || ipStack == "ipv6single" { svcIP6, svcIP4 = getSvcIP(oc, svc.namespace, svc.servicename) } else { svcIP4, _ = getSvcIP(oc, svc.namespace, svc.servicename) } e2e.Logf("ipstack type: %s, recreated SVC's IPv4: %s, SVC's IPv6: %s", ipStack, svcIP4, svcIP6) exutil.By("No error logs") podlogs, getLogsErr := oc.AsAdmin().Run("logs").Args(ovnPod, "-n", "openshift-ovn-kubernetes", "-c", "ovnkube-controller", "--since", "90s").Output() o.Expect(getLogsErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(podlogs, "failed to ensure service")).ShouldNot(o.BeTrue()) exutil.By("Check nb load_balancer entries again!") lbOutput, err = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmd) e2e.Logf("\nlbOutput after SVC recreated: %s\n", lbOutput) o.Expect(err).NotTo(o.HaveOccurred()) if ipStack == "dualstack" || ipStack == "ipv6single" { clusterVIP = fmt.Sprintf("\"[%s]:%s\"=\"[%s]:%s\"", svcIP6, "27017", podIPv6, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } if ipStack == "dualstack" || ipStack == "ipv4single" { clusterVIP = fmt.Sprintf("\"%s:%s\"=\"%s:%s\"", svcIP4, "27017", podIPv4, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } exutil.By("Validate service") CurlPod2SvcPass(oc, ns, ns, pod1.name, svc.servicename) }) // author: [email protected] g.It("Author:asood-High-46015-[FdpOvnOvs] Verify traffic to outside the cluster redirected when OVN is used and NodePort service is configured.", func() { // Customer bug https://bugzilla.redhat.com/show_bug.cgi?id=1946696 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) ipStackType := checkIPStackType(oc) o.Expect(ipStackType).NotTo(o.BeEmpty()) exutil.By("1. Get list of worker nodes") nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("Not enough nodes available, need at least two nodes for the test, skip the case!!") } exutil.By("2. Get namespace ") ns := oc.Namespace() exutil.By("3. Create a hello pod in ns") pod := pingPodResourceNode{ name: "hello-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, pod.namespace, pod.name) exutil.By("4. Create a nodePort type service fronting the above pod") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "NodePort", ipFamilyPolicy: "", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } if ipStackType == "dualstack" { svc.ipFamilyPolicy = "PreferDualStack" } else { svc.ipFamilyPolicy = "SingleStack" } defer removeResource(oc, true, true, "service", svc.servicename, "-n", svc.namespace) svc.createServiceFromParams(oc) exutil.By("5. Get NodePort at which service listens.") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. Validate external traffic to node port is redirected.") CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort) curlCmd := fmt.Sprintf("curl -4 -v http://www.google.de:%s --connect-timeout 5", nodePort) resp, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", curlCmd) if (err != nil) || (resp != "") { o.Expect(strings.Contains(resp, "Hello OpenShift")).To(o.BeFalse()) } }) //[email protected] g.It("NonPreRelease-Longduration-Author:asood-Critical-63301-[FdpOvnOvs] Kube's API intermitent timeout via sdn or internal services from nodes or pods using hostnetwork. [Disruptive]", func() { // From customer bug https://issues.redhat.com/browse/OCPBUGS-5828 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) //The test can run only on platforms whose nodes are in the same subnet, as a service backed by a hostnetworked pod is accessible only on such clusters. //The test also adds a bad route, for testing purposes, on the node from which the service is accessed.
exutil.By("Check the platform if it is suitable for running the test") platform := exutil.CheckPlatform(oc) ipStackType := checkIPStackType(oc) if !strings.Contains(platform, "vsphere") && !strings.Contains(platform, "baremetal") { g.Skip("Unsupported platform, skipping the test") } if !strings.Contains(ipStackType, "ipv4single") { g.Skip("Unsupported stack, skipping the test") } exutil.By("Get the schedulable worker nodes in ready state") nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This test requires at least two worker nodes") } exutil.By("Switch the GW mode to Local") origMode := getOVNGatewayMode(oc) desiredMode := "local" defer switchOVNGatewayMode(oc, origMode) switchOVNGatewayMode(oc, desiredMode) exutil.By("Get namespace ") ns := oc.Namespace() err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Create pod on host network in namespace") pod1 := pingPodResourceNode{ name: "hello-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: hostNetworkPodTemplate, } pod1.createPingPodNode(oc) waitPodReady(oc, ns, pod1.name) exutil.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service-63301", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("Check service status") svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("Get service IP") //nodeIP1 and nodeIP2 will be IPv6 and IPv4 respectively in case of dual stack and IPv4/IPv6 in 2nd var case of single _, nodeIP := getNodeIP(oc, nodeList.Items[0].Name) var curlCmd, addRouteCmd, delRouteCmd string svcIPv4 := getSvcIPv4(oc, svc.namespace, svc.servicename) curlCmd = fmt.Sprintf("curl -v %s:27017 --connect-timeout 5", svcIPv4) addRouteCmd = fmt.Sprintf("route add %s gw 127.0.0.1 lo", nodeIP) delRouteCmd = fmt.Sprintf("route delete %s", nodeIP) exutil.By("Create another pod for pinging the service") pod2 := pingPodResourceNode{ name: "ping-hello-pod", namespace: ns, nodename: nodeList.Items[1].Name, template: pingPodNodeTemplate, } pod2.createPingPodNode(oc) waitPodReady(oc, pod2.namespace, pod2.name) exutil.LabelPod(oc, pod2.namespace, pod2.name, "name-") exutil.LabelPod(oc, pod2.namespace, pod2.name, "name=ping-hello-pod") exutil.By("Validate the service from pod on cluster network") CurlPod2SvcPass(oc, ns, ns, pod2.name, svc.servicename) exutil.By("Validate the service from pod on host network") output, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "bash", "-c", curlCmd) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeTrue()) exutil.By("Create a bad route to node where pod backing the service is running, on the host from where service is accessed ") defer exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", delRouteCmd) _, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", 
addRouteCmd) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the service from pod on cluster network to verify it fails") CurlPod2SvcFail(oc, ns, ns, pod2.name, svc.servicename) exutil.By("Validate the service from pod on host network to verify it fails") output, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "bash", "-c", curlCmd) if (err != nil) || (output != "") { o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeFalse()) } exutil.By("Delete the route that was added") _, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", delRouteCmd) o.Expect(err).NotTo(o.HaveOccurred()) }) // author: [email protected] g.It("Author:jechen-High-71385-OVNK only choose LB endpoints from ready pods unless there are only terminating pods still in serving state left to choose.", func() { // For customer bug https://issues.redhat.com/browse/OCPBUGS-24363 // OVNK choose LB endpoints in the following sequence: // 1. when there is/are pods in Ready state, ovnk ONLY choose endpoints of ready pods // 2. When there is/are no ready pods, ovnk choose endpoints that terminating + serving endpoints buildPruningBaseDir := exutil.FixturePath("testdata", "networking") testPodFile := filepath.Join(buildPruningBaseDir, "testpod-with-special-lifecycle.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") exutil.By("1.Get namespace \n") ns := oc.Namespace() exutil.By("2. Create test pods and scale test pods to 5 \n") createResourceFromFile(oc, ns, testPodFile) err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=5", "-n", ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = waitForPodWithLabelReady(oc, ns, "name=test-pods") exutil.AssertWaitPollNoErr(err, "Not all test pods with label name=test-pods are ready") exutil.By("3. Create a service in front of the above test pods \n") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "test-pods", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } ipStack := checkIPStackType(oc) if ipStack == "dualstack" { svc.ipFamilyPolicy = "PreferDualStack" } else { svc.ipFamilyPolicy = "SingleStack" } svc.createServiceFromParams(oc) exutil.By("4. Check OVN service lb status \n") svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("5. Get IP for the OVN service lb \n") var svcIPv6, svcIPv4, podIPv6, podIPv4 string if ipStack == "dualstack" || ipStack == "ipv6single" { svcIPv6, svcIPv4 = getSvcIP(oc, svc.namespace, svc.servicename) } else { svcIPv4, _ = getSvcIP(oc, svc.namespace, svc.servicename) } e2e.Logf("On this %s cluster, IP for service IP are svcIPv6: %s, svcIPv4: %s", ipStack, svcIPv6, svcIPv4) exutil.By("6. 
Check OVN service lb endpoints in northdb, it should include all running backend test pods \n") allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=test-pods") o.Expect(getPodErr).NotTo(o.HaveOccurred()) o.Expect(len(allPods)).NotTo(o.BeEquivalentTo(0)) var expectedEndpointsv6, expectedEndpointsv4 []string for _, eachPod := range allPods { if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, eachPod) expectedEndpointsv6 = append(expectedEndpointsv6, "["+podIPv6+"]:8080") expectedEndpointsv4 = append(expectedEndpointsv4, podIPv4+":8080") } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, eachPod) expectedEndpointsv6 = append(expectedEndpointsv6, "["+podIPv6+"]:8080") } else { podIPv4, _ = getPodIP(oc, ns, eachPod) expectedEndpointsv4 = append(expectedEndpointsv4, podIPv4+":8080") } } e2e.Logf("\n On this %s cluster, V6 endpoints of service lb are expected to be: %v\n", ipStack, expectedEndpointsv6) e2e.Logf("\n On this %s cluster, V4 endpoints of service lb are expected to be: %v\n", ipStack, expectedEndpointsv4) // check service lb endpoints in northdb on each node's ovnkube-pod nodeList, nodeErr := exutil.GetAllNodesbyOSType(oc, "linux") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(nodeErr).NotTo(o.HaveOccurred()) o.Expect(len(nodeList)).NotTo(o.BeEquivalentTo(0)) var endpointsv6, endpointsv4 []string var epErr error for _, eachNode := range nodeList { if ipStack == "dualstack" || ipStack == "ipv6single" { endpointsv6, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, "\\["+svcIPv6+"\\]:27017") e2e.Logf("\n Got V6 endpoints of service lb for node %s : %v\n", eachNode, expectedEndpointsv6) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(endpointsv6, expectedEndpointsv6)).Should(o.BeTrue(), fmt.Sprintf("V6 service lb endpoints on node %sdo not match expected endpoints!", eachNode)) } if ipStack == "dualstack" || ipStack == "ipv4single" { endpointsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") e2e.Logf("\n Got V4 endpoints of service lb for node %s : %v\n", eachNode, expectedEndpointsv4) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(endpointsv4, expectedEndpointsv4)).Should(o.BeTrue(), fmt.Sprintf("V4 service lb endpoints on node %sdo not match expected endpoints!", eachNode)) } } exutil.By("7. 
Scale test pods down to 2 \n") scaleErr := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=2", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) var terminatingPods []string o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 3 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 2") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) var expectedCleanedUpEPsv6, expectedCleanedUpEPsv4, expectedRemindedEPsv6, expectedRemindedEPsv4, actualFinalEPsv6, actualFinalEPsv4 []string for _, eachPod := range terminatingPods { if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, eachPod) expectedCleanedUpEPsv6 = append(expectedCleanedUpEPsv6, "["+podIPv6+"]:8080") expectedCleanedUpEPsv4 = append(expectedCleanedUpEPsv4, podIPv4+":8080") } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, eachPod) expectedCleanedUpEPsv6 = append(expectedCleanedUpEPsv6, "["+podIPv6+"]:8080") } else { podIPv4, _ = getPodIP(oc, ns, eachPod) expectedCleanedUpEPsv4 = append(expectedCleanedUpEPsv4, podIPv4+":8080") } } e2e.Logf("\n On this %s cluster, V6 endpoints of service lb are expected to be cleaned up: %v\n", ipStack, expectedCleanedUpEPsv6) e2e.Logf("\n On this %s cluster, V4 endpoints of service lb are expected to be cleaned up: %v\n", ipStack, expectedCleanedUpEPsv4) runningPods := getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") o.Expect(len(runningPods)).To(o.BeEquivalentTo(2)) e2e.Logf("\n runningPods: %v\n", runningPods) for _, eachPod := range runningPods { if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, eachPod) expectedRemindedEPsv6 = append(expectedRemindedEPsv6, "["+podIPv6+"]:8080") expectedRemindedEPsv4 = append(expectedRemindedEPsv4, podIPv4+":8080") } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, eachPod) expectedRemindedEPsv6 = append(expectedRemindedEPsv6, "["+podIPv6+"]:8080") } else { podIPv4, _ = getPodIP(oc, ns, eachPod) expectedRemindedEPsv4 = append(expectedRemindedEPsv4, podIPv4+":8080") } } e2e.Logf("\n On this %s cluster, V6 endpoints of service lb are expected to remain: %v\n", ipStack, expectedRemindedEPsv6) e2e.Logf("\n On this %s cluster, V4 endpoints of service lb are expected to remain: %v\n", ipStack, expectedRemindedEPsv4) exutil.By("8. 
Check lb-list entries in northdb again in each node's ovnkube-node pod, only Ready pods' endpoints remain in service lb endpoints \n") for _, eachNode := range nodeList { if ipStack == "dualstack" || ipStack == "ipv6single" { actualFinalEPsv6, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, "\\["+svcIPv6+"\\]:27017") e2e.Logf("\n\n After scale-down to 2, V6 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv6) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(actualFinalEPsv6, expectedRemindedEPsv6)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V6 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } if ipStack == "dualstack" || ipStack == "ipv4single" { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 2, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemindedEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } // Verify terminating pods' endpoints are not in final service lb endpoints if ipStack == "dualstack" || ipStack == "ipv6single" { for _, ep := range expectedCleanedUpEPsv6 { o.Expect(isValueInList(ep, actualFinalEPsv6)).ShouldNot(o.BeTrue(), fmt.Sprintf("After scale-down, terminating pod's V6 endpoint %s is not cleaned up from V6 service lb endpoint", ep)) } } if ipStack == "dualstack" || ipStack == "ipv4single" { for _, ep := range expectedCleanedUpEPsv4 { o.Expect(isValueInList(ep, actualFinalEPsv4)).ShouldNot(o.BeTrue(), fmt.Sprintf("After scale-down, terminating pod's V4 endpoint %s is not cleaned up from V4 service lb endpoint", ep)) } } } exutil.By("9. Wait for all three terminating pods from step 7-8 to disappear so that only two running pods are left\n") o.Eventually(func() bool { allPodsWithLabel := getPodName(oc, ns, "name=test-pods") runningPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") return len(runningPods) == len(allPodsWithLabel) }, "180s", "10s").Should(o.BeTrue(), "Terminating pods did not disappear after waiting enough time") exutil.By("10. Scale test pods down to 0 \n") scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=0", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 2 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 0") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) exutil.By("11. 
Check lb-list entries in northdb again in each node's ovnkube-node pod, verify that the two terminating but serving pods remained in service lb endpoints \n") // expectedRemindedEPsv4 or expectedRemindedEPsv6 or both are still expected in NBDB for a little while, // that is because these two pods transition from Running state to terminating but serving state and there is no other running pod available for _, eachNode := range nodeList { if ipStack == "dualstack" || ipStack == "ipv6single" { actualFinalEPsv6, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, "\\["+svcIPv6+"\\]:27017") e2e.Logf("\n\n After scale-down to 0, V6 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv6) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(actualFinalEPsv6, expectedRemindedEPsv6)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V6 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } if ipStack == "dualstack" || ipStack == "ipv4single" { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 0, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemindedEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } } }) // author: [email protected] g.It("Author:jechen-High-37033-ExternalVM access cluster through externalIP. [Disruptive]", func() { // This is for https://bugzilla.redhat.com/show_bug.cgi?id=1900118 and https://bugzilla.redhat.com/show_bug.cgi?id=1890270 buildPruningBaseDir := exutil.FixturePath("testdata", "networking") externalIPServiceTemplate := filepath.Join(buildPruningBaseDir, "externalip_service1-template.yaml") externalIPPodTemplate := filepath.Join(buildPruningBaseDir, "externalip_pod-template.yaml") var workers, nonExternalIPNodes []string var proxyHost, RDUHost, intf string if !(isPlatformSuitable(oc)) { g.Skip("These cases can only be run on networking team's private RDU clusters, skip for other environment!!!") } workers = excludeSriovNodes(oc) if len(workers) < 2 { g.Skip("Not enough nodes, need minimal 2 nodes on RDU for the test, skip the case!!") } msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output() if err != nil || strings.Contains(msg, "sriov.openshift-qe.sdn.com") { proxyHost = "10.8.1.181" RDUHost = "openshift-qe-028.lab.eng.rdu2.redhat.com" intf = "sriovbm" } if err != nil || strings.Contains(msg, "offload.openshift-qe.sdn.com") { proxyHost = "10.8.1.179" RDUHost = "openshift-qe-026.lab.eng.rdu2.redhat.com" intf = "offloadbm" } exutil.By("1. 
Get namespace, create an externalIP pod in it\n") ns := oc.Namespace() pod1 := externalIPPod{ name: "externalip-pod", namespace: ns, template: externalIPPodTemplate, } defer removeResource(oc, true, true, "pod", pod1.name, "-n", pod1.namespace) pod1.createExternalIPPod(oc) waitPodReady(oc, pod1.namespace, pod1.name) exutil.By("2.Find another node, get its host CIDR, and one unused IP in its subnet \n") externalIPPodNode, err := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(externalIPPodNode).NotTo(o.Equal("")) e2e.Logf("ExternalIP pod is on node: %s", externalIPPodNode) for _, node := range workers { if node != externalIPPodNode { nonExternalIPNodes = append(nonExternalIPNodes, node) } } e2e.Logf("\n nonExternalIPNodes are: %v\n", nonExternalIPNodes) sub := getEgressCIDRsForNode(oc, nonExternalIPNodes[0]) freeIPs := findUnUsedIPsOnNodeOrFail(oc, nonExternalIPNodes[0], sub, 1) o.Expect(len(freeIPs)).Should(o.Equal(1)) exutil.By("4.Patch update network.config with the host CIDR to enable externalIP \n") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+sub+"\"]}}}}") exutil.By("5.Create an externalIP service with the unused IP address obtained above as externalIP\n") svc := externalIPService{ name: "service-unsecure", namespace: ns, externalIP: freeIPs[0], template: externalIPServiceTemplate, } defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) parameters := []string{"--ignore-unknown-parameters=true", "-f", svc.template, "-p", "NAME=" + svc.name, "EXTERNALIP=" + svc.externalIP} exutil.ApplyNsResourceFromTemplate(oc, svc.namespace, parameters...) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.name).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.name)) g.By("Get the Node IP from any node, add a static route on the test runner host to assist the test") nodeIP := getNodeIPv4(oc, ns, nonExternalIPNodes[0]) ipRouteDeleteCmd := "ip route delete " + svc.externalIP defer sshRunCmd(RDUHost, "root", ipRouteDeleteCmd) ipRouteAddCmd := "ip route add " + svc.externalIP + " via " + nodeIP + " dev " + intf err = sshRunCmd(proxyHost, "root", ipRouteAddCmd) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6.Validate the externalIP service from external of the cluster (from test runner)\n") svc4URL := net.JoinHostPort(svc.externalIP, "27017") svcChkCmd := fmt.Sprintf("curl -H 'Cache-Control: no-cache' -x 'http://%s:8888' %s --connect-timeout 5", proxyHost, svc4URL) e2e.Logf("\n svcChkCmd: %v\n", svcChkCmd) output, curlErr := exec.Command("bash", "-c", svcChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") }) // author: [email protected] g.It("Author:jechen-NonHyperShiftHOST-High-43492-ExternalIP for node that has secondary IP. 
[Disruptive]", func() { // This is for bug https://bugzilla.redhat.com/show_bug.cgi?id=1959798 buildPruningBaseDir := exutil.FixturePath("testdata", "networking") externalIPServiceTemplate := filepath.Join(buildPruningBaseDir, "externalip_service1-template.yaml") externalIPPodTemplate := filepath.Join(buildPruningBaseDir, "externalip_pod-template.yaml") intf := "br-ex" var workers, nonExternalIPNodes []string var proxyHost string platform := exutil.CheckPlatform(oc) msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output() if err != nil || strings.Contains(msg, "sriov.openshift-qe.sdn.com") { platform = "rdu1" proxyHost = "10.8.1.181" } if err != nil || strings.Contains(msg, "offload.openshift-qe.sdn.com") { platform = "rdu2" proxyHost = "10.8.1.179" } if strings.Contains(platform, "rdu1") || strings.Contains(platform, "rdu2") { workers = excludeSriovNodes(oc) if len(workers) < 2 { g.Skip("Not enough nodes, need minimal 2 nodes on RDU for the test, skip the case!!") } } else { nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) // for other non-RDU platforms, need minimal 3 nodes for the test if len(nodeList.Items) < 3 { g.Skip("Not enough worker nodes for this test, skip the case!!") } for _, node := range nodeList.Items { workers = append(workers, node.Name) } } exutil.By("1. Get namespace, create an externalIP pod in it\n") ns := oc.Namespace() pod1 := externalIPPod{ name: "externalip-pod", namespace: ns, template: externalIPPodTemplate, } pod1.createExternalIPPod(oc) waitPodReady(oc, pod1.namespace, pod1.name) exutil.By("2.Find another node, get its host CIDR, and one unused IP in its subnet \n") externalIPPodNode, err := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(externalIPPodNode).NotTo(o.Equal("")) e2e.Logf("ExternalIP pod is on node: %s", externalIPPodNode) for _, node := range workers { if node != externalIPPodNode { nonExternalIPNodes = append(nonExternalIPNodes, node) } } e2e.Logf("\n nonExternalIPNodes are: %v\n", nonExternalIPNodes) sub := getEgressCIDRsForNode(oc, nonExternalIPNodes[0]) freeIPs := findUnUsedIPsOnNodeOrFail(oc, nonExternalIPNodes[0], sub, 1) o.Expect(len(freeIPs)).Should(o.Equal(1)) _, hostIPwithPrefix := getIPv4AndIPWithPrefixForNICOnNode(oc, nonExternalIPNodes[0], intf) prefix := strings.Split(hostIPwithPrefix, "/")[1] e2e.Logf("\n On host %s, prefix of the host ip address: %v\n", nonExternalIPNodes[0], prefix) exutil.By(fmt.Sprintf("3. 
Add secondary IP %s to br-ex on the node %s", freeIPs[0]+"/"+prefix, nonExternalIPNodes[0])) defer delIPFromInferface(oc, nonExternalIPNodes[0], freeIPs[0], intf) addIPtoInferface(oc, nonExternalIPNodes[0], freeIPs[0]+"/"+prefix, intf) exutil.By("4.Patch update network.config with the host CIDR to enable externalIP \n") original, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("network/cluster", "-ojsonpath={.spec.externalIP}").Output() o.Expect(err).NotTo(o.HaveOccurred()) patch := `[{"op": "replace", "path": "/spec/externalIP", "value": ` + original + `}]` defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("network/cluster", "-p", patch, "--type=json").Execute() patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+sub+"\"]}}}}") exutil.By("5.Create an externalIP service with the unused IP address obtained above as externalIP\n") svc := externalIPService{ name: "service-unsecure", namespace: ns, externalIP: freeIPs[0], template: externalIPServiceTemplate, } defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) parameters := []string{"--ignore-unknown-parameters=true", "-f", svc.template, "-p", "NAME=" + svc.name, "EXTERNALIP=" + svc.externalIP} exutil.ApplyNsResourceFromTemplate(oc, svc.namespace, parameters...) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.name).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.name)) // For RDU, curl the externalIP service from test runner through proxy // For other platforms, since it is hard to get external host on same subnet of the secondary IP, we use another non-externalIP node as simulated test enviornment to validate exutil.By("6.Validate the externalIP service\n") svc4URL := net.JoinHostPort(svc.externalIP, "27017") var host string if platform == "rdu1" || platform == "rdu2" { exutil.By(fmt.Sprintf("On %s, use test runner to validate the externalIP service", platform)) host = proxyHost } else { exutil.By(fmt.Sprintf("On %s, use another non-externalIP node to validate the externalIP service", platform)) host = nonExternalIPNodes[1] } checkSvcErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { if validateService(oc, host, svc4URL) { return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(checkSvcErr, "The externalIP service is not reachable as expected") exutil.By("7.Check OVN-KUBE-EXTERNALIP iptables chain is updated correctly\n") for _, node := range workers { output, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "iptables -n -v -t nat -L OVN-KUBE-EXTERNALIP") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(output, svc.externalIP)).Should(o.BeTrue(), fmt.Sprintf("OVN-KUBE-EXTERNALIP iptables chain was not updated correctly on node %s", node)) } }) // author: [email protected] g.It("Author:jechen-ConnectedOnly-High-24672-ExternalIP configured from autoAssignCIDRs. 
[Disruptive]", func() { buildPruningBaseDir := exutil.FixturePath("testdata", "networking") pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") platform := exutil.CheckPlatform(oc) e2e.Logf("platform %s", platform) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") if !acceptedPlatform || checkDisconnect(oc) { g.Skip("Test cases should be run on connected GCP, Azure, skip for other platforms or disconnected cluster!!") } // skip if no spec.publicZone specified in dns.config // the private cluster will be skipped as well // refer to https://issues.redhat.com/browse/OCPQE-22704 dnsPublicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns.config/cluster", "-ojsonpath={.spec.publicZone}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if dnsPublicZone == "" { g.Skip("Skip for the platforms that no dns publicZone specified") } nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("Not enough nodes, need 2 nodes for the test, skip the case!!") } exutil.By("1. Get namespace\n") ns := oc.Namespace() svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } // For GCP/Azure, create a loadbalancer service first to get LB service's LB ip address, then derive its subnet to be used in step 3, exutil.By("2. For public cloud platform, create a loadBalancer service first\n") svc.createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("3. Create a test pod\n") pod := pingPodResourceNode{ name: "hello-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, ns, pod.name) exutil.By("4. For GCP/Azure, get LB's ip address\n") svcExternalIP := getLBSVCIP(oc, svc.namespace, svc.servicename) e2e.Logf("Got externalIP service IP: %v", svcExternalIP) o.Expect(svcExternalIP).NotTo(o.BeEmpty()) exutil.By("5. Derive LB's subnet from its IP address\n") ingressLBIP := net.ParseIP(svcExternalIP) if ingressLBIP == nil { g.Skip("Did not get valid IP address for the host of LB service, skip the rest of test!!") } mask := net.CIDRMask(24, 32) // Assuming /24 subnet mask subnet := ingressLBIP.Mask(mask).String() + "/24" e2e.Logf("LB's subnet: %v", subnet) exutil.By("6. 
Patch update network.config with subnet obtained above to enable autoAssignCIDR for externalIP\n") original, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("network/cluster", "-ojsonpath={.spec.externalIP}").Output() o.Expect(err).NotTo(o.HaveOccurred()) patch := `[{"op": "replace", "path": "/spec/externalIP", "value": ` + original + `}]` defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("network/cluster", "-p", patch, "--type=json").Execute() patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"autoAssignCIDRs\":[\""+subnet+"\"]}}}") patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+subnet+"\"]}}}}") // Wait a little for autoAssignCIDR to take effect time.Sleep(10 * time.Second) exutil.By("7.Curl the externalIP service from test runner\n") svc4URL := net.JoinHostPort(svcExternalIP, "27017") svcChkCmd := fmt.Sprintf("curl %s --connect-timeout 30", svc4URL) e2e.Logf("\n svcChkCmd: %v\n", svcChkCmd) checkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 30*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcChkCmd).Output() if err1 != nil { e2e.Logf("got err:%v, and try next round", err1) return false, nil } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Fail to curl the externalIP service from test runner %s", svc4URL)) }) // author: [email protected] g.It("Author:jechen-High-74601-Verify traffic and OVNK LB endpoints in nbdb for LoadBalancer Service when externalTrafficPolicy is set to Cluster.[Serial]", func() { // For customer bug https://issues.redhat.com/browse/OCPBUGS-24363 // OVNK choose LB endpoints in the following sequence: // 1. when there is/are pods in Ready state, ovnk ONLY choose endpoints of ready pods // 2. When there is/are no ready pods, ovnk choose endpoints that terminating + serving endpoints buildPruningBaseDir := exutil.FixturePath("testdata", "networking") testPodFile := filepath.Join(buildPruningBaseDir, "testpod-with-special-lifecycle.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") platform := exutil.CheckPlatform(oc) scheduleableNodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") if !acceptedPlatform || len(scheduleableNodeList.Items) < 2 { g.Skip("Test cases should be run on GCP or Azure cluster with ovn network plugin, minimal 2 nodes are required, skip for others that do not meet the test requirement") } exutil.By("1. Get namespace, create 2 test pods in it, create a service in front of the test pods \n") ns := oc.Namespace() createResourceFromFile(oc, ns, testPodFile) err = waitForPodWithLabelReady(oc, ns, "name=test-pods") exutil.AssertWaitPollNoErr(err, "Not all test pods with label name=test-pods are ready") exutil.By("2. 
Create a service in front of the above test pods \n") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "test-pods", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc.createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("3. Get IP for the OVN service lb \n") var svcIPv4, podIPv4, curlSVC4ChkCmd string svcIPv4, _ = getSvcIP(oc, svc.namespace, svc.servicename) curlSVC4ChkCmd = fmt.Sprintf("for i in {1..10}; do curl %s --connect-timeout 5 ; sleep 2;echo ;done", net.JoinHostPort(svcIPv4, "27017")) e2e.Logf("IP for service IP: %s", svcIPv4) exutil.By("4. Before scale down test pods, check OVN service lb endpoints in northdb and traffic at endpoints \n") exutil.By("4.1. Check OVN service lb endpoints in northdb, it should include all running backend test pods \n") allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=test-pods") o.Expect(getPodErr).NotTo(o.HaveOccurred()) o.Expect(len(allPods)).NotTo(o.BeEquivalentTo(0)) var expectedEndpointsv4 []string podNodeNames := make(map[string]string) podIPv4s := make(map[string]string) for _, eachPod := range allPods { nodeName, getNodeErr := exutil.GetPodNodeName(oc, ns, eachPod) o.Expect(getNodeErr).NotTo(o.HaveOccurred()) podNodeNames[eachPod] = nodeName podIPv4, _ = getPodIP(oc, ns, eachPod) podIPv4s[eachPod] = podIPv4 expectedEndpointsv4 = append(expectedEndpointsv4, podIPv4+":8080") } e2e.Logf("\n V4 endpoints of service lb are expected to be: %v\n", expectedEndpointsv4) // check service lb endpoints in northdb on each node's ovnkube-pod nodeList, nodeErr := exutil.GetAllNodesbyOSType(oc, "linux") o.Expect(nodeErr).NotTo(o.HaveOccurred()) o.Expect(len(nodeList)).NotTo(o.BeEquivalentTo(0)) var endpointsv4 []string var epErr error for _, eachNode := range nodeList { endpointsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") e2e.Logf("\n Got V4 endpoints of service lb for node %s : %v\n", eachNode, endpointsv4) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(endpointsv4, expectedEndpointsv4)).Should(o.BeTrue(), fmt.Sprintf("V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } exutil.By("4.2. Verify all running pods get traffic \n") var channels [2]chan string // Initialize each channel in the array for i := range channels { channels[i] = make(chan string) } exutil.By(" Start tcpdump on each pod's node") for i, pod := range allPods { go func(i int, pod string) { defer g.GinkgoRecover() tcpdumpCmd := fmt.Sprintf(`timeout 60s tcpdump -c 4 -nneep -i any "(dst port 8080) and (dst %s)"`, podIPv4s[pod]) outputTcpdump, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+podNodeNames[pod], "--", "bash", "-c", tcpdumpCmd).Output() channels[i] <- outputTcpdump }(i, pod) } // add sleep time to let the ping action happen later after tcpdump is enabled. 
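// Note on the capture pattern above: one goroutine is fanned out per backend pod, each
// running a bounded capture (tcpdump -c 4 wrapped in a 60s timeout) on the pod's node via
// "oc debug", and each goroutine sends its raw tcpdump output back on its own channel.
// The main goroutine sleeps briefly so the captures are listening before traffic is sent,
// then receives from each channel in order, so no capture result is lost.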
time.Sleep(5 * time.Second) exutil.By(" Curl the externalIP service from test runner\n") output, curlErr := exec.Command("bash", "-c", curlSVC4ChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) for i, pod := range allPods { receivedMsg := <-channels[i] e2e.Logf(" at step 4.2, tcpdumpOutput for node %s is \n%s\n\n", podNodeNames[pod], receivedMsg) o.Expect(strings.Contains(receivedMsg, podIPv4s[pod])).Should(o.BeTrue()) } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") exutil.By("5. Scale test pods down to 1 \n") scaleErr := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=1", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) allPods = allPods[:0] var terminatingPods []string o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 1 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 1") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) allPods = append(allPods, terminatingPods[0]) var expectedCleanedUpEPsv4, expectedRemindedEPsv4, actualFinalEPsv4 []string for _, eachPod := range terminatingPods { expectedCleanedUpEPsv4 = append(expectedCleanedUpEPsv4, podIPv4s[eachPod]+":8080") } e2e.Logf("\n V4 endpoints of service lb are expected to be cleaned up: %v\n", expectedCleanedUpEPsv4) runningPods := getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") o.Expect(len(runningPods)).To(o.BeEquivalentTo(1)) e2e.Logf("\n runningPods: %v\n", runningPods) allPods = append(allPods, runningPods[0]) for _, eachPod := range runningPods { expectedRemindedEPsv4 = append(expectedRemindedEPsv4, podIPv4s[eachPod]+":8080") } e2e.Logf("\n V4 endpoints of service lb are expected to remind: %v\n", expectedRemindedEPsv4) exutil.By("5.1. Check lb-list entries in northdb again in each node's ovnkube-node pod, only Ready pods' endpoints reminded in service lb endpoints \n") for _, eachNode := range nodeList { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 2, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemindedEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) // Verify terminating pods' endpoints are not in final service lb endpoints for _, ep := range expectedCleanedUpEPsv4 { o.Expect(isValueInList(ep, actualFinalEPsv4)).ShouldNot(o.BeTrue(), fmt.Sprintf("After scale-down, terminating pod's V4 endpoint %s is not cleaned up from V4 service lb endpoint", ep)) } } exutil.By("5.2 Verify only the running pod receives traffic, the terminating pod does not receive traffic \n") for i, pod := range allPods { go func(i int, pod string) { defer g.GinkgoRecover() tcpdumpCmd := fmt.Sprintf(`timeout 60s tcpdump -c 4 -nneep -i any "(dst port 8080) and (dst %s)"`, podIPv4s[pod]) outputTcpdump, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+podNodeNames[pod], "--", "bash", "-c", tcpdumpCmd).Output() channels[i] <- outputTcpdump }(i, pod) } // add sleep time to let the ping action happen later after tcpdump is enabled. 
time.Sleep(5 * time.Second) output, curlErr = exec.Command("bash", "-c", curlSVC4ChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) for i, pod := range allPods { receivedMsg := <-channels[i] e2e.Logf(" at step 5.2, tcpdumpOutput for node %s is \n%s\n\n", podNodeNames[pod], receivedMsg) if pod == terminatingPods[0] { o.Expect(strings.Contains(receivedMsg, "0 packets captured")).Should(o.BeTrue()) } else { o.Expect(strings.Contains(receivedMsg, podIPv4s[pod])).Should(o.BeTrue()) } } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") exutil.By("5.3. Wait for terminating pod from step 5 to disappear so that there is only one running pod left\n") o.Eventually(func() bool { allPodsWithLabel := getPodName(oc, ns, "name=test-pods") runningPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") return len(runningPods) == len(allPodsWithLabel) }, "180s", "10s").Should(o.BeTrue(), "Terminating pods did not disappear after waiting enough time") exutil.By("6. Scale test pods down to 0 \n") scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=0", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 1 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 0") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) exutil.By("6.1. Check lb-list entries in northdb again in each node's ovnkube-node pod, verify that the terminating but serving pod remained in service lb endpoints \n") // expectedRemindedEPsv4 are still expected in NBDB for a little while, // that is because the last pod transitions from Running state to terminating but serving state and there is no other running pod available for _, eachNode := range nodeList { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 0, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemindedEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } exutil.By("6.2 Verify that the terminating pod still receives traffic because there is no other running pod\n") for i, pod := range terminatingPods { go func(i int, pod string) { defer g.GinkgoRecover() tcpdumpCmd := fmt.Sprintf(`timeout 60s tcpdump -c 4 -nneep -i any "(dst port 8080) and (dst %s)"`, podIPv4s[pod]) outputTcpdump, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+podNodeNames[pod], "--", "bash", "-c", tcpdumpCmd).Output() channels[i] <- outputTcpdump }(i, pod) } // add sleep time to let the ping action happen later after tcpdump is enabled. 
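// Context for step 6.2: endpoints carry "serving" and "terminating" conditions on their
// EndpointSlices; once no ready endpoint remains, OVN-Kubernetes falls back to
// terminating-but-still-serving endpoints, which is why the lone terminating pod below
// is still expected to receive the curl traffic.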
time.Sleep(5 * time.Second) output, curlErr = exec.Command("bash", "-c", curlSVC4ChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) for i, pod := range terminatingPods { receivedMsg := <-channels[i] e2e.Logf(" at step 6.2, tcpdumpOutput for node %s is \n%s\n\n", podNodeNames[pod], receivedMsg) o.Expect(strings.Contains(receivedMsg, podIPv4s[pod])).Should(o.BeTrue()) } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") }) g.It("Author:asood-Medium-75424-SessionAffinity does not work after scaling down the Pods", func() { //Bug: https://issues.redhat.com/browse/OCPBUGS-28604 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") servicesBaseDir = exutil.FixturePath("testdata", "networking/services") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml") sessionAffinitySvcTemplate = filepath.Join(servicesBaseDir, "sessionaffinity-svc-template.yaml") customResponsePodTemplate = filepath.Join(servicesBaseDir, "custom-response-pod-template.yaml") labelKey = "name" labelVal = "openshift" testID = "75424" curlCmdList = []string{} ) ns := oc.Namespace() exutil.By(fmt.Sprintf("Create pods that will serve as the endpoints for Session Affinity enabled service in %s project", ns)) customResponsePod := customResponsePodResource{ name: " ", namespace: ns, labelKey: labelKey, labelVal: labelVal, responseStr: " ", template: customResponsePodTemplate, } for i := 0; i < 3; i++ { customResponsePod.name = "hello-pod-" + strconv.Itoa(i) customResponsePod.responseStr = "Hello from " + customResponsePod.name customResponsePod.createCustomResponsePod(oc) waitPodReady(oc, ns, customResponsePod.name) } exutil.By(fmt.Sprintf("Create a test pod in %s", ns)) testPod := pingPodResource{ name: "test-pod", namespace: ns, template: pingPodTemplate, } testPod.createPingPod(oc) waitPodReady(oc, ns, testPod.name) svc := sessionAffinityServiceResource{ name: " ", namespace: ns, ipFamilyPolicy: " ", selLabelKey: labelKey, SelLabelVal: labelVal, template: sessionAffinitySvcTemplate, } ipStackType := checkIPStackType(oc) exutil.By(fmt.Sprintf("Create a service with session affinity enabled on %s cluster", ipStackType)) if ipStackType == "dualstack" { svc.name = "dualstacksvc-" + testID svc.ipFamilyPolicy = "PreferDualStack" defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) svc.createSessionAffiniltyService(oc) svcOutput, svcErr := oc.AsAdmin().Run("get").Args("service", "-n", svc.namespace).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring(svc.name)) serviceIPv6, serviceIPv4 := getSvcIP(oc, svc.namespace, svc.name) curlCmdList = append(curlCmdList, fmt.Sprintf("curl %s:8080 --connect-timeout 5", serviceIPv4)) curlCmdList = append(curlCmdList, fmt.Sprintf("curl -g -6 [%s]:8080 --connect-timeout 5", serviceIPv6)) } else { svc.ipFamilyPolicy = "SingleStack" svc.name = "singlestack-" + ipStackType + "-svc-" + testID defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) svc.createSessionAffiniltyService(oc) svcOutput, svcErr := oc.AsAdmin().Run("get").Args("service", "-n", svc.namespace).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring(svc.name)) if ipStackType == "ipv6single" { serviceIPv6, _ := getSvcIP(oc, svc.namespace, svc.name) curlCmdList = append(curlCmdList, fmt.Sprintf("curl -g -6 [%s]:8080 --connect-timeout 5", serviceIPv6)) } else { serviceIPv4 := 
getSvcIPv4(oc, svc.namespace, svc.name) curlCmdList = append(curlCmdList, fmt.Sprintf("curl %s:8080 --connect-timeout 5", serviceIPv4)) } } for _, curlCmd := range curlCmdList { exutil.By(fmt.Sprintf("Test session affinity using request '%s' cluster", curlCmd)) e2e.Logf("Send first request to service") firstResponse1, requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) e2e.Logf("Request response: %s", firstResponse1) for i := 0; i < 9; i++ { requestResp, requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(requestResp, firstResponse1)).To(o.BeTrue()) } e2e.Logf("Find the pod serving request and delete it") respStr := strings.Split(strings.TrimRight(firstResponse1, "\n"), " ") o.Expect(len(respStr)).To(o.BeEquivalentTo(3)) o.Expect(respStr[2]).NotTo(o.BeEmpty()) removeResource(oc, true, true, "pod", respStr[2], "-n", ns) e2e.Logf(fmt.Sprintf("Send first request to service after deleting the previously serving pod %s", respStr[2])) firstResponse2, requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) e2e.Logf("Request response: %s", firstResponse2) o.Expect(strings.Contains(firstResponse2, firstResponse1)).To(o.BeFalse()) for i := 0; i < 9; i++ { requestResp, requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(requestResp, firstResponse2)).To(o.BeTrue()) } } }) g.It("Author:meinli-Critical-78262-Validate pod/host to hostnetwork pod/nodeport with hostnetwork pod backend on same/diff workers", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) platform := exutil.CheckPlatform(oc) if !(strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "none")) { g.Skip("These cases can only be run on networking team's private RDU BM cluster, vSphere and IPI/UPI BM, skip for other platforms!!!") } exutil.By("1. Get namespace, master and worker node") ns := oc.Namespace() nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } masterNode, err := exutil.GetFirstMasterNode(oc) o.Expect(err).NotTo(o.HaveOccurred()) //Required for hostnetwork pod err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2. Create hostnetwork pod in ns") hostpod := pingPodResourceNode{ name: "hostnetwork-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: hostNetworkPodTemplate, } hostpod.createPingPodNode(oc) waitPodReady(oc, ns, hostpod.name) exutil.By("3. 
Create nodeport service with hostnetwork pod backend when externalTrafficPolicy=Local") ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" { ipFamilyPolicy = "PreferDualStack" } svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "NodePort", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Local", template: genericServiceTemplate, } svc.createServiceFromParams(oc) nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("4. Create two normal pods on diff workers") pods := make([]pingPodResourceNode, 2) for i := 0; i < 2; i++ { pods[i] = pingPodResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns, nodename: nodeList.Items[i].Name, template: pingPodNodeTemplate, } pods[i].createPingPodNode(oc) waitPodReady(oc, ns, pods[i].name) defer exutil.LabelPod(oc, ns, pods[i].name, "name-") err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns, "pod", pods[i].name, fmt.Sprintf("name=hello-pod-%d", i), "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("5. Validate host to pod on same/diff workers") CurlNode2PodPass(oc, pods[0].nodename, ns, pods[0].name) CurlNode2PodPass(oc, pods[1].nodename, ns, pods[0].name) exutil.By("6. Validate pod to host network pod on same/diff workers") CurlPod2PodPass(oc, ns, pods[0].name, ns, hostpod.name) CurlPod2PodPass(oc, ns, pods[1].name, ns, hostpod.name) exutil.By("7. Validate pod to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Local") CurlPod2NodePortPass(oc, ns, pods[0].name, nodeList.Items[0].Name, nodePort) CurlPod2NodePortFail(oc, ns, pods[0].name, nodeList.Items[1].Name, nodePort) exutil.By("8. Validate host to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Local") CurlNodePortPass(oc, masterNode, nodeList.Items[0].Name, nodePort) CurlNodePortFail(oc, masterNode, nodeList.Items[1].Name, nodePort) exutil.By("9. Validate pod to nodeport with hostnetwork pod backend on diff workers when externalTrafficPolicy=Cluster") exutil.By("9.1 Create nodeport service with externalTrafficPolicy=Cluster in ns1 and ns2") removeResource(oc, true, true, "svc", "test-service", "-n", ns) svc.externalTrafficPolicy = "Cluster" svc.createServiceFromParams(oc) nodePort, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9.2 Validate pod to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Cluster") CurlPod2NodePortPass(oc, ns, pods[0].name, nodeList.Items[0].Name, nodePort) CurlPod2NodePortPass(oc, ns, pods[0].name, nodeList.Items[1].Name, nodePort) exutil.By("10. Validate host to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Cluster") CurlNodePortPass(oc, masterNode, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, masterNode, nodeList.Items[1].Name, nodePort) }) })
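The endpoint comparisons throughout this file go through an unorderedEqual helper whose body is not part of this excerpt. A minimal sketch of such a helper, assuming it simply treats the two slices as unordered multisets of "ip:port" strings (the name unorderedEqualSketch is hypothetical):

// unorderedEqualSketch reports whether a and b contain the same elements,
// regardless of order, by counting occurrences in a map.
func unorderedEqualSketch(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[string]int, len(a))
	for _, s := range a {
		counts[s]++
	}
	for _, s := range b {
		counts[s]--
		if counts[s] < 0 {
			return false
		}
	}
	return true
}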
package networking
test case
openshift/openshift-tests-private
eb8df288-b322-4fe3-91ec-e000d7ee6dc3
Author:huirwang-WRS-High-50347-V-ACS.04-[FdpOvnOvs] internalTrafficPolicy set Local for pod/node to service access
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:huirwang-WRS-High-50347-V-ACS.04-[FdpOvnOvs] internalTrafficPolicy set Local for pod/node to service access", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } g.By("Create a namespace") oc.SetupProject() ns1 := oc.Namespace() g.By("create 1st hello pod in ns1") pod1 := pingPodResourceNode{ name: "hello-pod1", namespace: ns1, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod1.createPingPodNode(oc) waitPodReady(oc, ns1, pod1.name) g.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Local", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } svc.ipFamilyPolicy = "SingleStack" svc.createServiceFromParams(oc) g.By("Create second namespace") oc.SetupProject() ns2 := oc.Namespace() g.By("Create a pod hello-pod2 in second namespace, pod located the same node") pod2 := pingPodResourceNode{ name: "hello-pod2", namespace: ns2, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod2.createPingPodNode(oc) waitPodReady(oc, ns2, pod2.name) g.By("Create second pod hello-pod3 in second namespace, pod located on the different node") pod3 := pingPodResourceNode{ name: "hello-pod3", namespace: ns2, nodename: nodeList.Items[1].Name, template: pingPodNodeTemplate, } pod3.createPingPodNode(oc) waitPodReady(oc, ns2, pod3.name) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failling") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node0 to service:port") CurlNode2SvcPass(oc, pod1.nodename, ns1, "test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" { g.By("Delete testservice from ns") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Checking pod to svc:port behavior now on with PreferDualStack Service") svc.ipFamilyPolicy = "PreferDualStack" svc.createServiceFromParams(oc) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failling") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node0 to service:port") //Due to bug 2078691,skip below step for now. //CurlNode2SvcPass(oc, pod1.nodename, ns1,"test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") } })
test case
openshift/openshift-tests-private
cf14acca-79b9-4928-947c-2eaf4cb4d127
Author:huirwang-WRS-High-50348-V-ACS.04-[FdpOvnOvs] internalTrafficPolicy set Local for pod/node to service access with hostnetwork pod backend. [Serial]
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:huirwang-WRS-High-50348-V-ACS.04-[FdpOvnOvs] internalTrafficPolicy set Local for pod/node to service access with hostnetwork pod backend. [Serial]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } g.By("Create a namespace") oc.SetupProject() ns1 := oc.Namespace() //Required for hostnetwork pod err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("create 1st hello pod in ns1") pod1 := pingPodResourceNode{ name: "hello-pod1", namespace: ns1, nodename: nodeList.Items[0].Name, template: hostNetworkPodTemplate, } pod1.createPingPodNode(oc) waitPodReady(oc, ns1, pod1.name) g.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service", namespace: ns1, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Local", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } svc.ipFamilyPolicy = "SingleStack" svc.createServiceFromParams(oc) g.By("Create second namespace") oc.SetupProject() ns2 := oc.Namespace() g.By("Create a pod hello-pod2 in second namespace, pod located the same node") pod2 := pingPodResourceNode{ name: "hello-pod2", namespace: ns2, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod2.createPingPodNode(oc) waitPodReady(oc, ns2, pod2.name) g.By("Create second pod hello-pod3 in second namespace, pod located on the different node") pod3 := pingPodResourceNode{ name: "hello-pod3", namespace: ns2, nodename: nodeList.Items[1].Name, template: pingPodNodeTemplate, } pod3.createPingPodNode(oc) waitPodReady(oc, ns2, pod3.name) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failing") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") g.By("Curl from node0 to service:port") CurlNode2SvcPass(oc, nodeList.Items[0].Name, ns1, "test-service") ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" { g.By("Delete testservice from ns") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Checking pod to svc:port behavior now on with PreferDualStack Service") svc.ipFamilyPolicy = "PreferDualStack" svc.createServiceFromParams(oc) g.By("curl from hello-pod2 to service:port") CurlPod2SvcPass(oc, ns2, ns1, "hello-pod2", "test-service") g.By("curl from hello-pod3 to service:port should be failing") CurlPod2SvcFail(oc, ns2, ns1, "hello-pod3", "test-service") g.By("Curl from node1 to service:port") CurlNode2SvcFail(oc, nodeList.Items[1].Name, ns1, "test-service") } })
test case
openshift/openshift-tests-private
d9be9d5e-b4fa-4a2e-b4e1-09d25f021071
Author:weliang-Medium-57344-Add support for service session affinity timeout
['"path/filepath"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:weliang-Medium-57344-Add support for service session affinity timeout", func() { //Bug: https://issues.redhat.com/browse/OCPBUGS-4502 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") servicesBaseDir = exutil.FixturePath("testdata", "networking/services") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml") sessionAffinitySvcv4 = filepath.Join(servicesBaseDir, "sessionaffinity-svcv4.yaml") sessionAffinitySvcdualstack = filepath.Join(servicesBaseDir, "sessionaffinity-svcdualstack.yaml") sessionAffinityPod1 = filepath.Join(servicesBaseDir, "sessionaffinity-pod1.yaml") sessionAffinityPod2 = filepath.Join(servicesBaseDir, "sessionaffinity-pod2.yaml") ) ns1 := oc.Namespace() g.By("create two pods which will be the endpoints for sessionaffinity service in ns1") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinityPod1, "-n", ns1).Execute() defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinityPod2, "-n", ns1).Execute() createResourceFromFile(oc, ns1, sessionAffinityPod1) waitPodReady(oc, ns1, "blue-pod-1") createResourceFromFile(oc, ns1, sessionAffinityPod2) waitPodReady(oc, ns1, "blue-pod-2") g.By("create a testing pod in ns1") pod1 := pingPodResource{ name: "hello-pod1", namespace: ns1, template: pingPodTemplate, } defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1.name, "-n", pod1.namespace).Execute() pod1.createPingPod(oc) waitPodReady(oc, ns1, pod1.name) ipStackType := checkIPStackType(oc) if ipStackType == "ipv4single" { g.By("test ipv4 singlestack cluster") g.By("create a sessionaffinity service in ns1") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinitySvcv4, "-n", ns1).Execute() createsvcerr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", sessionAffinitySvcv4, "-n", ns1).Execute() o.Expect(createsvcerr).NotTo(o.HaveOccurred()) svcoutput, svcerr := oc.AsAdmin().Run("get").Args("service", "-n", ns1).Output() o.Expect(svcerr).NotTo(o.HaveOccurred()) o.Expect(svcoutput).To(o.ContainSubstring("sessionaffinitysvcv4")) serviceIPv4 := getSvcIPv4(oc, ns1, "sessionaffinitysvcv4") // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will LB after curl sleep more than 10s g.By("Traffic will LB to two endpoints with sleep 15s in curl") trafficoutput, trafficerr := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 11; done") o.Expect(trafficerr).NotTo(o.HaveOccurred()) if strings.Contains(trafficoutput, "Hello Blue Pod-1") && strings.Contains(trafficoutput, "Hello Blue Pod-2") { e2e.Logf("Pass : Traffic LB to two endpoints when curl sleep more than 10s") } else { e2e.Failf("Fail: Traffic does not LB to two endpoints when curl sleep more than 10s") } // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will not LB after curl sleep less than 10s g.By("Traffic will not LB to two endpoints without sleep 15s in curl") trafficoutput1, trafficerr1 := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 9; done") o.Expect(trafficerr1).NotTo(o.HaveOccurred()) if (strings.Contains(trafficoutput1, "Hello Blue Pod-1") && !strings.Contains(trafficoutput1, "Hello Blue Pod-2")) || (strings.Contains(trafficoutput1, "Hello Blue Pod-2") && !strings.Contains(trafficoutput1, "Hello Blue Pod-1")) { e2e.Logf("Pass : Traffic does not LB to two endpoints when curl sleep less than 10s") } else { e2e.Failf("Fail: Traffic LB to two 
endpoints when curl sleep less than 10s") } } if ipStackType == "dualstack" { g.By("test dualstack cluster") g.By("create a sessionaffinity service in ns1") defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", sessionAffinitySvcdualstack, "-n", ns1).Execute() createsvcerr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", sessionAffinitySvcdualstack, "-n", ns1).Execute() o.Expect(createsvcerr).NotTo(o.HaveOccurred()) svcoutput, svcerr := oc.AsAdmin().Run("get").Args("service", "-n", ns1).Output() o.Expect(svcerr).NotTo(o.HaveOccurred()) o.Expect(svcoutput).To(o.ContainSubstring("sessionaffinitysvcdualstack")) serviceIPv4 := getSvcIPv4(oc, ns1, "sessionaffinitysvcdualstack") serviceIPv6 := getSvcIPv6(oc, ns1, "sessionaffinitysvcdualstack") // Test ipv4 traffic in dualstack cluster // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will LB after curl sleep more than 10s g.By("Traffic will LB to two endpoints with sleep 15s in curl") trafficoutput, trafficerr := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 11; done") o.Expect(trafficerr).NotTo(o.HaveOccurred()) if strings.Contains(trafficoutput, "Hello Blue Pod-1") && strings.Contains(trafficoutput, "Hello Blue Pod-2") { e2e.Logf("Pass : Traffic LB to two endpoints when curl sleep more than 10s") } else { e2e.Failf("Fail: Traffic does not LB to two endpoints when curl sleep more than 10s") } // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will not LB after curl sleep less than 10s g.By("Traffic will not LB to two endpoints without sleep 15s in curl") trafficoutput1, trafficerr1 := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl "+serviceIPv4+":8080; sleep 9; done") o.Expect(trafficerr1).NotTo(o.HaveOccurred()) if (strings.Contains(trafficoutput1, "Hello Blue Pod-1") && !strings.Contains(trafficoutput1, "Hello Blue Pod-2")) || (strings.Contains(trafficoutput1, "Hello Blue Pod-2") && !strings.Contains(trafficoutput1, "Hello Blue Pod-1")) { e2e.Logf("Pass : Traffic does not LB to two endpoints when curl sleep less than 10s") } else { e2e.Failf("Fail: Traffic LB to two endpoints when curl sleep less than 10s") } // Tes ipv6 traffic in dualstack cluster // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will LB after curl sleep more than 10s g.By("Traffic will LB to two endpoints with sleep 15s in curl") v6trafficoutput, v6trafficerr := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl -g -6 ["+serviceIPv6+"]:8080; sleep 11; done") o.Expect(v6trafficerr).NotTo(o.HaveOccurred()) if strings.Contains(v6trafficoutput, "Hello Blue Pod-1") && strings.Contains(v6trafficoutput, "Hello Blue Pod-2") { e2e.Logf("Pass : Traffic LB to two endpoints when curl sleep more than 10s") } else { e2e.Failf("Fail: Traffic does not LB to two endpoints when curl sleep more than 10s") } // timeoutSeconds in sessionAffinityConfig is set 10s, traffic will not LB after curl sleep less than 10s g.By("Traffic will not LB to two endpoints without sleep 15s in curl") v6trafficoutput1, v6trafficerr1 := e2eoutput.RunHostCmd(ns1, pod1.name, "for i in 1 2 3 4 5 6 7 8 9 10; do curl -g -6 ["+serviceIPv6+"]:8080; sleep 9; done") o.Expect(v6trafficerr1).NotTo(o.HaveOccurred()) if (strings.Contains(v6trafficoutput1, "Hello Blue Pod-1") && !strings.Contains(v6trafficoutput1, "Hello Blue Pod-2")) || (strings.Contains(v6trafficoutput1, "Hello Blue Pod-2") && !strings.Contains(v6trafficoutput1, "Hello Blue Pod-1")) { 
e2e.Logf("Pass : Traffic does not LB to two endpoints when curl sleep less than 10s") } else { e2e.Failf("Fail: Traffic LB to two endpoints when curl sleep less than 10s") } } })
test case
openshift/openshift-tests-private
0bbf4e49-b3b4-42cc-af60-a032b3811158
Longduration-NonPreRelease-Author:asood-High-62293-[FdpOvnOvs] Validate all the constructs are created on logical routers and logical switches for a service type loadbalancer. [Disruptive]
['"fmt"', '"path/filepath"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Longduration-NonPreRelease-Author:asood-High-62293-[FdpOvnOvs] Validate all the constructs are created on logical routers and logical switches for a service type loadbalancer. [Disruptive]", func() { // Bug: https://issues.redhat.com/browse/OCPBUGS-5930 (Duplicate bug https://issues.redhat.com/browse/OCPBUGS-7000) var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") svcEndpoints []svcEndpontDetails lsConstruct string lrConstruct string ) platform := exutil.CheckPlatform(oc) //vSphere does not have LB service support yet e2e.Logf("platform %s", platform) if !(strings.Contains(platform, "gcp") || strings.Contains(platform, "aws") || strings.Contains(platform, "azure")) { g.Skip("Skip for non-supported auto scaling machineset platforms!!") } workerNodes, err := exutil.GetClusterNodesBy(oc, "worker") o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Get namespace") ns := oc.Namespace() exutil.By(fmt.Sprintf("create 1st hello pod in %s", ns)) pod := pingPodResourceNode{ name: "hello-pod1", namespace: ns, nodename: workerNodes[0], template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, ns, pod.name) exutil.By("Create a test service which is in front of the above pod") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("Create a new machineset to add new nodes") clusterinfra.SkipConditionally(oc) infrastructureName := clusterinfra.GetInfrastructureName(oc) machinesetName := infrastructureName + "-62293" ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 2} defer ms.DeleteMachineSet(oc) ms.CreateMachineSet(oc) clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName) machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName) nodeName0 := clusterinfra.GetNodeNameFromMachine(oc, machineName[0]) nodeName1 := clusterinfra.GetNodeNameFromMachine(oc, machineName[1]) e2e.Logf("The nodes %s and %s added successfully", nodeName0, nodeName1) exutil.By(fmt.Sprintf("create 2nd hello pod in %s on newly created node %s", ns, nodeName0)) pod = pingPodResourceNode{ name: "hello-pod2", namespace: ns, nodename: nodeName0, template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, ns, pod.name) exutil.By("Get backend pod details of user service") allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=hello-pod") o.Expect(getPodErr).NotTo(o.HaveOccurred()) o.Expect(len(allPods)).NotTo(o.BeEquivalentTo(0)) for _, eachPod := range allPods { nodeName, nodeNameErr := exutil.GetPodNodeName(oc, ns, eachPod) o.Expect(nodeNameErr).NotTo(o.HaveOccurred()) podIP := getPodIPv4(oc, ns, eachPod) ovnkubeNodePod, ovnKubeNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(ovnKubeNodePodErr).NotTo(o.HaveOccurred()) svcEndpoint := svcEndpontDetails{ ovnKubeNodePod: ovnkubeNodePod, nodeName: nodeName, podIP: podIP, } svcEndpoints = append(svcEndpoints, svcEndpoint) } exutil.By("Get logical route and switch on node for endpoints of both services to validate they exist on both new and old node") for _, eachEndpoint := 
range svcEndpoints { lsConstruct = eachEndpoint.getOVNConstruct(oc, "ls-list") o.Expect(lsConstruct).NotTo(o.BeEmpty()) e2e.Logf("Logical Switch %s on node %s", lsConstruct, eachEndpoint.nodeName) o.Expect(eachEndpoint.getOVNLBContruct(oc, "ls-lb-list", lsConstruct)).To(o.BeTrue()) lrConstruct = eachEndpoint.getOVNConstruct(oc, "lr-list") o.Expect(lrConstruct).NotTo(o.BeEmpty()) e2e.Logf("Logical Router %s on node %s", lrConstruct, eachEndpoint.nodeName) o.Expect(eachEndpoint.getOVNLBContruct(oc, "lr-lb-list", lrConstruct)).To(o.BeTrue()) } exutil.By("Validate kubernetes service is reachable from all nodes including new nodes") allNodes, nodeErr := exutil.GetAllNodes(oc) o.Expect(nodeErr).NotTo(o.HaveOccurred()) o.Expect(len(allNodes)).NotTo(o.BeEquivalentTo(0)) for i := 0; i < len(allNodes); i++ { output, err := exutil.DebugNodeWithChroot(oc, allNodes[i], "bash", "-c", "curl -s -k https://172.30.0.1/healthz --connect-timeout 5") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(output, "ok")).To(o.BeTrue()) } })
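The construct checks above reduce to ovn-nbctl list queries executed inside each node's ovnkube-node pod. A minimal sketch of that shape, assuming a generic execInPod helper standing in for the suite's oc exec wrappers:

package example

import "strings"

// hasServiceLB reports whether an OVN load balancer for a Service is attached
// to the given logical switch (listCmd "ls-lb-list") or logical router
// (listCmd "lr-lb-list"). OVN-Kubernetes names service load balancers with a
// "Service_<namespace>/<name>" prefix, which is what is matched here.
func hasServiceLB(execInPod func(pod, cmd string) (string, error), ovnkubeNodePod, listCmd, construct, svcKey string) (bool, error) {
	out, err := execInPod(ovnkubeNodePod, "ovn-nbctl "+listCmd+" "+construct)
	if err != nil {
		return false, err
	}
	return strings.Contains(out, "Service_"+svcKey), nil
}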
test case
openshift/openshift-tests-private
61e3166c-4e21-4c0c-a7e2-872010fcabbf
Longduration-NonPreRelease-Author:asood-High-63156-Verify the nodeport is not allocated to VIP based LoadBalancer service type. [Disruptive]
['"path/filepath"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Longduration-NonPreRelease-Author:asood-High-63156-Verify the nodeport is not allocated to VIP based LoadBalancer service type. [Disruptive]", func() { // LoadBalancer service implementation are different on cloud provider and bare metal platform // https://issues.redhat.com/browse/OCPBUGS-10874 (aws and azure pending support) var ( testDataDir = exutil.FixturePath("testdata", "networking/metallb") loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml") serviceLabelKey = "environ" serviceLabelValue = "Test" svc_names = [2]string{"hello-world-cluster", "hello-world-local"} svc_etp = [2]string{"Cluster", "Local"} namespaces []string ) platform := exutil.CheckPlatform(oc) e2e.Logf("platform %s", platform) if !(strings.Contains(platform, "gcp")) { g.Skip("Skip for non-supported platorms!") } masterNodes, err := exutil.GetClusterNodesBy(oc, "master") o.Expect(err).NotTo(o.HaveOccurred()) g.By("Get first namespace and create another") ns := oc.Namespace() namespaces = append(namespaces, ns) oc.SetupProject() ns = oc.Namespace() namespaces = append(namespaces, ns) var desiredMode string origMode := getOVNGatewayMode(oc) defer switchOVNGatewayMode(oc, origMode) g.By("Validate services in original gateway mode " + origMode) for j := 0; j < 2; j++ { for i := 0; i < 2; i++ { svcName := svc_names[i] + "-" + strconv.Itoa(j) g.By("Create a service " + svc_names[i] + " with ExternalTrafficPolicy " + svc_etp[i]) svc := loadBalancerServiceResource{ name: svcName, namespace: namespaces[i], externaltrafficpolicy: svc_etp[i], labelKey: serviceLabelKey, labelValue: serviceLabelValue, allocateLoadBalancerNodePorts: false, template: loadBalancerServiceTemplate, } result := createLoadBalancerService(oc, svc, loadBalancerServiceTemplate) o.Expect(result).To(o.BeTrue()) g.By("Check LoadBalancer service status") err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name) o.Expect(err).NotTo(o.HaveOccurred()) g.By("Get LoadBalancer service IP") svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name) g.By("Validate service") result = validateService(oc, masterNodes[0], svcIP) o.Expect(result).To(o.BeTrue()) g.By("Check nodePort is not assigned to service") nodePort := getLoadBalancerSvcNodePort(oc, svc.namespace, svc.name) o.Expect(nodePort).To(o.BeEmpty()) } if j == 0 { g.By("Change the shared gateway mode to local gateway mode") if origMode == "local" { desiredMode = "shared" } else { desiredMode = "local" } e2e.Logf("Cluster is currently on gateway mode %s", origMode) e2e.Logf("Desired mode is %s", desiredMode) switchOVNGatewayMode(oc, desiredMode) g.By("Validate services in modified gateway mode " + desiredMode) } } })
test case
openshift/openshift-tests-private
4aa2277e-56e2-41b4-b970-046249a95b66
NonHyperShiftHOST-NonPreRelease-Longduration-Author:huirwang-Medium-65796-Recreated service should have correct load_balancer nb entries for same name load_balancer. [Serial]
['"fmt"', '"path/filepath"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:huirwang-Medium-65796-Recreated service should have correct load_balancer nb entries for same name load_balancer. [Serial]", func() { // From customer bug https://issues.redhat.com/browse/OCPBUGS-11716 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) exutil.By("Get namespace ") ns := oc.Namespace() exutil.By("create hello pod in namespace") pod1 := pingPodResource{ name: "hello-pod", namespace: ns, template: pingPodTemplate, } pod1.createPingPod(oc) waitPodReady(oc, ns, pod1.name) ipStack := checkIPStackType(oc) var podIPv6, podIPv4 string if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, pod1.name) } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, pod1.name) } else { podIPv4, _ = getPodIP(oc, ns, pod1.name) } exutil.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } if ipStack == "dualstack" { svc.ipFamilyPolicy = "PreferDualStack" } else { svc.ipFamilyPolicy = "SingleStack" } svc.createServiceFromParams(oc) exutil.By("Check service status") svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("Get service IP") var svcIP6, svcIP4, clusterVIP string if ipStack == "dualstack" || ipStack == "ipv6single" { svcIP6, svcIP4 = getSvcIP(oc, svc.namespace, svc.servicename) } else { svcIP4, _ = getSvcIP(oc, svc.namespace, svc.servicename) } e2e.Logf("ipstack type: %s, SVC's IPv4: %s, SVC's IPv6: %s", ipStack, svcIP4, svcIP6) exutil.By("Check nb loadbalancer entries") ovnPod := getOVNKMasterOVNkubeNode(oc) o.Expect(ovnPod).ShouldNot(o.BeEmpty()) e2e.Logf("\n ovnKMasterPod: %v\n", ovnPod) lbCmd := fmt.Sprintf("ovn-nbctl --column vip find load_balancer name=Service_%s/%s_TCP_cluster", ns, svc.servicename) lbOutput, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmd) e2e.Logf("\nlbOutput: %s\n", lbOutput) o.Expect(err).NotTo(o.HaveOccurred()) if ipStack == "dualstack" || ipStack == "ipv6single" { clusterVIP = fmt.Sprintf("\"[%s]:%s\"=\"[%s]:%s\"", svcIP6, "27017", podIPv6, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } if ipStack == "dualstack" || ipStack == "ipv4single" { clusterVIP = fmt.Sprintf("\"%s:%s\"=\"%s:%s\"", svcIP4, "27017", podIPv4, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } exutil.By("Delete svc") removeResource(oc, true, true, "service", svc.servicename, "-n", ns) exutil.By("Manually add load_balancer entry in nb with same name as previous one.") // no need to defer to remove, as this will be overrided by following service recreated. 
var lbCmdAdd string if ipStack == "dualstack" || ipStack == "ipv6single" { lbCmdAdd = fmt.Sprintf("ovn-nbctl lb-add \"Service_%s/%s_TCP_cluster\" [%s]:%s [%s]:%s", ns, svc.servicename, svcIP6, "27017", podIPv6, "8080") _, err = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmdAdd) o.Expect(err).NotTo(o.HaveOccurred()) } if ipStack == "dualstack" || ipStack == "ipv4single" { lbCmdAdd = fmt.Sprintf("ovn-nbctl lb-add \"Service_%s/%s_TCP_cluster\" %s:%s %s:%s", ns, svc.servicename, svcIP4, "27017", podIPv4, "8080") _, err = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmdAdd) o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("Recreate svc") svc.createServiceFromParams(oc) svcOutput, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("Get service IP again") if ipStack == "dualstack" || ipStack == "ipv6single" { svcIP6, svcIP4 = getSvcIP(oc, svc.namespace, svc.servicename) } else { svcIP4, _ = getSvcIP(oc, svc.namespace, svc.servicename) } e2e.Logf("ipstack type: %s, recreated SVC's IPv4: %s, SVC's IPv6: %s", ipStack, svcIP4, svcIP6) exutil.By("No error logs") podlogs, getLogsErr := oc.AsAdmin().Run("logs").Args(ovnPod, "-n", "openshift-ovn-kubernetes", "-c", "ovnkube-controller", "--since", "90s").Output() o.Expect(getLogsErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(podlogs, "failed to ensure service")).ShouldNot(o.BeTrue()) exutil.By("Check nb load_balancer entries again!") lbOutput, err = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, lbCmd) e2e.Logf("\nlbOutput after SVC recreated: %s\n", lbOutput) o.Expect(err).NotTo(o.HaveOccurred()) if ipStack == "dualstack" || ipStack == "ipv6single" { clusterVIP = fmt.Sprintf("\"[%s]:%s\"=\"[%s]:%s\"", svcIP6, "27017", podIPv6, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } if ipStack == "dualstack" || ipStack == "ipv4single" { clusterVIP = fmt.Sprintf("\"%s:%s\"=\"%s:%s\"", svcIP4, "27017", podIPv4, "8080") o.Expect(lbOutput).Should(o.ContainSubstring(clusterVIP)) } exutil.By("Validate service") CurlPod2SvcPass(oc, ns, ns, pod1.name, svc.servicename) })
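The NBDB assertions above depend on the exact textual form of the load_balancer vip map. A small sketch that isolates how those expected entries are composed, mirroring the fmt.Sprintf calls in the test (no new API, just the string shape):

package example

import "fmt"

// clusterVIPEntry renders an OVN load_balancer vip mapping of the form
// "<svcIP>:<svcPort>"="<podIP>:<targetPort>", bracketing IPv6 addresses the
// way ovn-nbctl prints them. Inputs are bare IPs without brackets.
func clusterVIPEntry(svcIP, svcPort, podIP, targetPort string, ipv6 bool) string {
	if ipv6 {
		return fmt.Sprintf("%q=%q", fmt.Sprintf("[%s]:%s", svcIP, svcPort), fmt.Sprintf("[%s]:%s", podIP, targetPort))
	}
	return fmt.Sprintf("%q=%q", fmt.Sprintf("%s:%s", svcIP, svcPort), fmt.Sprintf("%s:%s", podIP, targetPort))
}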
test case
openshift/openshift-tests-private
1afa490e-d38b-47ff-9117-4785d790bcd7
Author:asood-High-46015-[FdpOvnOvs] Verify traffic to outside the cluster redirected when OVN is used and NodePort service is configured.
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:asood-High-46015-[FdpOvnOvs] Verify traffic to outside the cluster redirected when OVN is used and NodePort service is configured.", func() { // Customer bug https://bugzilla.redhat.com/show_bug.cgi?id=1946696 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) ipStackType := checkIPStackType(oc) o.Expect(ipStackType).NotTo(o.BeEmpty()) exutil.By("1. Get list of worker nodes") nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("Not enough node available, need at least two nodes for the test, skip the case!!") } exutil.By("2. Get namespace ") ns := oc.Namespace() exutil.By("3. Create a hello pod in ns") pod := pingPodResourceNode{ name: "hello-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, pod.namespace, pod.name) exutil.By("4. Create a nodePort type service fronting the above pod") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "NodePort", ipFamilyPolicy: "", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } if ipStackType == "dualstack" { svc.ipFamilyPolicy = "PreferDualStack" } else { svc.ipFamilyPolicy = "SingleStack" } defer removeResource(oc, true, true, "service", svc.servicename, "-n", svc.namespace) svc.createServiceFromParams(oc) exutil.By("5. Get NodePort at which service listens.") nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6. Validate external traffic to node port is redirected.") CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort) curlCmd := fmt.Sprintf("curl -4 -v http://www.google.de:%s --connect-timeout 5", nodePort) resp, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", curlCmd) if (err != nil) || (resp != "") { o.Expect(strings.Contains(resp, "Hello OpenShift")).To(o.BeFalse()) } })
test case
openshift/openshift-tests-private
ed222f49-2be1-450a-ab92-e84b57d05a71
NonPreRelease-Longduration-Author:asood-Critical-63301-[FdpOvnOvs] Kube's API intermitent timeout via sdn or internal services from nodes or pods using hostnetwork. [Disruptive]
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("NonPreRelease-Longduration-Author:asood-Critical-63301-[FdpOvnOvs] Kube's API intermitent timeout via sdn or internal services from nodes or pods using hostnetwork. [Disruptive]", func() { // From customer bug https://issues.redhat.com/browse/OCPBUGS-5828 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ) //The test can run on the platforms that have nodes in same subnet as the hostnetworked pod backed service is accessible only such clusters. //The test also adds a bad route on the node from where the service is accessed for testing purpose. exutil.By("Check the platform if it is suitable for running the test") platform := exutil.CheckPlatform(oc) ipStackType := checkIPStackType(oc) if !strings.Contains(platform, "vsphere") && !strings.Contains(platform, "baremetal") { g.Skip("Unsupported platform, skipping the test") } if !strings.Contains(ipStackType, "ipv4single") { g.Skip("Unsupported stack, skipping the test") } exutil.By("Get the schedulable worker nodes in ready state") nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(nodeErr).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This test requires at least two worker nodes") } exutil.By("Switch the GW mode to Local") origMode := getOVNGatewayMode(oc) desiredMode := "local" defer switchOVNGatewayMode(oc, origMode) switchOVNGatewayMode(oc, desiredMode) exutil.By("Get namespace ") ns := oc.Namespace() err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Create pod on host network in namespace") pod1 := pingPodResourceNode{ name: "hello-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: hostNetworkPodTemplate, } pod1.createPingPodNode(oc) waitPodReady(oc, ns, pod1.name) exutil.By("Create a test service which is in front of the above pods") svc := genericServiceResource{ servicename: "test-service-63301", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "ClusterIP", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } svc.createServiceFromParams(oc) exutil.By("Check service status") svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("Get service IP") //nodeIP1 and nodeIP2 will be IPv6 and IPv4 respectively in case of dual stack and IPv4/IPv6 in 2nd var case of single _, nodeIP := getNodeIP(oc, nodeList.Items[0].Name) var curlCmd, addRouteCmd, delRouteCmd string svcIPv4 := getSvcIPv4(oc, svc.namespace, svc.servicename) curlCmd = fmt.Sprintf("curl -v %s:27017 --connect-timeout 5", svcIPv4) addRouteCmd = fmt.Sprintf("route add %s gw 127.0.0.1 lo", nodeIP) delRouteCmd = fmt.Sprintf("route delete %s", nodeIP) exutil.By("Create another pod for pinging the service") pod2 := pingPodResourceNode{ name: "ping-hello-pod", namespace: ns, nodename: nodeList.Items[1].Name, template: 
pingPodNodeTemplate, } pod2.createPingPodNode(oc) waitPodReady(oc, pod2.namespace, pod2.name) exutil.LabelPod(oc, pod2.namespace, pod2.name, "name-") exutil.LabelPod(oc, pod2.namespace, pod2.name, "name=ping-hello-pod") exutil.By("Validate the service from pod on cluster network") CurlPod2SvcPass(oc, ns, ns, pod2.name, svc.servicename) exutil.By("Validate the service from pod on host network") output, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "bash", "-c", curlCmd) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeTrue()) exutil.By("Create a bad route to node where pod backing the service is running, on the host from where service is accessed ") defer exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", delRouteCmd) _, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", addRouteCmd) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("Validate the service from pod on cluster network to verify it fails") CurlPod2SvcFail(oc, ns, ns, pod2.name, svc.servicename) exutil.By("Validate the service from pod on host network to verify it fails") output, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "bash", "-c", curlCmd) if (err != nil) || (output != "") { o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeFalse()) } exutil.By("Delete the route that was added") _, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[1].Name, "/bin/bash", "-c", delRouteCmd) o.Expect(err).NotTo(o.HaveOccurred()) })
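The fault injection above is plain route-table surgery: the backend node's IP is pointed at the loopback device on the client node, then restored on cleanup. A sketch of the command pair, matching the addRouteCmd/delRouteCmd strings built in the test:

package example

import "fmt"

// badRouteCmds returns the route(8) invocations the test runs via a debug
// shell on the client node: the add sends traffic destined for the backend
// node's IP into the loopback device (breaking the service path), and the
// delete restores normal routing and is deferred as cleanup.
func badRouteCmds(backendNodeIP string) (add, del string) {
	add = fmt.Sprintf("route add %s gw 127.0.0.1 lo", backendNodeIP)
	del = fmt.Sprintf("route delete %s", backendNodeIP)
	return add, del
}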
test case
openshift/openshift-tests-private
93ff1f59-fd1f-4142-81af-75ea5b66068c
Author:jechen-High-71385-OVNK only choose LB endpoints from ready pods unless there are only terminating pods still in serving state left to choose.
['"fmt"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:jechen-High-71385-OVNK only choose LB endpoints from ready pods unless there are only terminating pods still in serving state left to choose.", func() { // For customer bug https://issues.redhat.com/browse/OCPBUGS-24363 // OVNK choose LB endpoints in the following sequence: // 1. when there is/are pods in Ready state, ovnk ONLY choose endpoints of ready pods // 2. When there is/are no ready pods, ovnk choose endpoints that terminating + serving endpoints buildPruningBaseDir := exutil.FixturePath("testdata", "networking") testPodFile := filepath.Join(buildPruningBaseDir, "testpod-with-special-lifecycle.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") exutil.By("1.Get namespace \n") ns := oc.Namespace() exutil.By("2. Create test pods and scale test pods to 5 \n") createResourceFromFile(oc, ns, testPodFile) err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=5", "-n", ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = waitForPodWithLabelReady(oc, ns, "name=test-pods") exutil.AssertWaitPollNoErr(err, "Not all test pods with label name=test-pods are ready") exutil.By("3. Create a service in front of the above test pods \n") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "test-pods", serviceType: "ClusterIP", ipFamilyPolicy: "", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "", //This no value parameter will be ignored template: genericServiceTemplate, } ipStack := checkIPStackType(oc) if ipStack == "dualstack" { svc.ipFamilyPolicy = "PreferDualStack" } else { svc.ipFamilyPolicy = "SingleStack" } svc.createServiceFromParams(oc) exutil.By("4. Check OVN service lb status \n") svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("5. Get IP for the OVN service lb \n") var svcIPv6, svcIPv4, podIPv6, podIPv4 string if ipStack == "dualstack" || ipStack == "ipv6single" { svcIPv6, svcIPv4 = getSvcIP(oc, svc.namespace, svc.servicename) } else { svcIPv4, _ = getSvcIP(oc, svc.namespace, svc.servicename) } e2e.Logf("On this %s cluster, IP for service IP are svcIPv6: %s, svcIPv4: %s", ipStack, svcIPv6, svcIPv4) exutil.By("6. 
Check OVN service lb endpoints in northdb, it should include all running backend test pods \n") allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=test-pods") o.Expect(getPodErr).NotTo(o.HaveOccurred()) o.Expect(len(allPods)).NotTo(o.BeEquivalentTo(0)) var expectedEndpointsv6, expectedEndpointsv4 []string for _, eachPod := range allPods { if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, eachPod) expectedEndpointsv6 = append(expectedEndpointsv6, "["+podIPv6+"]:8080") expectedEndpointsv4 = append(expectedEndpointsv4, podIPv4+":8080") } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, eachPod) expectedEndpointsv6 = append(expectedEndpointsv6, "["+podIPv6+"]:8080") } else { podIPv4, _ = getPodIP(oc, ns, eachPod) expectedEndpointsv4 = append(expectedEndpointsv4, podIPv4+":8080") } } e2e.Logf("\n On this %s cluster, V6 endpoints of service lb are expected to be: %v\n", ipStack, expectedEndpointsv6) e2e.Logf("\n On this %s cluster, V4 endpoints of service lb are expected to be: %v\n", ipStack, expectedEndpointsv4) // check service lb endpoints in northdb on each node's ovnkube-pod nodeList, nodeErr := exutil.GetAllNodesbyOSType(oc, "linux") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(nodeErr).NotTo(o.HaveOccurred()) o.Expect(len(nodeList)).NotTo(o.BeEquivalentTo(0)) var endpointsv6, endpointsv4 []string var epErr error for _, eachNode := range nodeList { if ipStack == "dualstack" || ipStack == "ipv6single" { endpointsv6, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, "\\["+svcIPv6+"\\]:27017") e2e.Logf("\n Got V6 endpoints of service lb for node %s : %v\n", eachNode, endpointsv6) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(endpointsv6, expectedEndpointsv6)).Should(o.BeTrue(), fmt.Sprintf("V6 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } if ipStack == "dualstack" || ipStack == "ipv4single" { endpointsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") e2e.Logf("\n Got V4 endpoints of service lb for node %s : %v\n", eachNode, endpointsv4) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(endpointsv4, expectedEndpointsv4)).Should(o.BeTrue(), fmt.Sprintf("V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } } exutil.By("7. 
Scale test pods down to 2 \n") scaleErr := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=2", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) var terminatingPods []string o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 3 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 2") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) var expectedCleanedUpEPsv6, expectedCleanedUpEPsv4, expectedRemindedEPsv6, expectedRemindedEPsv4, actualFinalEPsv6, actualFinalEPsv4 []string for _, eachPod := range terminatingPods { if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, eachPod) expectedCleanedUpEPsv6 = append(expectedCleanedUpEPsv6, "["+podIPv6+"]:8080") expectedCleanedUpEPsv4 = append(expectedCleanedUpEPsv4, podIPv4+":8080") } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, eachPod) expectedCleanedUpEPsv6 = append(expectedCleanedUpEPsv6, "["+podIPv6+"]:8080") } else { podIPv4, _ = getPodIP(oc, ns, eachPod) expectedCleanedUpEPsv4 = append(expectedCleanedUpEPsv4, podIPv4+":8080") } } e2e.Logf("\n On this %s cluster, V6 endpoints of service lb are expected to be cleaned up: %v\n", ipStack, expectedCleanedUpEPsv6) e2e.Logf("\n On this %s cluster, V4 endpoints of service lb are expected to be cleaned up: %v\n", ipStack, expectedCleanedUpEPsv4) runningPods := getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") o.Expect(len(runningPods)).To(o.BeEquivalentTo(2)) e2e.Logf("\n runningPods: %v\n", runningPods) for _, eachPod := range runningPods { if ipStack == "dualstack" { podIPv6, podIPv4 = getPodIP(oc, ns, eachPod) expectedRemindedEPsv6 = append(expectedRemindedEPsv6, "["+podIPv6+"]:8080") expectedRemindedEPsv4 = append(expectedRemindedEPsv4, podIPv4+":8080") } else if ipStack == "ipv6single" { podIPv6, _ = getPodIP(oc, ns, eachPod) expectedRemindedEPsv6 = append(expectedRemindedEPsv6, "["+podIPv6+"]:8080") } else { podIPv4, _ = getPodIP(oc, ns, eachPod) expectedRemindedEPsv4 = append(expectedRemindedEPsv4, podIPv4+":8080") } } e2e.Logf("\n On this %s cluster, V6 endpoints of service lb are expected to remind: %v\n", ipStack, expectedRemindedEPsv6) e2e.Logf("\n On this %s cluster, V4 endpoints of service lb are expected to remind: %v\n", ipStack, expectedRemindedEPsv4) exutil.By("8. 
Check lb-list entries in northdb again in each node's ovnkube-node pod, only Ready pods' endpoints remain in service lb endpoints \n") for _, eachNode := range nodeList { if ipStack == "dualstack" || ipStack == "ipv6single" { actualFinalEPsv6, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, "\\["+svcIPv6+"\\]:27017") e2e.Logf("\n\n After scale-down to 2, V6 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv6) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(actualFinalEPsv6, expectedRemindedEPsv6)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V6 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } if ipStack == "dualstack" || ipStack == "ipv4single" { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 2, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemindedEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } // Verify terminating pods' endpoints are not in final service lb endpoints if ipStack == "dualstack" || ipStack == "ipv6single" { for _, ep := range expectedCleanedUpEPsv6 { o.Expect(isValueInList(ep, actualFinalEPsv6)).ShouldNot(o.BeTrue(), fmt.Sprintf("After scale-down, terminating pod's V6 endpoint %s is not cleaned up from V6 service lb endpoint", ep)) } } if ipStack == "dualstack" || ipStack == "ipv4single" { for _, ep := range expectedCleanedUpEPsv4 { o.Expect(isValueInList(ep, actualFinalEPsv4)).ShouldNot(o.BeTrue(), fmt.Sprintf("After scale-down, terminating pod's V4 endpoint %s is not cleaned up from V4 service lb endpoint", ep)) } } } exutil.By("9. Wait for all three terminating pods from step 7-8 to disappear so that only two running pods are left\n") o.Eventually(func() bool { allPodsWithLabel := getPodName(oc, ns, "name=test-pods") runningPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") return len(runningPods) == len(allPodsWithLabel) }, "180s", "10s").Should(o.BeTrue(), "Terminating pods did not disappear after waiting enough time") exutil.By("10. Scale test pods down to 0 \n") scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=0", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 2 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 0") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) exutil.By("11. 
Check lb-list entries in northdb again in each node's ovnkube-node pod, verify that the two terminating but serving pods remain in service lb endpoints \n") // expectedRemindedEPsv4 or expectedRemindedEPsv6 or both are still expected in NBDB for a little while, // that is because these two pods transition from Running state to terminating but serving state and there is no other running pod available for _, eachNode := range nodeList { if ipStack == "dualstack" || ipStack == "ipv6single" { actualFinalEPsv6, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, "\\["+svcIPv6+"\\]:27017") e2e.Logf("\n\n After scale-down to 0, V6 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv6) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(actualFinalEPsv6, expectedRemindedEPsv6)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V6 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } if ipStack == "dualstack" || ipStack == "ipv4single" { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 0, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemindedEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } } })
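The selection rule this test verifies can be stated compactly. A sketch of the rule, using a simplified endpoint type as a stand-in for EndpointSlice conditions (the assumption being that ready, serving, and terminating are the only conditions consulted):

package example

// endpoint is a simplified stand-in for an EndpointSlice endpoint's
// conditions plus its address:port string.
type endpoint struct {
	addr                        string
	ready, serving, terminating bool
}

// pickLBEndpoints mirrors the behavior asserted above: while any ready
// endpoint exists, only ready endpoints are programmed into the OVN load
// balancer; once none are ready, terminating-but-still-serving endpoints
// are used as a fallback so the service does not black-hole traffic.
func pickLBEndpoints(eps []endpoint) []string {
	var ready, fallback []string
	for _, ep := range eps {
		switch {
		case ep.ready:
			ready = append(ready, ep.addr)
		case ep.terminating && ep.serving:
			fallback = append(fallback, ep.addr)
		}
	}
	if len(ready) > 0 {
		return ready
	}
	return fallback
}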
test case
openshift/openshift-tests-private
153ee811-cdf9-414a-99b5-22df3b3b7fb4
Author:jechen-High-37033-ExternalVM access cluster through externalIP. [Disruptive]
['"fmt"', '"net"', '"os/exec"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:jechen-High-37033-ExternalVM access cluster through externalIP. [Disruptive]", func() { // This is for https://bugzilla.redhat.com/show_bug.cgi?id=1900118 and https://bugzilla.redhat.com/show_bug.cgi?id=1890270 buildPruningBaseDir := exutil.FixturePath("testdata", "networking") externalIPServiceTemplate := filepath.Join(buildPruningBaseDir, "externalip_service1-template.yaml") externalIPPodTemplate := filepath.Join(buildPruningBaseDir, "externalip_pod-template.yaml") var workers, nonExternalIPNodes []string var proxyHost, RDUHost, intf string if !(isPlatformSuitable(oc)) { g.Skip("These cases can only be run on networking team's private RDU clusters, skip for other envrionment!!!") } workers = excludeSriovNodes(oc) if len(workers) < 2 { g.Skip("Not enough nodes, need minimal 2 nodes on RDU for the test, skip the case!!") } msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output() if err != nil || strings.Contains(msg, "sriov.openshift-qe.sdn.com") { proxyHost = "10.8.1.181" RDUHost = "openshift-qe-028.lab.eng.rdu2.redhat.com" intf = "sriovbm" } if err != nil || strings.Contains(msg, "offload.openshift-qe.sdn.com") { proxyHost = "10.8.1.179" RDUHost = "openshift-qe-026.lab.eng.rdu2.redhat.com" intf = "offloadbm" } exutil.By("1. Get namespace, create an externalIP pod in it\n") ns := oc.Namespace() pod1 := externalIPPod{ name: "externalip-pod", namespace: ns, template: externalIPPodTemplate, } defer removeResource(oc, true, true, "pod", pod1.name, "-n", pod1.namespace) pod1.createExternalIPPod(oc) waitPodReady(oc, pod1.namespace, pod1.name) exutil.By("2.Find another node, get its host CIDR, and one unused IP in its subnet \n") externalIPPodNode, err := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(externalIPPodNode).NotTo(o.Equal("")) e2e.Logf("ExternalIP pod is on node: %s", externalIPPodNode) for _, node := range workers { if node != externalIPPodNode { nonExternalIPNodes = append(nonExternalIPNodes, node) } } e2e.Logf("\n nonExternalIPNodes are: %v\n", nonExternalIPNodes) sub := getEgressCIDRsForNode(oc, nonExternalIPNodes[0]) freeIPs := findUnUsedIPsOnNodeOrFail(oc, nonExternalIPNodes[0], sub, 1) o.Expect(len(freeIPs)).Should(o.Equal(1)) exutil.By("4.Patch update network.config with the host CIDR to enable externalIP \n") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{}}}}") defer patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[]}}}}") patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+sub+"\"]}}}}") exutil.By("5.Create an externalIP service with the unused IP address obtained above as externalIP\n") svc := externalIPService{ name: "service-unsecure", namespace: ns, externalIP: freeIPs[0], template: externalIPServiceTemplate, } defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) parameters := []string{"--ignore-unknown-parameters=true", "-f", svc.template, "-p", "NAME=" + svc.name, "EXTERNALIP=" + svc.externalIP} exutil.ApplyNsResourceFromTemplate(oc, svc.namespace, parameters...) 
svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.name).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.name)) g.By("Get the Node IP from any node, add a static route on the test runner host to assist the test") nodeIP := getNodeIPv4(oc, ns, nonExternalIPNodes[0]) ipRouteDeleteCmd := "ip route delete " + svc.externalIP defer sshRunCmd(RDUHost, "root", ipRouteDeleteCmd) ipRouteAddCmd := "ip route add " + svc.externalIP + " via " + nodeIP + " dev " + intf err = sshRunCmd(proxyHost, "root", ipRouteAddCmd) o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("6.Validate the externalIP service from external of the cluster (from test runner)\n") svc4URL := net.JoinHostPort(svc.externalIP, "27017") svcChkCmd := fmt.Sprintf("curl -H 'Cache-Control: no-cache' -x 'http://%s:8888' %s --connect-timeout 5", proxyHost, svc4URL) e2e.Logf("\n svcChkCmd: %v\n", svcChkCmd) output, curlErr := exec.Command("bash", "-c", svcChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") })
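Enabling externalIP comes down to one merge patch on network/cluster. A sketch of the payload builder, mirroring the inline JSON strings used above (no new fields are introduced):

package example

import "fmt"

// allowedCIDRsPatch renders the merge patch that permits Services to claim
// externalIPs from the given subnet, e.g. "10.0.10.0/24". The deferred
// cleanup in the test applies the same shape with an empty list.
func allowedCIDRsPatch(cidr string) string {
	return fmt.Sprintf(`{"spec":{"externalIP":{"policy":{"allowedCIDRs":[%q]}}}}`, cidr)
}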
test case
openshift/openshift-tests-private
ea58d5f4-b461-4c31-a358-ed0ae4db2dbe
Author:jechen-NonHyperShiftHOST-High-43492-ExternalIP for node that has secondary IP. [Disruptive]
['"context"', '"fmt"', '"net"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:jechen-NonHyperShiftHOST-High-43492-ExternalIP for node that has secondary IP. [Disruptive]", func() { // This is for bug https://bugzilla.redhat.com/show_bug.cgi?id=1959798 buildPruningBaseDir := exutil.FixturePath("testdata", "networking") externalIPServiceTemplate := filepath.Join(buildPruningBaseDir, "externalip_service1-template.yaml") externalIPPodTemplate := filepath.Join(buildPruningBaseDir, "externalip_pod-template.yaml") intf := "br-ex" var workers, nonExternalIPNodes []string var proxyHost string platform := exutil.CheckPlatform(oc) msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output() if err != nil || strings.Contains(msg, "sriov.openshift-qe.sdn.com") { platform = "rdu1" proxyHost = "10.8.1.181" } if err != nil || strings.Contains(msg, "offload.openshift-qe.sdn.com") { platform = "rdu2" proxyHost = "10.8.1.179" } if strings.Contains(platform, "rdu1") || strings.Contains(platform, "rdu2") { workers = excludeSriovNodes(oc) if len(workers) < 2 { g.Skip("Not enough nodes, need minimal 2 nodes on RDU for the test, skip the case!!") } } else { nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) // for other non-RDU platforms, need minimal 3 nodes for the test if len(nodeList.Items) < 3 { g.Skip("Not enough worker nodes for this test, skip the case!!") } for _, node := range nodeList.Items { workers = append(workers, node.Name) } } exutil.By("1. Get namespace, create an externalIP pod in it\n") ns := oc.Namespace() pod1 := externalIPPod{ name: "externalip-pod", namespace: ns, template: externalIPPodTemplate, } pod1.createExternalIPPod(oc) waitPodReady(oc, pod1.namespace, pod1.name) exutil.By("2.Find another node, get its host CIDR, and one unused IP in its subnet \n") externalIPPodNode, err := exutil.GetPodNodeName(oc, pod1.namespace, pod1.name) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(externalIPPodNode).NotTo(o.Equal("")) e2e.Logf("ExternalIP pod is on node: %s", externalIPPodNode) for _, node := range workers { if node != externalIPPodNode { nonExternalIPNodes = append(nonExternalIPNodes, node) } } e2e.Logf("\n nonExternalIPNodes are: %v\n", nonExternalIPNodes) sub := getEgressCIDRsForNode(oc, nonExternalIPNodes[0]) freeIPs := findUnUsedIPsOnNodeOrFail(oc, nonExternalIPNodes[0], sub, 1) o.Expect(len(freeIPs)).Should(o.Equal(1)) _, hostIPwithPrefix := getIPv4AndIPWithPrefixForNICOnNode(oc, nonExternalIPNodes[0], intf) prefix := strings.Split(hostIPwithPrefix, "/")[1] e2e.Logf("\n On host %s, prefix of the host ip address: %v\n", nonExternalIPNodes[0], prefix) exutil.By(fmt.Sprintf("3. 
Add secondary IP %s to br-ex on the node %s", freeIPs[0]+"/"+prefix, nonExternalIPNodes[0])) defer delIPFromInferface(oc, nonExternalIPNodes[0], freeIPs[0], intf) addIPtoInferface(oc, nonExternalIPNodes[0], freeIPs[0]+"/"+prefix, intf) exutil.By("4.Patch update network.config with the host CIDR to enable externalIP \n") original, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("network/cluster", "-ojsonpath={.spec.externalIP}").Output() o.Expect(err).NotTo(o.HaveOccurred()) patch := `[{"op": "replace", "path": "/spec/externalIP", "value": ` + original + `}]` defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("network/cluster", "-p", patch, "--type=json").Execute() patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+sub+"\"]}}}}") exutil.By("5.Create an externalIP service with the unused IP address obtained above as externalIP\n") svc := externalIPService{ name: "service-unsecure", namespace: ns, externalIP: freeIPs[0], template: externalIPServiceTemplate, } defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) parameters := []string{"--ignore-unknown-parameters=true", "-f", svc.template, "-p", "NAME=" + svc.name, "EXTERNALIP=" + svc.externalIP} exutil.ApplyNsResourceFromTemplate(oc, svc.namespace, parameters...) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.name).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.name)) // For RDU, curl the externalIP service from test runner through proxy // For other platforms, since it is hard to get an external host on the same subnet as the secondary IP, we use another non-externalIP node as a simulated test environment to validate exutil.By("6.Validate the externalIP service\n") svc4URL := net.JoinHostPort(svc.externalIP, "27017") var host string if platform == "rdu1" || platform == "rdu2" { exutil.By(fmt.Sprintf("On %s, use test runner to validate the externalIP service", platform)) host = proxyHost } else { exutil.By(fmt.Sprintf("On %s, use another non-externalIP node to validate the externalIP service", platform)) host = nonExternalIPNodes[1] } checkSvcErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { if validateService(oc, host, svc4URL) { return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(checkSvcErr, "The externalIP service is not reachable as expected") exutil.By("7.Check OVN-KUBE-EXTERNALIP iptables chain is updated correctly\n") for _, node := range workers { output, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "iptables -n -v -t nat -L OVN-KUBE-EXTERNALIP") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(output, svc.externalIP)).Should(o.BeTrue(), fmt.Sprintf("OVN-KUBE-EXTERNALIP iptables chain was not updated correctly on node %s", node)) } })
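Step 3 relies on plumbing a secondary address onto br-ex. The bodies of the addIPtoInferface/delIPFromInferface helpers are not shown here, so the exact ip(8) invocations below are an assumption; a sketch of the likely command pair:

package example

import "fmt"

// secondaryIPCmds composes the address add/remove commands for a node
// interface; ipWithPrefix is e.g. "192.168.111.90/24" and intf is "br-ex".
// These exact flags are an assumption, not the helpers' verified bodies.
func secondaryIPCmds(ipWithPrefix, intf string) (add, del string) {
	add = fmt.Sprintf("ip address add %s dev %s", ipWithPrefix, intf)
	del = fmt.Sprintf("ip address del %s dev %s", ipWithPrefix, intf)
	return add, del
}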
test case
openshift/openshift-tests-private
38660645-de5b-44fb-aaff-4b6d0170acc4
Author:jechen-ConnectedOnly-High-24672-ExternalIP configured from autoAssignCIDRs. [Disruptive]
['"context"', '"fmt"', '"net"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:jechen-ConnectedOnly-High-24672-ExternalIP configured from autoAssignCIDRs. [Disruptive]", func() { buildPruningBaseDir := exutil.FixturePath("testdata", "networking") pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") platform := exutil.CheckPlatform(oc) e2e.Logf("platform %s", platform) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") if !acceptedPlatform || checkDisconnect(oc) { g.Skip("Test cases should be run on connected GCP, Azure, skip for other platforms or disconnected cluster!!") } // skip if no spec.publicZone specified in dns.config // the private cluster will be skipped as well // refer to https://issues.redhat.com/browse/OCPQE-22704 dnsPublicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns.config/cluster", "-ojsonpath={.spec.publicZone}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if dnsPublicZone == "" { g.Skip("Skip for the platforms that no dns publicZone specified") } nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("Not enough nodes, need 2 nodes for the test, skip the case!!") } exutil.By("1. Get namespace\n") ns := oc.Namespace() svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } // For GCP/Azure, create a loadbalancer service first to get LB service's LB ip address, then derive its subnet to be used in step 3, exutil.By("2. For public cloud platform, create a loadBalancer service first\n") svc.createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("3. Create a test pod\n") pod := pingPodResourceNode{ name: "hello-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: pingPodNodeTemplate, } pod.createPingPodNode(oc) waitPodReady(oc, ns, pod.name) exutil.By("4. For GCP/Azure, get LB's ip address\n") svcExternalIP := getLBSVCIP(oc, svc.namespace, svc.servicename) e2e.Logf("Got externalIP service IP: %v", svcExternalIP) o.Expect(svcExternalIP).NotTo(o.BeEmpty()) exutil.By("5. Derive LB's subnet from its IP address\n") ingressLBIP := net.ParseIP(svcExternalIP) if ingressLBIP == nil { g.Skip("Did not get valid IP address for the host of LB service, skip the rest of test!!") } mask := net.CIDRMask(24, 32) // Assuming /24 subnet mask subnet := ingressLBIP.Mask(mask).String() + "/24" e2e.Logf("LB's subnet: %v", subnet) exutil.By("6. 
Patch update network.config with subnet obtained above to enable autoAssignCIDR for externalIP\n") original, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("network/cluster", "-ojsonpath={.spec.externalIP}").Output() o.Expect(err).NotTo(o.HaveOccurred()) patch := `[{"op": "replace", "path": "/spec/externalIP", "value": ` + original + `}]` defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("network/cluster", "-p", patch, "--type=json").Execute() patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"autoAssignCIDRs\":[\""+subnet+"\"]}}}") patchResourceAsAdmin(oc, "network/cluster", "{\"spec\":{\"externalIP\":{\"policy\":{\"allowedCIDRs\":[\""+subnet+"\"]}}}}") // Wait a little for autoAssignCIDR to take effect time.Sleep(10 * time.Second) exutil.By("7.Curl the externalIP service from test runner\n") svc4URL := net.JoinHostPort(svcExternalIP, "27017") svcChkCmd := fmt.Sprintf("curl %s --connect-timeout 30", svc4URL) e2e.Logf("\n svcChkCmd: %v\n", svcChkCmd) checkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 30*time.Second, false, func(cxt context.Context) (bool, error) { output, err1 := exec.Command("bash", "-c", svcChkCmd).Output() if err1 != nil { e2e.Logf("got err:%v, and try next round", err1) return false, nil } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Fail to curl the externalIP service from test runner %s", svc4URL)) })
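The subnet math in step 5 is worth isolating, since it silently assumes the cloud allocates LB ingress IPs from an IPv4 /24. A self-contained sketch of the same derivation using only the standard library:

package example

import (
	"fmt"
	"net"
)

// lbSubnet masks a LoadBalancer ingress IP down to its /24 network, matching
// the test's assumption above. For "34.135.10.77" it returns "34.135.10.0/24".
func lbSubnet(lbIP string) (string, error) {
	ip := net.ParseIP(lbIP)
	if ip == nil || ip.To4() == nil {
		return "", fmt.Errorf("not a valid IPv4 address: %q", lbIP)
	}
	return ip.Mask(net.CIDRMask(24, 32)).String() + "/24", nil
}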
test case
openshift/openshift-tests-private
c1d157e6-1fcc-4598-8b0f-14a028f7c87d
Author:jechen-High-74601-Verify traffic and OVNK LB endpoints in nbdb for LoadBalancer Service when externalTrafficPolicy is set to Cluster.[Serial]
['"context"', '"fmt"', '"net"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:jechen-High-74601-Verify traffic and OVNK LB endpoints in nbdb for LoadBalancer Service when externalTrafficPolicy is set to Cluster.[Serial]", func() { // For customer bug https://issues.redhat.com/browse/OCPBUGS-24363 // OVNK choose LB endpoints in the following sequence: // 1. when there is/are pods in Ready state, ovnk ONLY choose endpoints of ready pods // 2. When there is/are no ready pods, ovnk choose endpoints that terminating + serving endpoints buildPruningBaseDir := exutil.FixturePath("testdata", "networking") testPodFile := filepath.Join(buildPruningBaseDir, "testpod-with-special-lifecycle.yaml") genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") platform := exutil.CheckPlatform(oc) scheduleableNodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) acceptedPlatform := strings.Contains(platform, "gcp") || strings.Contains(platform, "azure") if !acceptedPlatform || len(scheduleableNodeList.Items) < 2 { g.Skip("Test cases should be run on GCP or Azure cluster with ovn network plugin, minimal 2 nodes are required, skip for others that do not meet the test requirement") } exutil.By("1. Get namespace, create 2 test pods in it, create a service in front of the test pods \n") ns := oc.Namespace() createResourceFromFile(oc, ns, testPodFile) err = waitForPodWithLabelReady(oc, ns, "name=test-pods") exutil.AssertWaitPollNoErr(err, "Not all test pods with label name=test-pods are ready") exutil.By("2. Create a service in front of the above test pods \n") svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "test-pods", serviceType: "LoadBalancer", ipFamilyPolicy: "SingleStack", internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Cluster", template: genericServiceTemplate, } svc.createServiceFromParams(oc) svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename)) exutil.By("3. Get IP for the OVN service lb \n") var svcIPv4, podIPv4, curlSVC4ChkCmd string svcIPv4, _ = getSvcIP(oc, svc.namespace, svc.servicename) curlSVC4ChkCmd = fmt.Sprintf("for i in {1..10}; do curl %s --connect-timeout 5 ; sleep 2;echo ;done", net.JoinHostPort(svcIPv4, "27017")) e2e.Logf("IP for service IP: %s", svcIPv4) exutil.By("4. Before scale down test pods, check OVN service lb endpoints in northdb and traffic at endpoints \n") exutil.By("4.1. 
Check OVN service lb endpoints in northdb, it should include all running backend test pods \n") allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=test-pods") o.Expect(getPodErr).NotTo(o.HaveOccurred()) o.Expect(len(allPods)).NotTo(o.BeEquivalentTo(0)) var expectedEndpointsv4 []string podNodeNames := make(map[string]string) podIPv4s := make(map[string]string) for _, eachPod := range allPods { nodeName, getNodeErr := exutil.GetPodNodeName(oc, ns, eachPod) o.Expect(getNodeErr).NotTo(o.HaveOccurred()) podNodeNames[eachPod] = nodeName podIPv4, _ = getPodIP(oc, ns, eachPod) podIPv4s[eachPod] = podIPv4 expectedEndpointsv4 = append(expectedEndpointsv4, podIPv4+":8080") } e2e.Logf("\n V4 endpoints of service lb are expected to be: %v\n", expectedEndpointsv4) // check service lb endpoints in northdb on each node's ovnkube-pod nodeList, nodeErr := exutil.GetAllNodesbyOSType(oc, "linux") o.Expect(nodeErr).NotTo(o.HaveOccurred()) o.Expect(len(nodeList)).NotTo(o.BeEquivalentTo(0)) var endpointsv4 []string var epErr error for _, eachNode := range nodeList { endpointsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") e2e.Logf("\n Got V4 endpoints of service lb for node %s : %v\n", eachNode, endpointsv4) o.Expect(epErr).NotTo(o.HaveOccurred()) o.Expect(unorderedEqual(endpointsv4, expectedEndpointsv4)).Should(o.BeTrue(), fmt.Sprintf("V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } exutil.By("4.2. Verify all running pods get traffic \n") var channels [2]chan string // Initialize each channel in the array for i := range channels { channels[i] = make(chan string) } exutil.By(" Start tcpdump on each pod's node") for i, pod := range allPods { go func(i int, pod string) { defer g.GinkgoRecover() tcpdumpCmd := fmt.Sprintf(`timeout 60s tcpdump -c 4 -nneep -i any "(dst port 8080) and (dst %s)"`, podIPv4s[pod]) outputTcpdump, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+podNodeNames[pod], "--", "bash", "-c", tcpdumpCmd).Output() channels[i] <- outputTcpdump }(i, pod) } // add sleep time to let the curl traffic happen after tcpdump is enabled. time.Sleep(5 * time.Second) exutil.By(" Curl the externalIP service from test runner\n") output, curlErr := exec.Command("bash", "-c", curlSVC4ChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) for i, pod := range allPods { receivedMsg := <-channels[i] e2e.Logf(" at step 4.2, tcpdumpOutput for node %s is \n%s\n\n", podNodeNames[pod], receivedMsg) o.Expect(strings.Contains(receivedMsg, podIPv4s[pod])).Should(o.BeTrue()) } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") exutil.By("5. 
Scale test pods down to 1 \n") scaleErr := oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=1", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) allPods = allPods[:0] var terminatingPods []string o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 1 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 1") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) allPods = append(allPods, terminatingPods[0]) var expectedCleanedUpEPsv4, expectedRemindedEPsv4, actualFinalEPsv4 []string for _, eachPod := range terminatingPods { expectedCleanedUpEPsv4 = append(expectedCleanedUpEPsv4, podIPv4s[eachPod]+":8080") } e2e.Logf("\n V4 endpoints of service lb are expected to be cleaned up: %v\n", expectedCleanedUpEPsv4) runningPods := getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") o.Expect(len(runningPods)).To(o.BeEquivalentTo(1)) e2e.Logf("\n runningPods: %v\n", runningPods) allPods = append(allPods, runningPods[0]) for _, eachPod := range runningPods { expectedRemindedEPsv4 = append(expectedRemindedEPsv4, podIPv4s[eachPod]+":8080") } e2e.Logf("\n V4 endpoints of service lb are expected to remind: %v\n", expectedRemindedEPsv4) exutil.By("5.1. Check lb-list entries in northdb again in each node's ovnkube-node pod, only Ready pods' endpoints reminded in service lb endpoints \n") for _, eachNode := range nodeList { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 2, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemindedEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) // Verify terminating pods' endpoints are not in final service lb endpoints for _, ep := range expectedCleanedUpEPsv4 { o.Expect(isValueInList(ep, actualFinalEPsv4)).ShouldNot(o.BeTrue(), fmt.Sprintf("After scale-down, terminating pod's V4 endpoint %s is not cleaned up from V4 service lb endpoint", ep)) } } exutil.By("5.2 Verify only the running pod receives traffic, the terminating pod does not receive traffic \n") for i, pod := range allPods { go func(i int, pod string) { defer g.GinkgoRecover() tcpdumpCmd := fmt.Sprintf(`timeout 60s tcpdump -c 4 -nneep -i any "(dst port 8080) and (dst %s)"`, podIPv4s[pod]) outputTcpdump, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+podNodeNames[pod], "--", "bash", "-c", tcpdumpCmd).Output() channels[i] <- outputTcpdump }(i, pod) } // add sleep time to let the ping action happen later after tcpdump is enabled. time.Sleep(5 * time.Second) output, curlErr = exec.Command("bash", "-c", curlSVC4ChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) for i, pod := range allPods { receivedMsg := <-channels[i] e2e.Logf(" at step 5.2, tcpdumpOutput for node %s is \n%s\n\n", podNodeNames[pod], receivedMsg) if pod == terminatingPods[0] { o.Expect(strings.Contains(receivedMsg, "0 packets captured")).Should(o.BeTrue()) } else { o.Expect(strings.Contains(receivedMsg, podIPv4s[pod])).Should(o.BeTrue()) } } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") exutil.By("5.3. 
Wait for the terminating pod from step 5 to disappear so that there is only one running pod left\n") o.Eventually(func() bool { allPodsWithLabel := getPodName(oc, ns, "name=test-pods") runningPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Running") return len(runningPods) == len(allPodsWithLabel) }, "180s", "10s").Should(o.BeTrue(), "Terminating pods did not disappear after waiting enough time") exutil.By("6. Scale test pods down to 0 \n") scaleErr = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=0", "-n", ns).Execute() o.Expect(scaleErr).NotTo(o.HaveOccurred()) o.Eventually(func() bool { terminatingPods = getAllPodsWithLabelAndCertainState(oc, ns, "name=test-pods", "Terminating") return len(terminatingPods) == 1 }, "30s", "5s").Should(o.BeTrue(), "Test pods did not scale down to 0") e2e.Logf("\n terminatingPods: %v\n", terminatingPods) exutil.By("6.1. Check lb-list entries in northdb again in each node's ovnkube-node pod, verify that the terminating but serving pod remains in service lb endpoints \n") // expectedRemainingEPsv4 are still expected in NBDB for a little while, // because the last pod transitions from Running to terminating-but-serving and there is no other running pod available for _, eachNode := range nodeList { actualFinalEPsv4, epErr = getLBListEndpointsbySVCIPPortinNBDB(oc, eachNode, svcIPv4+":27017") o.Expect(epErr).NotTo(o.HaveOccurred()) e2e.Logf("\n\n After scale-down to 0, V4 endpoints from lb-list output on node %s northdb: %v\n\n", eachNode, actualFinalEPsv4) o.Expect(unorderedEqual(actualFinalEPsv4, expectedRemainingEPsv4)).Should(o.BeTrue(), fmt.Sprintf("After scale-down, V4 service lb endpoints on node %s do not match expected endpoints!", eachNode)) } exutil.By("6.2 Verify that the terminating pod still receives traffic because there is no other running pod\n") for i, pod := range terminatingPods { go func(i int, pod string) { defer g.GinkgoRecover() tcpdumpCmd := fmt.Sprintf(`timeout 60s tcpdump -c 4 -nneep -i any "(dst port 8080) and (dst %s)"`, podIPv4s[pod]) outputTcpdump, _ := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+podNodeNames[pod], "--", "bash", "-c", tcpdumpCmd).Output() channels[i] <- outputTcpdump }(i, pod) } // add sleep time so the curl happens only after tcpdump is enabled. time.Sleep(5 * time.Second) output, curlErr = exec.Command("bash", "-c", curlSVC4ChkCmd).Output() o.Expect(curlErr).NotTo(o.HaveOccurred()) for i, pod := range terminatingPods { receivedMsg := <-channels[i] e2e.Logf(" at step 6.2, tcpdumpOutput for node %s is \n%s\n\n", podNodeNames[pod], receivedMsg) o.Expect(strings.Contains(receivedMsg, podIPv4s[pod])).Should(o.BeTrue()) } o.Expect(strings.Contains(string(output), "Hello OpenShift")).Should(o.BeTrue(), "The externalIP service is not reachable as expected") })
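The endpoint assertions above compare slices with unorderedEqual and isValueInList. Those helpers are defined elsewhere in this repository; the following is a minimal sketch, reconstructed only from how the test invokes them, of what such order-insensitive comparisons can look like in Go:

// isValueInList reports whether value appears anywhere in list.
func isValueInList(value string, list []string) bool {
	for _, v := range list {
		if v == value {
			return true
		}
	}
	return false
}

// unorderedEqual reports whether two slices hold the same elements
// regardless of order; it ignores multiplicity, which is sufficient here
// because pod IP:port endpoints are unique.
func unorderedEqual(first, second []string) bool {
	if len(first) != len(second) {
		return false
	}
	for _, v := range first {
		if !isValueInList(v, second) {
			return false
		}
	}
	return true
}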
test case
openshift/openshift-tests-private
962f2510-c6e7-432f-b56f-994c13bbe54f
Author:asood-Medium-75424-SessionAffinity does not work after scaling down the Pods
['"fmt"', '"path/filepath"', '"strconv"', '"strings"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:asood-Medium-75424-SessionAffinity does not work after scaling down the Pods", func() { //Bug: https://issues.redhat.com/browse/OCPBUGS-28604 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") servicesBaseDir = exutil.FixturePath("testdata", "networking/services") pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml") sessionAffinitySvcTemplate = filepath.Join(servicesBaseDir, "sessionaffinity-svc-template.yaml") customResponsePodTemplate = filepath.Join(servicesBaseDir, "custom-response-pod-template.yaml") labelKey = "name" labelVal = "openshift" testID = "75424" curlCmdList = []string{} ) ns := oc.Namespace() exutil.By(fmt.Sprintf("Create pods that will serve as the endpoints for Session Affinity enabled service in %s project", ns)) customResponsePod := customResponsePodResource{ name: " ", namespace: ns, labelKey: labelKey, labelVal: labelVal, responseStr: " ", template: customResponsePodTemplate, } for i := 0; i < 3; i++ { customResponsePod.name = "hello-pod-" + strconv.Itoa(i) customResponsePod.responseStr = "Hello from " + customResponsePod.name customResponsePod.createCustomResponsePod(oc) waitPodReady(oc, ns, customResponsePod.name) } exutil.By(fmt.Sprintf("Create a test pod in %s", ns)) testPod := pingPodResource{ name: "test-pod", namespace: ns, template: pingPodTemplate, } testPod.createPingPod(oc) waitPodReady(oc, ns, testPod.name) svc := sessionAffinityServiceResource{ name: " ", namespace: ns, ipFamilyPolicy: " ", selLabelKey: labelKey, SelLabelVal: labelVal, template: sessionAffinitySvcTemplate, } ipStackType := checkIPStackType(oc) exutil.By(fmt.Sprintf("Create a service with session affinity enabled on %s cluster", ipStackType)) if ipStackType == "dualstack" { svc.name = "dualstacksvc-" + testID svc.ipFamilyPolicy = "PreferDualStack" defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) svc.createSessionAffiniltyService(oc) svcOutput, svcErr := oc.AsAdmin().Run("get").Args("service", "-n", svc.namespace).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring(svc.name)) serviceIPv6, serviceIPv4 := getSvcIP(oc, svc.namespace, svc.name) curlCmdList = append(curlCmdList, fmt.Sprintf("curl %s:8080 --connect-timeout 5", serviceIPv4)) curlCmdList = append(curlCmdList, fmt.Sprintf("curl -g -6 [%s]:8080 --connect-timeout 5", serviceIPv6)) } else { svc.ipFamilyPolicy = "SingleStack" svc.name = "singlestack-" + ipStackType + "-svc-" + testID defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace) svc.createSessionAffiniltyService(oc) svcOutput, svcErr := oc.AsAdmin().Run("get").Args("service", "-n", svc.namespace).Output() o.Expect(svcErr).NotTo(o.HaveOccurred()) o.Expect(svcOutput).To(o.ContainSubstring(svc.name)) if ipStackType == "ipv6single" { serviceIPv6, _ := getSvcIP(oc, svc.namespace, svc.name) curlCmdList = append(curlCmdList, fmt.Sprintf("curl -g -6 [%s]:8080 --connect-timeout 5", serviceIPv6)) } else { serviceIPv4 := getSvcIPv4(oc, svc.namespace, svc.name) curlCmdList = append(curlCmdList, fmt.Sprintf("curl %s:8080 --connect-timeout 5", serviceIPv4)) } } for _, curlCmd := range curlCmdList { exutil.By(fmt.Sprintf("Test session affinity using request '%s' cluster", curlCmd)) e2e.Logf("Send first request to service") firstResponse1, requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) e2e.Logf("Request response: %s", firstResponse1) for i := 0; i < 9; i++ { requestResp, 
requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(requestResp, firstResponse1)).To(o.BeTrue()) } e2e.Logf("Find the pod serving request and delete it") respStr := strings.Split(strings.TrimRight(firstResponse1, "\n"), " ") o.Expect(len(respStr)).To(o.BeEquivalentTo(3)) o.Expect(respStr[2]).NotTo(o.BeEmpty()) removeResource(oc, true, true, "pod", respStr[2], "-n", ns) e2e.Logf(fmt.Sprintf("Send first request to service after deleting the previously serving pod %s", respStr[2])) firstResponse2, requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) e2e.Logf("Request response: %s", firstResponse2) o.Expect(strings.Contains(firstResponse2, firstResponse1)).To(o.BeFalse()) for i := 0; i < 9; i++ { requestResp, requestErr := e2eoutput.RunHostCmd(ns, testPod.name, curlCmd) o.Expect(requestErr).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(requestResp, firstResponse2)).To(o.BeTrue()) } } })
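The session-affinity check above identifies the serving pod by parsing the custom response string "Hello from <pod-name>". A minimal standalone sketch of that parsing step (servingPodName is a hypothetical name for illustration, not a helper from this test suite):

package main

import (
	"fmt"
	"strings"
)

// servingPodName extracts the pod name from a "Hello from <pod-name>"
// response and rejects any other shape.
func servingPodName(response string) (string, error) {
	fields := strings.Split(strings.TrimRight(response, "\n"), " ")
	if len(fields) != 3 || fields[2] == "" {
		return "", fmt.Errorf("unexpected response format: %q", response)
	}
	return fields[2], nil
}

func main() {
	name, err := servingPodName("Hello from hello-pod-1\n")
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // prints: hello-pod-1
}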
test case
openshift/openshift-tests-private
57cfb7af-3ded-4b65-86d1-b13a226039cc
Author:meinli-Critical-78262-Validate pod/host to hostnetwork pod/nodeport with hostnetwork pod backend on same/diff workers
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
github.com/openshift/openshift-tests-private/test/extended/networking/services.go
g.It("Author:meinli-Critical-78262-Validate pod/host to hostnetwork pod/nodeport with hostnetwork pod backend on same/diff workers", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking") hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml") pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml") genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml") ipFamilyPolicy = "SingleStack" ) platform := exutil.CheckPlatform(oc) if !(strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "none")) { g.Skip("These cases can only be run on networking team's private RDU BM cluster, vSphere and IPI/UPI BM, skip for other platforms!!!") } exutil.By("1. Get namespace, master and worker node") ns := oc.Namespace() nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 2 { g.Skip("This case requires 2 nodes, but the cluster has less than two nodes") } masterNode, err := exutil.GetFirstMasterNode(oc) o.Expect(err).NotTo(o.HaveOccurred()) //Required for hostnetwork pod err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("2. Create hostnetwork pod in ns") hostpod := pingPodResourceNode{ name: "hostnetwork-pod", namespace: ns, nodename: nodeList.Items[0].Name, template: hostNetworkPodTemplate, } hostpod.createPingPodNode(oc) waitPodReady(oc, ns, hostpod.name) exutil.By("3. Create nodeport service with hostnetwork pod backend when externalTrafficPolicy=Local") ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" { ipFamilyPolicy = "PreferDualStack" } svc := genericServiceResource{ servicename: "test-service", namespace: ns, protocol: "TCP", selector: "hello-pod", serviceType: "NodePort", ipFamilyPolicy: ipFamilyPolicy, internalTrafficPolicy: "Cluster", externalTrafficPolicy: "Local", template: genericServiceTemplate, } svc.createServiceFromParams(oc) nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("4. Create two normal pods on diff workers") pods := make([]pingPodResourceNode, 2) for i := 0; i < 2; i++ { pods[i] = pingPodResourceNode{ name: "hello-pod" + strconv.Itoa(i), namespace: ns, nodename: nodeList.Items[i].Name, template: pingPodNodeTemplate, } pods[i].createPingPodNode(oc) waitPodReady(oc, ns, pods[i].name) defer exutil.LabelPod(oc, ns, pods[i].name, "name-") err = oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns, "pod", pods[i].name, fmt.Sprintf("name=hello-pod-%d", i), "--overwrite=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) } exutil.By("5. Validate host to pod on same/diff workers") CurlNode2PodPass(oc, pods[0].nodename, ns, pods[0].name) CurlNode2PodPass(oc, pods[1].nodename, ns, pods[0].name) exutil.By("6. Validate pod to host network pod on same/diff workers") CurlPod2PodPass(oc, ns, pods[0].name, ns, hostpod.name) CurlPod2PodPass(oc, ns, pods[1].name, ns, hostpod.name) exutil.By("7. 
Validate pod to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Local") CurlPod2NodePortPass(oc, ns, pods[0].name, nodeList.Items[0].Name, nodePort) CurlPod2NodePortFail(oc, ns, pods[0].name, nodeList.Items[1].Name, nodePort) exutil.By("8. Validate host to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Local") CurlNodePortPass(oc, masterNode, nodeList.Items[0].Name, nodePort) CurlNodePortFail(oc, masterNode, nodeList.Items[1].Name, nodePort) exutil.By("9. Validate pod to nodePort with hostnetwork pod backend on diff workers when externalTrafficPolicy=Cluster") exutil.By("9.1 Create nodePort service with externalTrafficPolicy=Cluster in the test namespace") removeResource(oc, true, true, "svc", "test-service", "-n", ns) svc.externalTrafficPolicy = "Cluster" svc.createServiceFromParams(oc) nodePort, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) exutil.By("9.2 Validate pod to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Cluster") CurlPod2NodePortPass(oc, ns, pods[0].name, nodeList.Items[0].Name, nodePort) CurlPod2NodePortPass(oc, ns, pods[0].name, nodeList.Items[1].Name, nodePort) exutil.By("10. Validate host to nodePort with hostnetwork pod backend on same/diff workers when externalTrafficPolicy=Cluster") CurlNodePortPass(oc, masterNode, nodeList.Items[0].Name, nodePort) CurlNodePortPass(oc, masterNode, nodeList.Items[1].Name, nodePort) })
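The CurlPod2NodePort and CurlNodePort helpers used above live elsewhere in this repository. As a rough sketch of the underlying probe, assuming a node-IP lookup helper (getNodeIPv4 here is an assumption for illustration, not a confirmed repo API), a pod-to-nodePort check reduces to running curl inside the client pod:

// curlPod2NodePort is a hypothetical sketch, not the repo's helper: it
// curls <nodeIP>:<nodePort> from inside the given pod and returns any
// execution error, which a caller can assert on for pass/fail semantics.
func curlPod2NodePort(oc *exutil.CLI, ns, pod, nodeName, nodePort string) error {
	nodeIP := getNodeIPv4(oc, nodeName) // assumed node-IP lookup helper
	cmd := fmt.Sprintf("curl --connect-timeout 5 -s %s:%s", nodeIP, nodePort)
	_, err := e2eoutput.RunHostCmd(ns, pod, cmd)
	return err
}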
test
openshift/openshift-tests-private
3ab4b630-be2c-4c16-a38f-dc2f9b1b7cbe
sriov
import ( "context" "fmt" "path/filepath" "regexp" "strings" "time" filePath "path/filepath" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" netobserv "github.com/openshift/openshift-tests-private/test/extended/netobserv" exutil "github.com/openshift/openshift-tests-private/test/extended/util" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" e2e "k8s.io/kubernetes/test/e2e/framework" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" )
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
package networking import ( "context" "fmt" "path/filepath" "regexp" "strings" "time" filePath "path/filepath" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" netobserv "github.com/openshift/openshift-tests-private/test/extended/netobserv" exutil "github.com/openshift/openshift-tests-private/test/extended/util" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" e2e "k8s.io/kubernetes/test/e2e/framework" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" ) var _ = g.Describe("[sig-networking] SDN sriov", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("sriov-"+getRandomString(), exutil.KubeConfigPath()) ) g.BeforeEach(func() { // For now, skip sriov cases when the operator is absent so they do not always show as failed in CI; an operator-install step will be added later. _, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), "openshift-sriov-network-operator", metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { g.Skip("the cluster does not have the sriov operator installed") } } }) g.It("NonPreRelease-Author:yingwang-Medium-Longduration-42253-Pod with sriov interface should be created successfully with empty pod.ObjectMeta.Namespace in body [Disruptive]", func() { var ( networkBaseDir = exutil.FixturePath("testdata", "networking") sriovBaseDir = filepath.Join(networkBaseDir, "sriov") sriovNetPolicyName = "netpolicy42253" sriovNetDeviceName = "netdevice42253" sriovOpNs = "openshift-sriov-network-operator" podName1 = "sriov-42253-testpod1" podName2 = "sriov-42253-testpod2" pfName = "ens2f0" deviceID = "1015" ipv4Addr1 = "192.168.2.5/24" ipv6Addr1 = "2002::5/64" ipv4Addr2 = "192.168.2.6/24" ipv6Addr2 = "2002::6/64" sriovIntf = "net1" podTempfile = "sriov-testpod-template.yaml" serviceAccount = "deployer" ) sriovNetworkPolicyTmpFile := filepath.Join(sriovBaseDir, "netpolicy42253-template.yaml") sriovNetworkPolicy := sriovNetResource{ name: sriovNetPolicyName, namespace: sriovOpNs, tempfile: sriovNetworkPolicyTmpFile, kind: "SriovNetworkNodePolicy", } sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "netdevice42253-template.yaml") sriovNetwork := sriovNetResource{ name: sriovNetDeviceName, namespace: sriovOpNs, tempfile: sriovNetworkAttachTmpFile, kind: "SriovNetwork", } g.By("1) ####### Check openshift-sriov-network-operator is running well ##########") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check if the deviceID exists on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, deviceID) { g.Skip("the cluster does not contain the sriov card. skip this testing!") } //make sure the pf and sriov network policy names are not occupied rmSriovNetworkPolicy(oc, sriovNetworkPolicy.name, sriovNetworkPolicy.namespace) rmSriovNetwork(oc, sriovNetwork.name, sriovNetwork.namespace) oc.SetupProject() g.By("2) ####### Create sriov network policy ############") sriovNetworkPolicy.create(oc, "PFNAME="+pfName, "DEVICEID="+deviceID, "SRIOVNETPOLICY="+sriovNetworkPolicy.name) defer rmSriovNetworkPolicy(oc, sriovNetworkPolicy.name, sriovNetworkPolicy.namespace) waitForSriovPolicyReady(oc, sriovNetworkPolicy.namespace) g.By("3) ######### Create sriov network attachment ############") e2e.Logf("create sriov network attachment via template") sriovNetwork.create(oc, "TARGETNS="+oc.Namespace(), "SRIOVNETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetworkPolicy.name) defer sriovNetwork.delete(oc) // ensure the resource is deleted whether the case exits normally or not. 
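// At this point the SriovNetworkNodePolicy has carved VFs out of the
// selected PF and the SriovNetwork has rendered a net-attach-def in the
// test namespace; the pods created below attach to it through the
// k8s.v1.cni.cncf.io/networks annotation in their templates.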
g.By("4) ########### Create Pod and attach sriov interface using cli ##########") podTempFile1 := filepath.Join(sriovBaseDir, podTempfile) testPod1 := sriovPod{ name: podName1, namespace: oc.Namespace(), tempfile: podTempFile1, ipv4addr: ipv4Addr1, ipv6addr: ipv6Addr1, intfname: sriovIntf, intfresource: sriovNetDeviceName, } podsLog := testPod1.createPod(oc) defer testPod1.deletePod(oc) // ensure the resource is deleted whether the case exist normally or not. testPod1.waitForPodReady(oc) intfInfo1 := testPod1.getSriovIntfonPod(oc) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.intfname)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv4addr)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv6addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod1.name) g.By("5) ########### Create Pod via url without namespace ############") podTempFile2 := filepath.Join(sriovBaseDir, podTempfile) testPod2 := sriovPod{ name: podName2, namespace: oc.Namespace(), tempfile: podTempFile2, ipv4addr: ipv4Addr2, ipv6addr: ipv6Addr2, intfname: sriovIntf, intfresource: sriovNetDeviceName, } e2e.Logf("extract curl reqeust command from logs of creating pod via cli") re := regexp.MustCompile("(curl.+-XPOST.+kubectl-create')") match := re.FindStringSubmatch(podsLog) curlCmd := match[1] e2e.Logf("Extracted curl from pod creating logs is %s", curlCmd) //creating pod via curl request testPod2.sendHTTPRequest(oc, serviceAccount, curlCmd) defer testPod2.deletePod(oc) testPod2.waitForPodReady(oc) intfInfo2 := testPod2.getSriovIntfonPod(oc) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.intfname)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv4addr)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv6addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod2.name) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25321-[E810-C] Check intel dpdk works well [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" sriovNodeLabel = "feature.node.kubernetes.io/sriov-capable=true" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810", deviceType: "vfio-pci", deviceID: "1593", pfName: "ens2f2", vendor: "8086", numVfs: 2, resourceName: "e810dpdk", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") sriovPolicy.createPolicy(oc) defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) waitForSriovPolicyReady(oc, sriovOpNs) g.By("check the vhost is loaded") sriovNode := getSriovNode(oc, sriovOpNs, sriovNodeLabel) output, err := exutil.DebugNodeWithChroot(oc, sriovNode, "bash", "-c", "lsmod | grep vhost") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).To(o.ContainSubstring("vhost_net")) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } sriovnetwork.createSriovNetwork(oc) defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err = waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-49213-[E810-C] VF with large number can be inited for intel card [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovOpNs = "openshift-sriov-network-operator" sriovNodeLabel = "feature.node.kubernetes.io/sriov-capable=true" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810", deviceType: "netdevice", deviceID: "1593", pfName: "ens2f0", vendor: "8086", numVfs: 40, resourceName: "e810net", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") sriovPolicy.createPolicy(oc) defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) waitForSriovPolicyReady(oc, sriovOpNs) g.By("check the link show the correct VF") sriovNode := getSriovNode(oc, sriovOpNs, sriovNodeLabel) output, err := exutil.DebugNodeWithChroot(oc, sriovNode, "bash", "-c", "ip l | grep "+sriovPolicy.pfName+"v | wc -l") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).To(o.ContainSubstring("40")) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-47660-[E810-XXV] DPDK works well in pod with vfio-pci for E810-XXVDA4 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810xxv", deviceType: "vfio-pci", deviceID: "159b", pfName: "ens2f0", vendor: "8086", numVfs: 2, resourceName: "e810dpdk", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-47661-[E810-XXV] sriov pod with netdevice deviceType for E810-XXVDA4 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = 
filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810xxv", deviceType: "netdevice", deviceID: "159b", pfName: "ens2f0", vendor: "8086", numVfs: 3, resourceName: "e810netdevice", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "e810netdevice", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-netdevice") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready") g.By("Check test pod have second interface with assigned ip") command := "ip a show net1" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("10.56.217")) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-41145-[xl710] sriov pod can be worked well with netdevice deviceType for xl710 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "xl710", deviceType: "netdevice", deviceID: "1583", pfName: "ens2f0", vendor: "8086", numVfs: 3, resourceName: "xl710netdevice", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "xl710netdevice", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-netdevice") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready") g.By("Check test pod have second interface with assigned ip") command := "ip a show net1" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("10.56.217")) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-41144-[xl710] DPDK works well in pod with vfio-pci for xl710 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "xl710", deviceType: "vfio-pci", deviceID: "1583", pfName: "ens2f0", vendor: "8086", numVfs: 2, resourceName: "xl710", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) }) g.It("NonPreRelease-Longduration-Author:yingwang-Medium-50440-creating and deleting multiple sriovnetworknodepolicy, cluster can work well.[Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovOpNs = "openshift-sriov-network-operator" sriovNetPolicyName1 = "sriovpolicypf1" sriovNetPolicyName2 = "sriovpolicypf2" ) sriovNetPolicy1 := sriovNetworkNodePolicy{ policyName: sriovNetPolicyName1, deviceType: "netdevice", deviceID: "1015", pfName: "ens2f0", vendor: "15b3", numVfs: 2, resourceName: sriovNetPolicyName1, template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } sriovNetPolicy2 := sriovNetworkNodePolicy{ policyName: sriovNetPolicyName2, deviceType: "netdevice", deviceID: "1015", pfName: "ens2f1", vendor: "15b3", numVfs: 2, resourceName: sriovNetPolicyName2, template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("1) ####### Check openshift-sriov-network-operator is running well ##########") chkSriovOperatorStatus(oc, sriovOpNs) g.By("2) Check the deviceID exists on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovNetPolicy1.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("3) ####### create a new sriov policy before the previous one is ready ############") //create one sriovnetworknodepolicy defer rmSriovNetworkPolicy(oc, sriovNetPolicy1.policyName, sriovOpNs) sriovNetPolicy1.createPolicy(oc) waitForSriovPolicySyncUpStart(oc, sriovNetPolicy1.namespace) //create a new sriov policy before nodes sync up ready defer rmSriovNetworkPolicy(oc, sriovNetPolicy2.policyName, sriovOpNs) sriovNetPolicy2.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("4) ####### delete and recreate sriov network policy ############") //delete sriov policy and recreate it before nodes sync up ready _, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("SriovNetworkNodePolicy", sriovNetPolicy1.policyName, "-n", sriovOpNs, "--ignore-not-found").Output() o.Expect(err).NotTo(o.HaveOccurred()) waitForSriovPolicySyncUpStart(oc, sriovNetPolicy1.namespace) defer rmSriovNetworkPolicy(oc, sriovNetPolicy1.policyName, sriovOpNs) sriovNetPolicy1.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-56613-[sts] sriov pod can be worked well with netdevice deviceType for sts adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "stsnet", deviceType: "netdevice", deviceID: "1591", pfName: "ens4f3", vendor: "8086", numVfs: 3, resourceName: "stsnetdevice", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "stsnetdevice", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-netdevice") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready") g.By("Check test pod have second interface with assigned ip") command := "ip a show net1" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("10.56.217")) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-56611-[sts] DPDK works well in pod with vfio-pci for sts adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "stsdpdk", deviceType: "vfio-pci", deviceID: "1591", pfName: "ens4f3", vendor: "8086", numVfs: 2, resourceName: "stsdpdk", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) }) g.It("Author:zzhao-Medium-NonPreRelease-Longduration-69134-SR-IOV VFs can be created and do not need to wait all the nodes in the pools are updated [Disruptive]", func() { //bug https://issues.redhat.com/browse/OCPBUGS-10323 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") hugepageMC = filepath.Join(buildPruningBaseDir, "hugepageMC.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovOpNs = "openshift-sriov-network-operator" iperfRcTmp = filepath.Join(buildPruningBaseDir, "iperf-rc-template.json") sriovNetworkType = "k8s.v1.cni.cncf.io/networks" sriovNodeLabel = "feature.node.kubernetes.io/sriov-capable=true" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "cx5", deviceType: "netdevice", deviceID: "1017", pfName: "ens1f1np1", vendor: "15b3", numVfs: 3, resourceName: "cx5n", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } exutil.By("check sriov worker is ready in 2 minute, if not skip this case") exutil.AssertOrCheckMCP(oc, "sriov", 20*time.Second, 2*time.Minute, true) exutil.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) exutil.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } exutil.By("Create sriovnetworkpolicy to create VF and check they are created successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) exutil.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create mc to make sriov worker reboot one by one and check the pods can be running on first ready node") defer func() { exutil.By("wait mcp recovered") err := exutil.AssertOrCheckMCP(oc, "sriov", 60*time.Second, 30*time.Minute, false) o.Expect(err).Should(o.BeNil()) }() defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", hugepageMC).Execute() err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", hugepageMC).Execute() o.Expect(err).NotTo(o.HaveOccurred()) sriovScheduleDisableNodeName := findSchedulingDisabledNode(oc, 5*time.Second, 2*time.Minute, sriovNodeLabel) e2e.Logf("Currently scheduleDisable worker is %s", sriovScheduleDisableNodeName) checkNodeStatus(oc, sriovScheduleDisableNodeName, "NotReady") checkNodeStatus(oc, sriovScheduleDisableNodeName, "Ready") exutil.By("Create test pod on the target namespace") iperfPod := sriovNetResource{ name: "iperf-rc", namespace: ns1, tempfile: iperfRcTmp, kind: "rc", } //create iperf server pod on worker0 iperfPod.create(oc, "PODNAME="+iperfPod.name, "NAMESPACE="+iperfPod.namespace, "NETNAME="+sriovnetwork.name, "NETTYPE="+sriovNetworkType, "NODENAME="+sriovScheduleDisableNodeName) defer iperfPod.delete(oc) err = waitForPodWithLabelReady(oc, ns1, "name=iperf-rc") exutil.AssertWaitPollNoErr(err, "this pod was not ready with label name=iperf-rc") exutil.By("Check another worker still in scheduleDisable") sriovScheduleDisableNodeName2 := findSchedulingDisabledNode(oc, 5*time.Second, 2*time.Minute, sriovNodeLabel) e2e.Logf("Currently scheduleDisable worker is %s", sriovScheduleDisableNodeName2) o.Expect(sriovScheduleDisableNodeName2).NotTo(o.Equal(sriovScheduleDisableNodeName)) }) g.It("Author:zzhao-Medium-54368-Medium-54393-The MAC address entry in the ARP table of the source pod should be updated when the MAC address of the destination pod changes while retaining the same IP address [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 4 caseID = "54368-" networkName = caseID + "net" ) ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate 
net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "off", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create 2 test pods to consume the whereabouts ip") //create full number pods which use all of the VFs testpodPrex := "testpod" testpodNum := 2 createNumPods(oc, sriovnetwork.name, ns1, testpodPrex, testpodNum) exutil.By("now ping from one testpod to another and check the mac address in the arp table") pod1Name := getPodName(oc, ns1, "name=sriov-netdevice") pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns1, pod1Name[0]) e2e.Logf("The second interface v4 address of pod1 is: %v", pod1IPv4) e2e.Logf("The second interface v6 address of pod1 is: %v", pod1IPv6) command := fmt.Sprintf("ping -c 3 %s && ping6 -c 3 %s", pod1IPv4, pod1IPv6) pingOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name[1], command, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(pingOutput).To(o.ContainSubstring("3 received")) exutil.By("new pods will fail because all ips from whereabouts are already used") sriovTestNewPod := sriovTestPod{ name: "testpodnew", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestNewPod.createSriovTestPod(oc) e2e.Logf("creating new testpod should fail, because all ips from whereabouts are already used") o.Eventually(func() string { podStatus, _ := getPodStatus(oc, ns1, sriovTestNewPod.name) return podStatus }, 10*time.Second, 2*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", sriovTestNewPod.name)) exutil.By("delete the first pod and testpodnew will be ready") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns1, "pod", pod1Name[0]).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.AssertPodToBeReady(oc, sriovTestNewPod.name, ns1) newPodMac := getInterfaceMac(oc, ns1, sriovTestNewPod.name, "net1") exutil.By("check the entry of arp table for ipv4 is updated") commandv4 := fmt.Sprintf("ip neigh show %s | awk '{print $5}'", pod1IPv4) arpIpv4MacOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name[1], commandv4, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("arp for ipv4: %v", arpIpv4MacOutput) o.Expect(arpIpv4MacOutput).To(o.ContainSubstring(newPodMac)) exutil.By("check the entry of arp table for ipv6 is updated") commandv6 := fmt.Sprintf("ip neigh show %s | awk '{print $5}'", pod1IPv6) arpIpv6MacOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name[1], commandv6, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("arp for ipv6: %v", arpIpv6MacOutput) o.Expect(arpIpv6MacOutput).To(o.ContainSubstring(newPodMac)) }) g.It("LEVEL0-Author:zzhao-NonPreRelease-Longduration-Critical-49860-pods numbers same with VF numbers can be still working after worker reboot [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodRCTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-rc-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 2 caseID = "49860-test" networkName = caseID + "net" ) ns1 := oc.Namespace() 
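// Pod security on the namespace is relaxed below so that the privileged
// SR-IOV test pods created in this case are admitted.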
exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "off", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create 2 test pods with rc to consume the whereabouts ip") //create full number pods which use all of the VFs sriovTestPod := sriovTestPod{ name: caseID, namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodRCTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") exutil.By("ping from one pod to another with ipv4 and ipv6") podName := getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) exutil.By("Get node name of the pod") nodeName, nodeNameErr := exutil.GetPodNodeName(oc, ns1, podName[0]) o.Expect(nodeNameErr).NotTo(o.HaveOccurred()) exutil.By("Reboot node.") defer checkNodeStatus(oc, nodeName, "Ready") rebootNode(oc, nodeName) checkNodeStatus(oc, nodeName, "NotReady") checkNodeStatus(oc, nodeName, "Ready") exutil.By("ping from one pod to another with ipv4 and ipv6 after worker reboot") err = waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") podName = getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) }) g.It("Author:zzhao-Medium-55181-pci-address should be contained in networks-status annotation when using the tuning metaPlugin on SR-IOV Networks [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 4 caseID = "55181-" networkName = caseID + "net" ) ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "off", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create test pod with the VF") sriovTestPod := sriovTestPod{ name: "testpod", namespace: 
ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "app=testpod") exutil.AssertWaitPollNoErr(err, "pods with label app=testpod not ready") exutil.By("get the pci-address of the sriov interface") pciAddress := getPciAddress(ns1, sriovTestPod.name, policyName) exutil.By("check the pod info should contain pci-address") command := fmt.Sprintf("cat /etc/podnetinfo/annotations") podNetinfo, err := e2eoutput.RunHostCmdWithRetries(ns1, sriovTestPod.name, command, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(podNetinfo, pciAddress)).Should(o.BeTrue()) }) g.It("Author:zzhao-NonPreRelease-Longduration-Medium-73965-pods with sriov VF created and deleted 10 times [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml") sriovTestPodRCTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-rc-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 2 caseID = "73965-test" networkName = caseID + "net" ) ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "on", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create 2 test pods with rc to consume the whereabouts ip") //create full number pods which use all of the VFs sriovTestPod := sriovTestPod{ name: caseID, namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodRCTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") exutil.By("ping from one pod to another with ipv4 and ipv6") podName := getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) exutil.By("Delete and recreate pods 10 times to check pods reuse the VF and traffic pass") for i := 1; i <= 10; i++ { err := oc.WithoutNamespace().Run("delete").Args("pods", "--all", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred(), "Couldn't delete pods") err = waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") podName = getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) } }) g.Context("NetObserv SRIOV Scenarios", func() { var ( networkBaseDir = exutil.FixturePath("testdata", "networking") sriovBaseDir = filepath.Join(networkBaseDir, "sriov") sriovNetPolicyName = "netpolicy67619" sriovNetDeviceName1 = "netdevice67619-1" sriovNetDeviceName2 = "netdevice67619-2" sriovOpNs = "openshift-sriov-network-operator" podName1 = 
"sriov-67619-testpod1" podName2 = "sriov-67619-testpod2" pfName = "" deviceID = "" vendorID = "8086" vfNum = 4 ipv4Addr1 = "192.168.122.71" ipv4Addr2 = "192.168.122.72" sriovIntf = "net1" podTempfile = "sriov-testpod-netobserv-template.yaml" netobservNS = "openshift-netobserv-operator" NOPackageName = "netobserv-operator" catsrc = netobserv.Resource{Kind: "catsrc", Name: "netobserv-konflux-fbc", Namespace: "openshift-marketplace"} subscriptionDir = exutil.FixturePath("testdata", "netobserv", "subscription") NOSource = netobserv.CatalogSourceObjects{Channel: "stable", SourceName: catsrc.Name, SourceNamespace: catsrc.Namespace} // Operator namespace object OperatorNS = netobserv.OperatorNamespace{ Name: netobservNS, NamespaceTemplate: filePath.Join(subscriptionDir, "namespace.yaml"), } baseDir = exutil.FixturePath("testdata", "netobserv") flowFixturePath = filePath.Join(baseDir, "flowcollector_v1beta2_template.yaml") lokiDir = filePath.Join(baseDir, "loki") lokipvcFixturePath = filePath.Join(lokiDir, "loki-pvc.yaml") zeroClickLokiFixturePath = filePath.Join(lokiDir, "0-click-loki.yaml") NO = netobserv.SubscriptionObjects{ OperatorName: "netobserv-operator", Namespace: netobservNS, PackageName: NOPackageName, Subscription: filePath.Join(subscriptionDir, "sub-template.yaml"), OperatorGroup: filePath.Join(subscriptionDir, "allnamespace-og.yaml"), CatalogSource: &NOSource, } sriovNetworkAttachTmpFile = filepath.Join(sriovBaseDir, "sriovnetwork-netobserv.yaml") sriovNetwork1 = sriovNetResource{ name: sriovNetDeviceName1, namespace: sriovOpNs, tempfile: sriovNetworkAttachTmpFile, ip: ipv4Addr1 + "/24", kind: "SriovNetwork", } sriovNetwork2 = sriovNetResource{ name: sriovNetDeviceName2, namespace: sriovOpNs, tempfile: sriovNetworkAttachTmpFile, ip: ipv4Addr2 + "/24", kind: "SriovNetwork", } podTempFile1 = filepath.Join(sriovBaseDir, podTempfile) testPod1 = sriovPod{ name: podName1, namespace: sriovOpNs, tempfile: podTempFile1, ipv4addr: ipv4Addr1, intfname: sriovIntf, intfresource: sriovNetDeviceName1, pingip: ipv4Addr2, } testPod2 = sriovPod{ name: podName2, namespace: sriovOpNs, tempfile: podTempFile1, ipv4addr: ipv4Addr2, intfname: sriovIntf, intfresource: sriovNetDeviceName2, pingip: ipv4Addr1, } flow netobserv.Flowcollector ) g.BeforeEach(func() { lokiUrl := fmt.Sprintf("http://loki.%s.svc:3100", oc.Namespace()) flow = netobserv.Flowcollector{ Namespace: oc.Namespace(), Template: flowFixturePath, LokiMode: "Monolithic", MonolithicLokiURL: lokiUrl, EBPFPrivileged: "true", } g.By("Get SRIOV interfaces") route, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(route, "sriov.openshift-qe.sdn.com") { g.By("Running test on RDU1 cluster") pfName = "ens2f0" deviceID = "159b" } else if strings.Contains(route, "offload.openshift-qe.sdn.com") { g.By("Running test on RDU2 cluster") pfName = "ens2f1" deviceID = "1583" } else { g.Skip("This case will only run on RDU1/RDU2 cluster. Skip for other envrionment!!!") } g.By("####### Check openshift-sriov-network-operator is running well ##########") // assuming SRIOV Operator will be present in the cluster. // also assuming SRIOVOperatorConfig is applied in OCP versions >= 4.16 chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, deviceID) { g.Skip("the cluster do not contain the sriov card. 
skip this testing!") } g.By("Deploy konflux FBC and ImageDigestMirrorSet") imageDigest := filePath.Join(subscriptionDir, "image-digest-mirror-set.yaml") catSrcTemplate := filePath.Join(subscriptionDir, "catalog-source.yaml") exutil.ApplyNsResourceFromTemplate(oc, catsrc.Namespace, "--ignore-unknown-parameters=true", "-f", catSrcTemplate, "-p", "NAMESPACE="+catsrc.Namespace) netobserv.WaitUntilCatSrcReady(oc, catsrc.Name) netobserv.ApplyResourceFromFile(oc, netobservNS, imageDigest) g.By(fmt.Sprintf("Subscribe operators to %s channel", NOSource.Channel)) // check if Network Observability Operator is already present NOexisting := netobserv.CheckOperatorStatus(oc, NO.Namespace, NO.PackageName) // create operatorNS and deploy operator if not present if !NOexisting { OperatorNS.DeployOperatorNamespace(oc) NO.SubscribeOperator(oc) // check if NO operator is deployed netobserv.WaitForPodsReadyWithLabel(oc, NO.Namespace, "app="+NO.OperatorName) NOStatus := netobserv.CheckOperatorStatus(oc, NO.Namespace, NO.PackageName) o.Expect((NOStatus)).To(o.BeTrue()) } g.By("Deploy 0-click loki") _, _, err = exutil.SetupK8SNFSServerAndVolume(oc, 1) o.Expect(err).NotTo(o.HaveOccurred()) exutil.ApplyNsResourceFromTemplate(oc, oc.Namespace(), "--ignore-unknown-parameters=true", "-f", lokipvcFixturePath, "-p", "NAMESPACE="+oc.Namespace()) exutil.ApplyNsResourceFromTemplate(oc, oc.Namespace(), "--ignore-unknown-parameters=true", "-f", zeroClickLokiFixturePath) waitPodReady(oc, oc.Namespace(), "loki") }) g.AfterEach(func() { err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "loki", "-n", oc.Namespace()).Execute() o.Expect(err).ToNot(o.HaveOccurred()) err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "loki-store", "-n", oc.Namespace()).Execute() o.Expect(err).ToNot(o.HaveOccurred()) }) g.When("Agents are up prior to SRIOV interfaces", func() { g.It("Author:memodi-NonPreRelease-Medium-67619-Verify NetObserv flows are seen on SRIOV interfaces [Serial]", func() { g.By("Deploy flowcollector") defer flow.DeleteFlowcollector(oc) flow.CreateFlowcollector(oc) flow.WaitForFlowcollectorReady(oc) g.By("####### Create sriov network policy to create VF ############") defer rmSriovNetworkPolicy(oc, sriovNetPolicyName, sriovOpNs) result := initVF(oc, sriovNetPolicyName, deviceID, pfName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } g.By("######### Create sriov network attachment ############") e2e.Logf("create sriov network attachment via template") defer sriovNetwork1.delete(oc) sriovNetwork1.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork1.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork1.ip) defer sriovNetwork2.delete(oc) sriovNetwork2.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork2.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork2.ip) g.By("########### Create Pod and attach sriov interface using cli ##########") defer testPod1.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod1.namespace, "--ignore-unknown-parameters=true", "-f", testPod1.tempfile, "-p", "PODNAME="+testPod1.name, "SRIOVNETNAME="+testPod1.intfresource, "PING_IP="+testPod1.pingip) testPod1.waitForPodReady(oc) intfInfo1 := testPod1.getSriovIntfonPod(oc) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.intfname)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv4addr)) e2e.Logf("Check pod %s sriov 
interface and ip address PASS.", testPod1.name) defer testPod2.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod2.namespace, "--ignore-unknown-parameters=true", "-f", testPod2.tempfile, "-p", "PODNAME="+testPod2.name, "SRIOVNETNAME="+testPod2.intfresource, "PING_IP="+testPod2.pingip) testPod2.waitForPodReady(oc) intfInfo2 := testPod2.getSriovIntfonPod(oc) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.intfname)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv4addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod2.name) // sleep for 30 sec for flowlogs to be ingested in Loki time.Sleep(30 * time.Second) cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("svc/loki", "3100:3100", "-n", oc.Namespace()).Background() defer cmd.Process.Kill() o.Expect(err).NotTo(o.HaveOccurred()) lokilabels := netobserv.Lokilabels{ App: "netobserv-flowcollector", } interfaceParam := fmt.Sprintf("\"\\\"Interfaces\\\":.*%s.*\"", testPod2.intfname) parameters := []string{interfaceParam} flowRecords, err := lokilabels.GetMonolithicLokiFlowLogs("http://localhost:3100", time.Now(), parameters...) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(flowRecords)).To(o.BeNumerically(">", 0), "expected number of flowRecords to be greater than 0") }) }) g.When("SRIOV interfaces are up prior to agents", func() { g.It("Author:memodi-NonPreRelease-Medium-67619-Verify NetObserv flows are seen on SRIOV interfaces [Serial]", func() { g.By("####### Create sriov network policy to create VF ############") defer rmSriovNetworkPolicy(oc, sriovNetPolicyName, sriovOpNs) result := initVF(oc, sriovNetPolicyName, deviceID, pfName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } g.By("######### Create sriov network attachment ############") e2e.Logf("create sriov network attachment via template") defer sriovNetwork1.delete(oc) sriovNetwork1.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork1.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork1.ip) defer sriovNetwork2.delete(oc) sriovNetwork2.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork2.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork2.ip) g.By("########### Create Pod and attach sriov interface using cli ##########") defer testPod1.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod1.namespace, "--ignore-unknown-parameters=true", "-f", testPod1.tempfile, "-p", "PODNAME="+testPod1.name, "SRIOVNETNAME="+testPod1.intfresource, "PING_IP="+testPod1.pingip) testPod1.waitForPodReady(oc) intfInfo1 := testPod1.getSriovIntfonPod(oc) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.intfname)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv4addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod1.name) defer testPod2.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod2.namespace, "--ignore-unknown-parameters=true", "-f", testPod2.tempfile, "-p", "PODNAME="+testPod2.name, "SRIOVNETNAME="+testPod2.intfresource, "PING_IP="+testPod2.pingip) testPod2.waitForPodReady(oc) intfInfo2 := testPod2.getSriovIntfonPod(oc) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.intfname)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv4addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod2.name) g.By("Deploy flowcollector") defer flow.DeleteFlowcollector(oc) 
flow.CreateFlowcollector(oc) flow.WaitForFlowcollectorReady(oc) // sleep for 30 sec for flowlogs to be ingested in Loki time.Sleep(30 * time.Second) cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("svc/loki", "3100:3100", "-n", oc.Namespace()).Background() defer cmd.Process.Kill() o.Expect(err).NotTo(o.HaveOccurred()) lokilabels := netobserv.Lokilabels{ App: "netobserv-flowcollector", } interfaceParam := fmt.Sprintf("\"\\\"Interfaces\\\":.*%s.*\"", testPod2.intfname) parameters := []string{interfaceParam} flowRecords, err := lokilabels.GetMonolithicLokiFlowLogs("http://localhost:3100", time.Now(), parameters...) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(flowRecords)).To(o.BeNumerically(">", 0), "expected number of flowRecords to be greater than 0") }) }) }) })
package networking
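A minimal standalone sketch of the Loki lookup path this context sets up: every query goes through a local port-forward of svc/loki on port 3100 and hits Loki's standard query_range endpoint. This assumes an `oc port-forward svc/loki 3100:3100` is already running; the LogQL selector mirrors the app="netobserv-flowcollector" label used above, and the helper below is illustrative, not the repo's netobserv package.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

// queryLoki runs a LogQL query over the last `since` window against a
// Monolithic Loki reachable at base (e.g. through a port-forward).
func queryLoki(base, logQL string, since time.Duration) (string, error) {
	end := time.Now()
	params := url.Values{}
	params.Set("query", logQL)
	params.Set("start", fmt.Sprintf("%d", end.Add(-since).UnixNano()))
	params.Set("end", fmt.Sprintf("%d", end.UnixNano()))
	resp, err := http.Get(base + "/loki/api/v1/query_range?" + params.Encode())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	out, err := queryLoki("http://localhost:3100",
		`{app="netobserv-flowcollector"}`, 5*time.Minute)
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println(out)
}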
test case
openshift/openshift-tests-private
d7e38cb4-28fb-4f33-917c-c78932e927d4
NonPreRelease-Author:yingwang-Medium-Longduration-42253-Pod with sriov interface should be created successfully with empty pod.ObjectMeta.Namespace in body [Disruptive]
['"path/filepath"', '"regexp"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("NonPreRelease-Author:yingwang-Medium-Longduration-42253-Pod with sriov interface should be created successfully with empty pod.ObjectMeta.Namespace in body [Disruptive]", func() { var ( networkBaseDir = exutil.FixturePath("testdata", "networking") sriovBaseDir = filepath.Join(networkBaseDir, "sriov") sriovNetPolicyName = "netpolicy42253" sriovNetDeviceName = "netdevice42253" sriovOpNs = "openshift-sriov-network-operator" podName1 = "sriov-42253-testpod1" podName2 = "sriov-42253-testpod2" pfName = "ens2f0" deviceID = "1015" ipv4Addr1 = "192.168.2.5/24" ipv6Addr1 = "2002::5/64" ipv4Addr2 = "192.168.2.6/24" ipv6Addr2 = "2002::6/64" sriovIntf = "net1" podTempfile = "sriov-testpod-template.yaml" serviceAccount = "deployer" ) sriovNetworkPolicyTmpFile := filepath.Join(sriovBaseDir, "netpolicy42253-template.yaml") sriovNetworkPolicy := sriovNetResource{ name: sriovNetPolicyName, namespace: sriovOpNs, tempfile: sriovNetworkPolicyTmpFile, kind: "SriovNetworkNodePolicy", } sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "netdevice42253-template.yaml") sriovNetwork := sriovNetResource{ name: sriovNetDeviceName, namespace: sriovOpNs, tempfile: sriovNetworkAttachTmpFile, kind: "SriovNetwork", } g.By("1) ####### Check openshift-sriov-network-operator is running well ##########") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } //make sure the pf and sriov network policy name are not occupied rmSriovNetworkPolicy(oc, sriovNetworkPolicy.name, sriovNetworkPolicy.namespace) rmSriovNetwork(oc, sriovNetwork.name, sriovNetwork.namespace) oc.SetupProject() g.By("2) ####### Create sriov network policy ############") sriovNetworkPolicy.create(oc, "PFNAME="+pfName, "DEVICEID="+deviceID, "SRIOVNETPOLICY="+sriovNetworkPolicy.name) defer rmSriovNetworkPolicy(oc, sriovNetworkPolicy.name, sriovNetworkPolicy.namespace) waitForSriovPolicyReady(oc, sriovNetworkPolicy.namespace) g.By("3) ######### Create sriov network attachment ############") e2e.Logf("create sriov network attachment via template") sriovNetwork.create(oc, "TARGETNS="+oc.Namespace(), "SRIOVNETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetworkPolicy.name) defer sriovNetwork.delete(oc) // ensure the resource is deleted whether the case exist normally or not. g.By("4) ########### Create Pod and attach sriov interface using cli ##########") podTempFile1 := filepath.Join(sriovBaseDir, podTempfile) testPod1 := sriovPod{ name: podName1, namespace: oc.Namespace(), tempfile: podTempFile1, ipv4addr: ipv4Addr1, ipv6addr: ipv6Addr1, intfname: sriovIntf, intfresource: sriovNetDeviceName, } podsLog := testPod1.createPod(oc) defer testPod1.deletePod(oc) // ensure the resource is deleted whether the case exist normally or not. 
testPod1.waitForPodReady(oc) intfInfo1 := testPod1.getSriovIntfonPod(oc) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.intfname)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv4addr)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv6addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod1.name) g.By("5) ########### Create Pod via url without namespace ############") podTempFile2 := filepath.Join(sriovBaseDir, podTempfile) testPod2 := sriovPod{ name: podName2, namespace: oc.Namespace(), tempfile: podTempFile2, ipv4addr: ipv4Addr2, ipv6addr: ipv6Addr2, intfname: sriovIntf, intfresource: sriovNetDeviceName, } e2e.Logf("extract curl request command from logs of creating pod via cli") re := regexp.MustCompile("(curl.+-XPOST.+kubectl-create')") match := re.FindStringSubmatch(podsLog) curlCmd := match[1] e2e.Logf("Extracted curl from pod creating logs is %s", curlCmd) //creating pod via curl request testPod2.sendHTTPRequest(oc, serviceAccount, curlCmd) defer testPod2.deletePod(oc) testPod2.waitForPodReady(oc) intfInfo2 := testPod2.getSriovIntfonPod(oc) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.intfname)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv4addr)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv6addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod2.name) })
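The 42253 case recovers a ready-made curl command from the verbose log of the earlier `oc create` call via a regexp capture group. A standalone sketch of that extraction with a fabricated log line; unlike the test, which indexes match[1] directly, this guards against a missing match:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Fabricated stand-in for the verbose kubectl/oc client log.
	podsLog := `round_trippers.go: curl -k -v -XPOST ... 'https://api.example.com:6443/api/v1/namespaces/test/pods?fieldManager=kubectl-create'`
	re := regexp.MustCompile(`(curl.+-XPOST.+kubectl-create')`)
	match := re.FindStringSubmatch(podsLog)
	if match == nil {
		fmt.Println("no curl command found in the log")
		return
	}
	fmt.Println("extracted:", match[1]) // match[0] is the full match, match[1] the capture group
}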
test case
openshift/openshift-tests-private
9423c586-0bd4-4fd5-9117-dd7e5a2b5e7e
Author:zzhao-Medium-NonPreRelease-Longduration-25321-[E810-C] Check intel dpdk works well [Disruptive]
['"path/filepath"', 'filePath "path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25321-[E810-C] Check intel dpdk works well [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" sriovNodeLabel = "feature.node.kubernetes.io/sriov-capable=true" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810", deviceType: "vfio-pci", deviceID: "1593", pfName: "ens2f2", vendor: "8086", numVfs: 2, resourceName: "e810dpdk", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") sriovPolicy.createPolicy(oc) defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) waitForSriovPolicyReady(oc, sriovOpNs) g.By("check the vhost is loaded") sriovNode := getSriovNode(oc, sriovOpNs, sriovNodeLabel) output, err := exutil.DebugNodeWithChroot(oc, sriovNode, "bash", "-c", "lsmod | grep vhost") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).To(o.ContainSubstring("vhost_net")) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } sriovnetwork.createSriovNetwork(oc) defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err = waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) })
test case
openshift/openshift-tests-private
55651f31-84b0-4809-a5d2-68bd105a62b2
Author:zzhao-Medium-NonPreRelease-Longduration-49213-[E810-C] VF with large number can be inited for intel card [Disruptive]
['"path/filepath"', 'filePath "path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-49213-[E810-C] VF with large number can be inited for intel card [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovOpNs = "openshift-sriov-network-operator" sriovNodeLabel = "feature.node.kubernetes.io/sriov-capable=true" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810", deviceType: "netdevice", deviceID: "1593", pfName: "ens2f0", vendor: "8086", numVfs: 40, resourceName: "e810net", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") sriovPolicy.createPolicy(oc) defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) waitForSriovPolicyReady(oc, sriovOpNs) g.By("check the link show the correct VF") sriovNode := getSriovNode(oc, sriovOpNs, sriovNodeLabel) output, err := exutil.DebugNodeWithChroot(oc, sriovNode, "bash", "-c", "ip l | grep "+sriovPolicy.pfName+"v | wc -l") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).To(o.ContainSubstring("40")) })
test case
openshift/openshift-tests-private
3bfc1488-0211-4a4b-98cc-f2b1eef496bc
Author:zzhao-Medium-NonPreRelease-Longduration-47660-[E810-XXV] DPDK works well in pod with vfio-pci for E810-XXVDA4 adapter [Disruptive]
['"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-47660-[E810-XXV] DPDK works well in pod with vfio-pci for E810-XXVDA4 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810xxv", deviceType: "vfio-pci", deviceID: "159b", pfName: "ens2f0", vendor: "8086", numVfs: 2, resourceName: "e810dpdk", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) })
test case
openshift/openshift-tests-private
75680255-1e20-4cc9-8eee-11af5782602e
Author:zzhao-Medium-NonPreRelease-Longduration-47661-[E810-XXV] sriov pod with netdevice deviceType for E810-XXVDA4 adapter [Disruptive]
['"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-47661-[E810-XXV] sriov pod with netdevice deviceType for E810-XXVDA4 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "e810xxv", deviceType: "netdevice", deviceID: "159b", pfName: "ens2f0", vendor: "8086", numVfs: 3, resourceName: "e810netdevice", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "e810netdevice", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-netdevice") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready") g.By("Check test pod have second interface with assigned ip") command := "ip a show net1" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("10.56.217")) })
test case
openshift/openshift-tests-private
04fce068-c1b6-4370-93ca-ed2c53edfc86
Author:zzhao-Medium-NonPreRelease-Longduration-41145-[xl710] sriov pod can be worked well with netdevice deviceType for xl710 adapter [Disruptive]
['"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-41145-[xl710] sriov pod can be worked well with netdevice deviceType for xl710 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "xl710", deviceType: "netdevice", deviceID: "1583", pfName: "ens2f0", vendor: "8086", numVfs: 3, resourceName: "xl710netdevice", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "xl710netdevice", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-netdevice") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready") g.By("Check test pod have second interface with assigned ip") command := "ip a show net1" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("10.56.217")) })
test case
openshift/openshift-tests-private
4da59e9e-245a-4a0b-9278-6ad2861e1826
Author:zzhao-Medium-NonPreRelease-Longduration-41144-[xl710] DPDK works well in pod with vfio-pci for xl710 adapter [Disruptive]
['"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-41144-[xl710] DPDK works well in pod with vfio-pci for xl710 adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "xl710", deviceType: "vfio-pci", deviceID: "1583", pfName: "ens2f0", vendor: "8086", numVfs: 2, resourceName: "xl710", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) })
test case
openshift/openshift-tests-private
0488aaba-081e-4ead-9572-49432caed480
NonPreRelease-Longduration-Author:yingwang-Medium-50440-creating and deleting multiple sriovnetworknodepolicy, cluster can work well.[Disruptive]
['"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-50440-creating and deleting multiple sriovnetworknodepolicy, cluster can work well.[Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovOpNs = "openshift-sriov-network-operator" sriovNetPolicyName1 = "sriovpolicypf1" sriovNetPolicyName2 = "sriovpolicypf2" ) sriovNetPolicy1 := sriovNetworkNodePolicy{ policyName: sriovNetPolicyName1, deviceType: "netdevice", deviceID: "1015", pfName: "ens2f0", vendor: "15b3", numVfs: 2, resourceName: sriovNetPolicyName1, template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } sriovNetPolicy2 := sriovNetworkNodePolicy{ policyName: sriovNetPolicyName2, deviceType: "netdevice", deviceID: "1015", pfName: "ens2f1", vendor: "15b3", numVfs: 2, resourceName: sriovNetPolicyName2, template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("1) ####### Check openshift-sriov-network-operator is running well ##########") chkSriovOperatorStatus(oc, sriovOpNs) g.By("2) Check the deviceID exists on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovNetPolicy1.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("3) ####### create a new sriov policy before the previous one is ready ############") //create one sriovnetworknodepolicy defer rmSriovNetworkPolicy(oc, sriovNetPolicy1.policyName, sriovOpNs) sriovNetPolicy1.createPolicy(oc) waitForSriovPolicySyncUpStart(oc, sriovNetPolicy1.namespace) //create a new sriov policy before nodes sync up ready defer rmSriovNetworkPolicy(oc, sriovNetPolicy2.policyName, sriovOpNs) sriovNetPolicy2.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("4) ####### delete and recreate sriov network policy ############") //delete sriov policy and recreate it before nodes sync up ready _, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("SriovNetworkNodePolicy", sriovNetPolicy1.policyName, "-n", sriovOpNs, "--ignore-not-found").Output() o.Expect(err).NotTo(o.HaveOccurred()) waitForSriovPolicySyncUpStart(oc, sriovNetPolicy1.namespace) defer rmSriovNetworkPolicy(oc, sriovNetPolicy1.policyName, sriovOpNs) sriovNetPolicy1.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) })
test case
openshift/openshift-tests-private
68ab97ee-f99f-403f-bf2a-704203e9d9f7
Author:zzhao-Medium-NonPreRelease-Longduration-56613-[sts] sriov pod can be worked well with netdevice deviceType for sts adapter [Disruptive]
['"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-56613-[sts] sriov pod can be worked well with netdevice deviceType for sts adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "stsnet", deviceType: "netdevice", deviceID: "1591", pfName: "ens4f3", vendor: "8086", numVfs: 3, resourceName: "stsnetdevice", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "stsnetdevice", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-netdevice") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready") g.By("Check test pod have second interface with assigned ip") command := "ip a show net1" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("10.56.217")) })
test case
openshift/openshift-tests-private
8af25b3a-bf71-4179-aa0b-12df6ff6c5e1
Author:zzhao-Medium-NonPreRelease-Longduration-56611-[sts] DPDK works well in pod with vfio-pci for sts adapter [Disruptive]
['"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-56611-[sts] DPDK works well in pod with vfio-pci for sts adapter [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml") sriovOpNs = "openshift-sriov-network-operator" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "stsdpdk", deviceType: "vfio-pci", deviceID: "1591", pfName: "ens4f3", vendor: "8086", numVfs: 2, resourceName: "stsdpdk", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } g.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) g.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } g.By("Create sriovnetworkpolicy to init VF and check they are inited successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) g.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) g.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) g.By("Create test pod on the target namespace") sriovTestPod := sriovTestPod{ name: "sriovdpdk", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk") exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-dpdk not ready") g.By("Check testpmd running well") pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, sriovPolicy.resourceName) command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac" testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams")) })
test case
openshift/openshift-tests-private
ce9a33d9-477e-4895-8fd1-9e7f6b679390
Author:zzhao-Medium-NonPreRelease-Longduration-69134-SR-IOV VFs can be created and do not need to wait all the nodes in the pools are updated [Disruptive]
['"path/filepath"', '"time"', 'filePath "path/filepath"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-69134-SR-IOV VFs can be created and do not need to wait all the nodes in the pools are updated [Disruptive]", func() { //bug https://issues.redhat.com/browse/OCPBUGS-10323 var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml") hugepageMC = filepath.Join(buildPruningBaseDir, "hugepageMC.yaml") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovOpNs = "openshift-sriov-network-operator" iperfRcTmp = filepath.Join(buildPruningBaseDir, "iperf-rc-template.json") sriovNetworkType = "k8s.v1.cni.cncf.io/networks" sriovNodeLabel = "feature.node.kubernetes.io/sriov-capable=true" ) sriovPolicy := sriovNetworkNodePolicy{ policyName: "cx5", deviceType: "netdevice", deviceID: "1017", pfName: "ens1f1np1", vendor: "15b3", numVfs: 3, resourceName: "cx5n", template: sriovNetworkNodePolicyTemplate, namespace: sriovOpNs, } exutil.By("check sriov worker is ready in 2 minute, if not skip this case") exutil.AssertOrCheckMCP(oc, "sriov", 20*time.Second, 2*time.Minute, true) exutil.By("check the sriov operator is running") chkSriovOperatorStatus(oc, sriovOpNs) exutil.By("Check the deviceID if exist on the cluster worker") if !checkDeviceIDExist(oc, sriovOpNs, sriovPolicy.deviceID) { g.Skip("the cluster do not contain the sriov card. skip this testing!") } exutil.By("Create sriovnetworkpolicy to create VF and check they are created successfully") defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs) sriovPolicy.createPolicy(oc) waitForSriovPolicyReady(oc, sriovOpNs) exutil.By("setup one namespace") ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: sriovPolicy.policyName, resourceName: sriovPolicy.resourceName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create mc to make sriov worker reboot one by one and check the pods can be running on first ready node") defer func() { exutil.By("wait mcp recovered") err := exutil.AssertOrCheckMCP(oc, "sriov", 60*time.Second, 30*time.Minute, false) o.Expect(err).Should(o.BeNil()) }() defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", hugepageMC).Execute() err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", hugepageMC).Execute() o.Expect(err).NotTo(o.HaveOccurred()) sriovScheduleDisableNodeName := findSchedulingDisabledNode(oc, 5*time.Second, 2*time.Minute, sriovNodeLabel) e2e.Logf("Currently scheduleDisable worker is %s", sriovScheduleDisableNodeName) checkNodeStatus(oc, sriovScheduleDisableNodeName, "NotReady") checkNodeStatus(oc, sriovScheduleDisableNodeName, "Ready") exutil.By("Create test pod on the target namespace") iperfPod := sriovNetResource{ name: "iperf-rc", namespace: ns1, tempfile: iperfRcTmp, kind: "rc", } //create iperf server pod on worker0 iperfPod.create(oc, "PODNAME="+iperfPod.name, "NAMESPACE="+iperfPod.namespace, "NETNAME="+sriovnetwork.name, "NETTYPE="+sriovNetworkType, "NODENAME="+sriovScheduleDisableNodeName) defer iperfPod.delete(oc) err = waitForPodWithLabelReady(oc, ns1, "name=iperf-rc") exutil.AssertWaitPollNoErr(err, "this pod was not ready with label name=iperf-rc") exutil.By("Check another worker still in scheduleDisable") 
sriovScheduleDisableNodeName2 := findSchedulingDisabledNode(oc, 5*time.Second, 2*time.Minute, sriovNodeLabel) e2e.Logf("Currently scheduleDisable worker is %s", sriovScheduleDisableNodeName2) o.Expect(sriovScheduleDisableNodeName2).NotTo(o.Equal(sriovScheduleDisableNodeName)) })
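findSchedulingDisabledNode above amounts to scanning `oc get nodes` for the worker MCO has cordoned. A hedged standalone sketch that shells out to a logged-in `oc` on PATH; the label selector matches the sriov-capable nodes the test targets:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// schedulingDisabledNode returns the first node under labelSelector whose
// STATUS column reports SchedulingDisabled.
func schedulingDisabledNode(labelSelector string) (string, error) {
	out, err := exec.Command("oc", "get", "nodes", "-l", labelSelector, "--no-headers").CombinedOutput()
	if err != nil {
		return "", err
	}
	for _, line := range strings.Split(string(out), "\n") {
		if strings.Contains(line, "SchedulingDisabled") {
			return strings.Fields(line)[0], nil
		}
	}
	return "", fmt.Errorf("no SchedulingDisabled node matching %q", labelSelector)
}

func main() {
	node, err := schedulingDisabledNode("feature.node.kubernetes.io/sriov-capable=true")
	fmt.Println(node, err)
}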
test case
openshift/openshift-tests-private
c899a71e-d86b-491d-87ed-b6fc2473f208
Author:zzhao-Medium-54368-Medium-54393-The MAC address entry in the ARP table of the source pod should be updated when the MAC address of the destination pod changes while retaining the same IP address [Disruptive]
['"fmt"', '"path/filepath"', '"time"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-54368-Medium-54393-The MAC address entry in the ARP table of the source pod should be updated when the MAC address of the destination pod changes while retaining the same IP address [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 4 caseID = "54368-" networkName = caseID + "net" ) ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "off", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create 2 test pods to consume the whereabouts ip") //create full number pods which use all of the VFs testpodPrex := "testpod" testpodNum := 2 createNumPods(oc, sriovnetwork.name, ns1, testpodPrex, testpodNum) exutil.By("now from one testpod to ping another one and check the mac address from arp") pod1Name := getPodName(oc, ns1, "name=sriov-netdevice") pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns1, pod1Name[0]) e2e.Logf("The second interface v4 address of pod1 is: %v", pod1IPv4) e2e.Logf("The second interface v6 address of pod1 is: %v", pod1IPv6) command := fmt.Sprintf("ping -c 3 %s && ping6 -c 3 %s", pod1IPv4, pod1IPv6) pingOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name[1], command, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(pingOutput).To(o.ContainSubstring("3 received")) exutil.By("new pods will fail because all ips from whereabouts already be used") sriovTestNewPod := sriovTestPod{ name: "testpodnew", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestNewPod.createSriovTestPod(oc) e2e.Logf("creating new testpod should fail, because all ips from whereabouts already be used") o.Eventually(func() string { podStatus, _ := getPodStatus(oc, ns1, sriovTestNewPod.name) return podStatus }, 10*time.Second, 2*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", sriovTestNewPod.name)) exutil.By("delete the first pod and testpodnew will be ready") err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns1, "pod", pod1Name[0]).Execute() o.Expect(err).NotTo(o.HaveOccurred()) exutil.AssertPodToBeReady(oc, sriovTestNewPod.name, ns1) newPodMac := getInterfaceMac(oc, ns1, sriovTestNewPod.name, "net1") exutil.By("check the entry of arp table for ipv4 is updated") commandv4 := fmt.Sprintf("ip neigh show %s | awk '{print $5}'", pod1IPv4) arpIpv4MacOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name[1], commandv4, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) 
e2e.Logf("arp for ipv4: %v", arpIpv4MacOutput) o.Expect(arpIpv4MacOutput).To(o.ContainSubstring(newPodMac)) exutil.By("check the entry of arp table for ipv6 is updated") commandv6 := fmt.Sprintf("ip neigh show %s | awk '{print $5}'", pod1IPv6) arpIpv6MacOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name[1], commandv6, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("arp for ipv6: %v", arpIpv6MacOutput) o.Expect(arpIpv6MacOutput).To(o.ContainSubstring(newPodMac)) })
test case
openshift/openshift-tests-private
e8b652e1-2bb1-4c65-a6ad-47411184b087
LEVEL0-Author:zzhao-NonPreRelease-Longduration-Critical-49860-pods numbers same with VF numbers can be still working after worker reboot [Disruptive]
['"fmt"', '"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("LEVEL0-Author:zzhao-NonPreRelease-Longduration-Critical-49860-pods numbers same with VF numbers can be still working after worker reboot [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-hostlocal-template.yaml") sriovTestPodRCTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-rc-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 2 caseID = "49860-test" networkName = caseID + "net" ) ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "off", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create 2 test pods with rc to consume the whereabouts ip") //create full number pods which use all of the VFs sriovTestPod := sriovTestPod{ name: caseID, namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodRCTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") exutil.By("ping from one pod to another with ipv4 and ipv6") podName := getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) exutil.By("Get node name of the pod") nodeName, nodeNameErr := exutil.GetPodNodeName(oc, ns1, podName[0]) o.Expect(nodeNameErr).NotTo(o.HaveOccurred()) exutil.By("Reboot node.") defer checkNodeStatus(oc, nodeName, "Ready") rebootNode(oc, nodeName) checkNodeStatus(oc, nodeName, "NotReady") checkNodeStatus(oc, nodeName, "Ready") exutil.By("ping from one pod to another with ipv4 and ipv6 after worker reboot") err = waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") podName = getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) })
test case
openshift/openshift-tests-private
3bf719ba-08e0-4da0-bedd-d7620f64e8fd
Author:zzhao-Medium-55181-pci-address should be contained in networks-status annotation when using the tuning metaPlugin on SR-IOV Networks [Disruptive]
['"fmt"', '"path/filepath"', '"strings"', '"time"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-Medium-55181-pci-address should be contained in networks-status annotation when using the tuning metaPlugin on SR-IOV Networks [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml") sriovTestPodTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 4 caseID = "55181-" networkName = caseID + "net" ) ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "off", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create test pod with the VF") sriovTestPod := sriovTestPod{ name: "testpod", namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "app=testpod") exutil.AssertWaitPollNoErr(err, "pods with label app=testpod not ready") exutil.By("get the pci-address of the sriov interface") pciAddress := getPciAddress(ns1, sriovTestPod.name, policyName) exutil.By("check the pod info should contain pci-address") command := fmt.Sprintf("cat /etc/podnetinfo/annotations") podNetinfo, err := e2eoutput.RunHostCmdWithRetries(ns1, sriovTestPod.name, command, 3*time.Second, 12*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(podNetinfo, pciAddress)).Should(o.BeTrue()) })
test case
openshift/openshift-tests-private
f8a3e3ce-cb27-410f-9e4a-76e2ab7c63ee
Author:zzhao-NonPreRelease-Longduration-Medium-73965-pods with sriov VF created and deleted 10 times [Disruptive]
['"fmt"', '"path/filepath"', 'filePath "path/filepath"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:zzhao-NonPreRelease-Longduration-Medium-73965-pods with sriov VF created and deleted 10 times [Disruptive]", func() { var ( buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov") sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml") sriovTestPodRCTemplate = filepath.Join(buildPruningBaseDir, "sriov-netdevice-rc-template.yaml") sriovOpNs = "openshift-sriov-network-operator" policyName = "e810c" deviceID = "1593" interfaceName = "ens2f2" vendorID = "8086" vfNum = 2 caseID = "73965-test" networkName = caseID + "net" ) ns1 := oc.Namespace() exutil.SetNamespacePrivileged(oc, ns1) exutil.By("Create snnp to create VF") // Create VF on with given device defer rmSriovNetworkPolicy(oc, policyName, sriovOpNs) result := initVF(oc, policyName, deviceID, interfaceName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace") sriovnetwork := sriovNetwork{ name: networkName, resourceName: policyName, networkNamespace: ns1, template: sriovNeworkTemplate, namespace: sriovOpNs, spoolchk: "on", trust: "on", } defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs) sriovnetwork.createSriovNetwork(oc) exutil.By("Create 2 test pods with rc to consume the whereabouts ip") //create full number pods which use all of the VFs sriovTestPod := sriovTestPod{ name: caseID, namespace: ns1, networkName: sriovnetwork.name, template: sriovTestPodRCTemplate, } sriovTestPod.createSriovTestPod(oc) err := waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") exutil.By("ping from one pod to another with ipv4 and ipv6") podName := getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) exutil.By("Delete and recreate pods 10 times to check pods reuse the VF and traffic pass") for i := 1; i <= 10; i++ { err := oc.WithoutNamespace().Run("delete").Args("pods", "--all", "-n", ns1).Execute() o.Expect(err).NotTo(o.HaveOccurred(), "Couldn't delete pods") err = waitForPodWithLabelReady(oc, ns1, "name="+caseID) exutil.AssertWaitPollNoErr(err, "pods with label name="+caseID+"sriov-netdevice not ready") podName = getPodName(oc, ns1, "name="+caseID) pingPassWithNet1(oc, ns1, podName[0], podName[1]) } })
test case
openshift/openshift-tests-private
96605f88-4154-4a8b-a817-d27838446c4d
Author:memodi-NonPreRelease-Medium-67619-Verify NetObserv flows are seen on SRIOV interfaces [Serial]
['"fmt"', '"time"', 'netobserv "github.com/openshift/openshift-tests-private/test/extended/netobserv"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:memodi-NonPreRelease-Medium-67619-Verify NetObserv flows are seen on SRIOV interfaces [Serial]", func() { g.By("Deploy flowcollector") defer flow.DeleteFlowcollector(oc) flow.CreateFlowcollector(oc) flow.WaitForFlowcollectorReady(oc) g.By("####### Create sriov network policy to create VF ############") defer rmSriovNetworkPolicy(oc, sriovNetPolicyName, sriovOpNs) result := initVF(oc, sriovNetPolicyName, deviceID, pfName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } g.By("######### Create sriov network attachment ############") e2e.Logf("create sriov network attachment via template") defer sriovNetwork1.delete(oc) sriovNetwork1.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork1.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork1.ip) defer sriovNetwork2.delete(oc) sriovNetwork2.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork2.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork2.ip) g.By("########### Create Pod and attach sriov interface using cli ##########") defer testPod1.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod1.namespace, "--ignore-unknown-parameters=true", "-f", testPod1.tempfile, "-p", "PODNAME="+testPod1.name, "SRIOVNETNAME="+testPod1.intfresource, "PING_IP="+testPod1.pingip) testPod1.waitForPodReady(oc) intfInfo1 := testPod1.getSriovIntfonPod(oc) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.intfname)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv4addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod1.name) defer testPod2.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod2.namespace, "--ignore-unknown-parameters=true", "-f", testPod2.tempfile, "-p", "PODNAME="+testPod2.name, "SRIOVNETNAME="+testPod2.intfresource, "PING_IP="+testPod2.pingip) testPod2.waitForPodReady(oc) intfInfo2 := testPod2.getSriovIntfonPod(oc) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.intfname)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv4addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod2.name) // sleep for 30 sec for flowlogs to be ingested in Loki time.Sleep(30 * time.Second) cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("svc/loki", "3100:3100", "-n", oc.Namespace()).Background() defer cmd.Process.Kill() o.Expect(err).NotTo(o.HaveOccurred()) lokilabels := netobserv.Lokilabels{ App: "netobserv-flowcollector", } interfaceParam := fmt.Sprintf("\"\\\"Interfaces\\\":.*%s.*\"", testPod2.intfname) parameters := []string{interfaceParam} flowRecords, err := lokilabels.GetMonolithicLokiFlowLogs("http://localhost:3100", time.Now(), parameters...) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(flowRecords)).To(o.BeNumerically(">", 0), "expected number of flowRecords to be greater than 0") })
test case
openshift/openshift-tests-private
d6626175-bcb3-41fc-a73d-1a9b1236b6da
Author:memodi-NonPreRelease-Medium-67619-Verify NetObserv flows are seen on SRIOV interfaces [Serial]
['"fmt"', '"time"', 'netobserv "github.com/openshift/openshift-tests-private/test/extended/netobserv"']
github.com/openshift/openshift-tests-private/test/extended/networking/sriov.go
g.It("Author:memodi-NonPreRelease-Medium-67619-Verify NetObserv flows are seen on SRIOV interfaces [Serial]", func() { g.By("####### Create sriov network policy to create VF ############") defer rmSriovNetworkPolicy(oc, sriovNetPolicyName, sriovOpNs) result := initVF(oc, sriovNetPolicyName, deviceID, pfName, vendorID, sriovOpNs, vfNum) // if the deviceid is not exist on the worker, skip this if !result { g.Skip(fmt.Sprintf("This nic which has deviceID %s is not found on this cluster!!!", deviceID)) } g.By("######### Create sriov network attachment ############") e2e.Logf("create sriov network attachment via template") defer sriovNetwork1.delete(oc) sriovNetwork1.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork1.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork1.ip) defer sriovNetwork2.delete(oc) sriovNetwork2.create(oc, "TARGETNS="+sriovOpNs, "SRIOVNETNAME="+sriovNetwork2.name, "SRIOVNETPOLICY="+sriovNetPolicyName, "IPSUBNET="+sriovNetwork2.ip) g.By("########### Create Pod and attach sriov interface using cli ##########") defer testPod1.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod1.namespace, "--ignore-unknown-parameters=true", "-f", testPod1.tempfile, "-p", "PODNAME="+testPod1.name, "SRIOVNETNAME="+testPod1.intfresource, "PING_IP="+testPod1.pingip) testPod1.waitForPodReady(oc) intfInfo1 := testPod1.getSriovIntfonPod(oc) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.intfname)) o.Expect(intfInfo1).Should(o.MatchRegexp(testPod1.ipv4addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod1.name) defer testPod2.deletePod(oc) exutil.ApplyNsResourceFromTemplate(oc, testPod2.namespace, "--ignore-unknown-parameters=true", "-f", testPod2.tempfile, "-p", "PODNAME="+testPod2.name, "SRIOVNETNAME="+testPod2.intfresource, "PING_IP="+testPod2.pingip) testPod2.waitForPodReady(oc) intfInfo2 := testPod2.getSriovIntfonPod(oc) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.intfname)) o.Expect(intfInfo2).Should(o.MatchRegexp(testPod2.ipv4addr)) e2e.Logf("Check pod %s sriov interface and ip address PASS.", testPod2.name) g.By("Deploy flowcollector") defer flow.DeleteFlowcollector(oc) flow.CreateFlowcollector(oc) flow.WaitForFlowcollectorReady(oc) // sleep for 30 sec for flowlogs to be ingested in Loki time.Sleep(30 * time.Second) cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("svc/loki", "3100:3100", "-n", oc.Namespace()).Background() defer cmd.Process.Kill() o.Expect(err).NotTo(o.HaveOccurred()) lokilabels := netobserv.Lokilabels{ App: "netobserv-flowcollector", } interfaceParam := fmt.Sprintf("\"\\\"Interfaces\\\":.*%s.*\"", testPod2.intfname) parameters := []string{interfaceParam} flowRecords, err := lokilabels.GetMonolithicLokiFlowLogs("http://localhost:3100", time.Now(), parameters...) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(flowRecords)).To(o.BeNumerically(">", 0), "expected number of flowRecords to be greater than 0") })
test
openshift/openshift-tests-private
35bbcda3-8648-43ca-9c7f-900b473a0672
utils
import ( "context" "encoding/json" "fmt" "io" "math/rand" "net" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" exutil "github.com/openshift/openshift-tests-private/test/extended/util" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" netutils "k8s.io/utils/net" )
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
package networking import ( "context" "encoding/json" "fmt" "io" "math/rand" "net" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" exutil "github.com/openshift/openshift-tests-private/test/extended/util" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" e2e "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" netutils "k8s.io/utils/net" ) type pingPodResource struct { name string namespace string template string } type pingPodResourceNode struct { name string namespace string nodename string template string } type pingPodResourceWinNode struct { name string namespace string image string nodename string template string } type egressIPResource1 struct { name string template string egressIP1 string egressIP2 string nsLabelKey string nsLabelValue string podLabelKey string podLabelValue string } type egressFirewall1 struct { name string namespace string template string } type egressFirewall2 struct { name string namespace string ruletype string cidr string template string } type ipBlockCIDRsDual struct { name string namespace string cidrIpv4 string cidrIpv6 string cidr2Ipv4 string cidr2Ipv6 string cidr3Ipv4 string cidr3Ipv6 string template string } type ipBlockCIDRsSingle struct { name string namespace string cidr string cidr2 string cidr3 string template string } type ipBlockCIDRsExceptDual struct { name string namespace string cidrIpv4 string cidrIpv4Except string cidrIpv6 string cidrIpv6Except string cidr2Ipv4 string cidr2Ipv4Except string cidr2Ipv6 string cidr2Ipv6Except string cidr3Ipv4 string cidr3Ipv4Except string cidr3Ipv6 string cidr3Ipv6Except string template string } type ipBlockCIDRsExceptSingle struct { name string namespace string cidr string except string cidr2 string except2 string cidr3 string except3 string template string } type genericServiceResource struct { servicename string namespace string protocol string selector string serviceType string ipFamilyPolicy string externalTrafficPolicy string internalTrafficPolicy string template string } type windowGenericServiceResource struct { servicename string namespace string protocol string selector string serviceType string ipFamilyPolicy string externalTrafficPolicy string internalTrafficPolicy string template string } type testPodMultinetwork struct { name string namespace string nodename string nadname string labelname string template string } type externalIPService struct { name string namespace string externalIP string template string } type externalIPPod struct { name string namespace string template string } type nodePortService struct { name string namespace string nodeName string template string } type egressPolicy struct { name string namespace string cidrSelector string template string } type aclSettings struct { DenySetting string `json:"deny"` AllowSetting string `json:"allow"` } type egressrouterMultipleDst struct { name string namespace string reservedip string gateway string destinationip1 string destinationip2 string destinationip3 string template string } type egressrouterRedSDN struct { name string namespace string reservedip string gateway string destinationip string labelkey string labelvalue string template string } type egressFirewall5 struct { name string namespace string ruletype1 string rulename1 string rulevalue1 string protocol1 string portnumber1 int ruletype2 string rulename2 string rulevalue2 string protocol2 string 
portnumber2 int template string } type egressNetworkpolicy struct { name string namespace string ruletype string rulename string rulevalue string template string } type svcEndpontDetails struct { ovnKubeNodePod string nodeName string podIP string } type migrationDetails struct { name string template string namespace string virtualmachinesintance string } type kubeletKillerPod struct { name string namespace string nodename string template string } type httpserverPodResourceNode struct { name string namespace string containerport int32 hostport int32 nodename string template string } // struct for using nncp to create VF on sriov node type VRFResource struct { name string intfname string nodename string tableid int template string } // struct to create a pod with named port type namedPortPodResource struct { name string namespace string podLabelKey string podLabelVal string portname string containerport int32 template string } func (pod *pingPodResource) createPingPod(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name)) } func (pod *pingPodResourceNode) createPingPodNode(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "NODENAME="+pod.nodename) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name)) } func (pod *pingPodResourceWinNode) createPingPodWinNode(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "IMAGE="+pod.image, "NODENAME="+pod.nodename) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name)) } func (pod *testPodMultinetwork) createTestPodMultinetwork(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "NODENAME="+pod.nodename, "LABELNAME="+pod.labelname, "NADNAME="+pod.nadname) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name)) } func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error { var configFile string err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) { output, err := oc.Run("process").Args(parameters...).OutputToFile(getRandomString() + "ping-pod.json") if err != nil { e2e.Logf("the err:%v, and try next round", err) return false, nil } configFile = output return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters)) e2e.Logf("the file of resource is %s", configFile) return 
oc.WithoutNamespace().Run("apply").Args("-f", configFile).Execute() } func (egressIP *egressIPResource1) createEgressIPObject1(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressIP.template, "-p", "NAME="+egressIP.name, "EGRESSIP1="+egressIP.egressIP1, "EGRESSIP2="+egressIP.egressIP2) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressIP %v", egressIP.name)) } func (egressIP *egressIPResource1) deleteEgressIPObject1(oc *exutil.CLI) { removeResource(oc, true, true, "egressip", egressIP.name) } func (egressIP *egressIPResource1) createEgressIPObject2(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressIP.template, "-p", "NAME="+egressIP.name, "EGRESSIP1="+egressIP.egressIP1, "NSLABELKEY="+egressIP.nsLabelKey, "NSLABELVALUE="+egressIP.nsLabelValue, "PODLABELKEY="+egressIP.podLabelKey, "PODLABELVALUE="+egressIP.podLabelValue) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressIP %v", egressIP.name)) } func (egressFirewall *egressFirewall1) createEgressFWObject1(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressFirewall.template, "-p", "NAME="+egressFirewall.name, "NAMESPACE="+egressFirewall.namespace) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressFW %v", egressFirewall.name)) } func (egressFirewall *egressFirewall1) deleteEgressFWObject1(oc *exutil.CLI) { removeResource(oc, true, true, "egressfirewall", egressFirewall.name, "-n", egressFirewall.namespace) } func (egressFirewall *egressFirewall2) createEgressFW2Object(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressFirewall.template, "-p", "NAME="+egressFirewall.name, "NAMESPACE="+egressFirewall.namespace, "RULETYPE="+egressFirewall.ruletype, "CIDR="+egressFirewall.cidr) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressFW2 %v", egressFirewall.name)) } func (EFW *egressFirewall5) createEgressFW5Object(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { parameters := []string{"--ignore-unknown-parameters=true", "-f", EFW.template, "-p", "NAME=" + EFW.name, "NAMESPACE=" + EFW.namespace, "RULETYPE1=" + EFW.ruletype1, "RULENAME1=" + EFW.rulename1, "RULEVALUE1=" + EFW.rulevalue1, "PROTOCOL1=" + EFW.protocol1, "PORTNUMBER1=" + strconv.Itoa(EFW.portnumber1), "RULETYPE2=" + EFW.ruletype2, "RULENAME2=" + EFW.rulename2, "RULEVALUE2=" + EFW.rulevalue2, "PROTOCOL2=" + EFW.protocol2, "PORTNUMBER2=" + strconv.Itoa(EFW.portnumber2)} err1 := applyResourceFromTemplateByAdmin(oc, parameters...) 
if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressFW5 %v", EFW.name)) } func (eNPL *egressNetworkpolicy) createEgressNetworkPolicyObj(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { parameters := []string{"--ignore-unknown-parameters=true", "-f", eNPL.template, "-p", "NAME=" + eNPL.name, "NAMESPACE=" + eNPL.namespace, "RULETYPE=" + eNPL.ruletype, "RULENAME=" + eNPL.rulename, "RULEVALUE=" + eNPL.rulevalue} err1 := applyResourceFromTemplateByAdmin(oc, parameters...) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create EgressNetworkPolicy %v in Namespace %v", eNPL.name, eNPL.namespace)) } // Single CIDR on Dual stack func (ipBlock_policy *ipBlockCIDRsDual) createipBlockCIDRObjectDual(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_policy.template, "-p", "NAME="+ipBlock_policy.name, "NAMESPACE="+ipBlock_policy.namespace, "cidrIpv6="+ipBlock_policy.cidrIpv6, "cidrIpv4="+ipBlock_policy.cidrIpv4) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_policy.name)) } // Single CIDR on single stack func (ipBlock_policy *ipBlockCIDRsSingle) createipBlockCIDRObjectSingle(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_policy.template, "-p", "NAME="+ipBlock_policy.name, "NAMESPACE="+ipBlock_policy.namespace, "CIDR="+ipBlock_policy.cidr) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_policy.name)) } // Single IP Block with except clause on Dual stack func (ipBlock_except_policy *ipBlockCIDRsExceptDual) createipBlockExceptObjectDual(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { policyApplyError := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_except_policy.template, "-p", "NAME="+ipBlock_except_policy.name, "NAMESPACE="+ipBlock_except_policy.namespace, "CIDR_IPv6="+ipBlock_except_policy.cidrIpv6, "EXCEPT_IPv6="+ipBlock_except_policy.cidrIpv6Except, "CIDR_IPv4="+ipBlock_except_policy.cidrIpv4, "EXCEPT_IPv4="+ipBlock_except_policy.cidrIpv4Except) if policyApplyError != nil { e2e.Logf("the err:%v, and try next round", policyApplyError) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_except_policy.name)) } // Single IP Block with except clause on Single stack func (ipBlock_except_policy *ipBlockCIDRsExceptSingle) createipBlockExceptObjectSingle(oc *exutil.CLI, except bool) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { policyApplyError := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_except_policy.template, "-p", "NAME="+ipBlock_except_policy.name, "NAMESPACE="+ipBlock_except_policy.namespace, "CIDR="+ipBlock_except_policy.cidr,
"EXCEPT="+ipBlock_except_policy.except) if policyApplyError != nil { e2e.Logf("the err:%v, and try next round", policyApplyError) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_except_policy.name)) } // Function to create ingress or egress policy with multiple CIDRs on Dual Stack Cluster func (ipBlock_cidrs_policy *ipBlockCIDRsDual) createIPBlockMultipleCIDRsObjectDual(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_cidrs_policy.template, "-p", "NAME="+ipBlock_cidrs_policy.name, "NAMESPACE="+ipBlock_cidrs_policy.namespace, "cidrIpv6="+ipBlock_cidrs_policy.cidrIpv6, "cidrIpv4="+ipBlock_cidrs_policy.cidrIpv4, "cidr2Ipv4="+ipBlock_cidrs_policy.cidr2Ipv4, "cidr2Ipv6="+ipBlock_cidrs_policy.cidr2Ipv6, "cidr3Ipv4="+ipBlock_cidrs_policy.cidr3Ipv4, "cidr3Ipv6="+ipBlock_cidrs_policy.cidr3Ipv6) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_cidrs_policy.name)) } // Function to create ingress or egress policy with multiple CIDRs on Single Stack Cluster func (ipBlock_cidrs_policy *ipBlockCIDRsSingle) createIPBlockMultipleCIDRsObjectSingle(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_cidrs_policy.template, "-p", "NAME="+ipBlock_cidrs_policy.name, "NAMESPACE="+ipBlock_cidrs_policy.namespace, "CIDR="+ipBlock_cidrs_policy.cidr, "CIDR2="+ipBlock_cidrs_policy.cidr2, "CIDR3="+ipBlock_cidrs_policy.cidr3) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_cidrs_policy.name)) } func (service *genericServiceResource) createServiceFromParams(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", service.template, "-p", "SERVICENAME="+service.servicename, "NAMESPACE="+service.namespace, "PROTOCOL="+service.protocol, "SELECTOR="+service.selector, "serviceType="+service.serviceType, "ipFamilyPolicy="+service.ipFamilyPolicy, "internalTrafficPolicy="+service.internalTrafficPolicy, "externalTrafficPolicy="+service.externalTrafficPolicy) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create svc %v", service.servicename)) } func (service *windowGenericServiceResource) createWinServiceFromParams(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", service.template, "-p", "SERVICENAME="+service.servicename, "NAMESPACE="+service.namespace, "PROTOCOL="+service.protocol, "SELECTOR="+service.selector, "serviceType="+service.serviceType, "ipFamilyPolicy="+service.ipFamilyPolicy, "internalTrafficPolicy="+service.internalTrafficPolicy, "externalTrafficPolicy="+service.externalTrafficPolicy) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to 
create svc %v", service.servicename)) } func (egressrouter *egressrouterMultipleDst) createEgressRouterMultipeDst(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressrouter.template, "-p", "NAME="+egressrouter.name, "NAMESPACE="+egressrouter.namespace, "RESERVEDIP="+egressrouter.reservedip, "GATEWAY="+egressrouter.gateway, "DSTIP1="+egressrouter.destinationip1, "DSTIP2="+egressrouter.destinationip2, "DSTIP3="+egressrouter.destinationip3) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create egressrouter %v", egressrouter.name)) } func (egressrouter *egressrouterRedSDN) createEgressRouterRedSDN(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressrouter.template, "-p", "NAME="+egressrouter.name, "NAMESPACE="+egressrouter.namespace, "RESERVEDIP="+egressrouter.reservedip, "GATEWAY="+egressrouter.gateway, "DSTIP="+egressrouter.destinationip, "LABELKEY="+egressrouter.labelkey, "LABELVALUE="+egressrouter.labelvalue) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create egressrouter %v", egressrouter.name)) } func (egressFirewall *egressFirewall2) deleteEgressFW2Object(oc *exutil.CLI) { removeResource(oc, true, true, "egressfirewall", egressFirewall.name, "-n", egressFirewall.namespace) } func (pod *pingPodResource) deletePingPod(oc *exutil.CLI) { removeResource(oc, false, true, "pod", pod.name, "-n", pod.namespace) } func (pod *pingPodResourceNode) deletePingPodNode(oc *exutil.CLI) { removeResource(oc, false, true, "pod", pod.name, "-n", pod.namespace) } func removeResource(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) { output, err := doAction(oc, "delete", asAdmin, withoutNamespace, parameters...) if err != nil && (strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found")) { e2e.Logf("the resource is deleted already") return } o.Expect(err).NotTo(o.HaveOccurred()) err = wait.Poll(3*time.Second, 120*time.Second, func() (bool, error) { output, err := doAction(oc, "get", asAdmin, withoutNamespace, parameters...) 
if err != nil && (strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found")) { e2e.Logf("the resource is delete successfully") return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to delete resource %v", parameters)) } func doAction(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) { if asAdmin && withoutNamespace { return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output() } if asAdmin && !withoutNamespace { return oc.AsAdmin().Run(action).Args(parameters...).Output() } if !asAdmin && withoutNamespace { return oc.WithoutNamespace().Run(action).Args(parameters...).Output() } if !asAdmin && !withoutNamespace { return oc.Run(action).Args(parameters...).Output() } return "", nil } func applyResourceFromTemplateByAdmin(oc *exutil.CLI, parameters ...string) error { var configFile string err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) { output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "resource.json") if err != nil { e2e.Logf("the err:%v, and try next round", err) return false, nil } configFile = output return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("as admin fail to process %v", parameters)) e2e.Logf("the file of resource is %s", configFile) return oc.WithoutNamespace().AsAdmin().Run("apply").Args("-f", configFile).Execute() } func getRandomString() string { chars := "abcdefghijklmnopqrstuvwxyz0123456789" seed := rand.New(rand.NewSource(time.Now().UnixNano())) buffer := make([]byte, 8) for index := range buffer { buffer[index] = chars[seed.Intn(len(chars))] } return string(buffer) } func getPodStatus(oc *exutil.CLI, namespace string, podName string) (string, error) { podStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.phase}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod %s status in namespace %s is %q", podName, namespace, podStatus) return podStatus, err } func checkPodReady(oc *exutil.CLI, namespace string, podName string) (bool, error) { podOutPut, err := getPodStatus(oc, namespace, podName) status := []string{"Running", "Ready", "Complete", "Succeeded"} return contains(status, podOutPut), err } func contains(s []string, str string) bool { for _, v := range s { if v == str { return true } } return false } func waitPodReady(oc *exutil.CLI, namespace string, podName string) { err := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) { status, err1 := checkPodReady(oc, namespace, podName) if err1 != nil { e2e.Logf("the err:%v, wait for pod %v to become ready.", err1, podName) return status, err1 } if !status { return status, nil } return status, nil }) if err != nil { podDescribe := describePod(oc, namespace, podName) e2e.Logf("oc describe pod %v.", podName) e2e.Logf(podDescribe) } exutil.AssertWaitPollNoErr(err, fmt.Sprintf("pod %v is not ready", podName)) } func describePod(oc *exutil.CLI, namespace string, podName string) string { podDescribe, err := oc.WithoutNamespace().Run("describe").Args("pod", "-n", namespace, podName).Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod %s status is %q", podName, podDescribe) return podDescribe } func execCommandInSpecificPod(oc *exutil.CLI, namespace string, podName string, command string) (string, error) { e2e.Logf("The command is: %v", command) command1 := []string{"-n", namespace, podName, "--", "bash", "-c", 
command} msg, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(command1...).Output() if err != nil { e2e.Logf("Execute command failed with err:%v and output is %v.", err, msg) return msg, err } o.Expect(err).NotTo(o.HaveOccurred()) return msg, nil } func execCommandInNetworkingPod(oc *exutil.CLI, command string) (string, error) { var cmd []string podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-ovn-kubernetes", "-l", "app=ovnkube-node", "-o=jsonpath={.items[0].metadata.name}").Output() if err != nil { e2e.Logf("Cannot get ovn-kubernetes pods, errors: %v", err) return "", err } cmd = []string{"-n", "openshift-ovn-kubernetes", "-c", "ovnkube-controller", podName, "--", "/bin/sh", "-c", command} msg, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output() if err != nil { e2e.Logf("Execute command failed with err:%v .", err) return "", err } o.Expect(err).NotTo(o.HaveOccurred()) return msg, nil } func getDefaultInterface(oc *exutil.CLI) (string, error) { getDefaultInterfaceCmd := "/usr/sbin/ip -4 route show default" int1, err := execCommandInNetworkingPod(oc, getDefaultInterfaceCmd) if err != nil { e2e.Logf("Cannot get default interface, errors: %v", err) return "", err } defInterface := strings.Split(int1, " ")[4] e2e.Logf("Get the default interface: %s", defInterface) return defInterface, nil } func getDefaultSubnet(oc *exutil.CLI) (string, error) { int1, _ := getDefaultInterface(oc) getDefaultSubnetCmd := "/usr/sbin/ip -4 -brief a show " + int1 subnet1, err := execCommandInNetworkingPod(oc, getDefaultSubnetCmd) defSubnet := strings.Fields(subnet1)[2] if err != nil { e2e.Logf("Cannot get default subnet, errors: %v", err) return "", err } e2e.Logf("Get the default subnet: %s", defSubnet) return defSubnet, nil } // Hosts returns all usable host IPs within the given network CIDR (the network and broadcast addresses are excluded) func Hosts(cidr string) ([]string, error) { ip, ipnet, err := net.ParseCIDR(cidr) e2e.Logf("in Hosts function, ip: %v, ipnet: %v", ip, ipnet) if err != nil { return nil, err } var ips []string for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { ips = append(ips, ip.String()) } // remove network address and broadcast address return ips[1 : len(ips)-1], nil } func inc(ip net.IP) { for j := len(ip) - 1; j >= 0; j-- { ip[j]++ if ip[j] > 0 { break } } } func findUnUsedIPs(oc *exutil.CLI, cidr string, number int) []string { ipRange, _ := Hosts(cidr) var ipUnused = []string{} //shuffle the ips slice rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(ipRange), func(i, j int) { ipRange[i], ipRange[j] = ipRange[j], ipRange[i] }) for _, ip := range ipRange { if len(ipUnused) < number { pingCmd := "ping -c4 -t1 " + ip _, err := execCommandInNetworkingPod(oc, pingCmd) if err != nil { e2e.Logf("%s is not used!\n", ip) ipUnused = append(ipUnused, ip) } } else { break } } return ipUnused } func ipEchoServer() string { return "172.31.249.80:9095" } func checkPlatform(oc *exutil.CLI) string { output, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output() return strings.ToLower(output) } func checkNetworkType(oc *exutil.CLI) string { output, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.defaultNetwork.type}").Output() return strings.ToLower(output) } func getDefaultIPv6Subnet(oc *exutil.CLI) (string, error) { int1, _ := getDefaultInterface(oc) getDefaultSubnetCmd := "/usr/sbin/ip -6 -brief a show " + int1 subnet1, err := execCommandInNetworkingPod(oc,
getDefaultSubnetCmd) if err != nil { e2e.Logf("Cannot get default ipv6 subnet, errors: %v", err) return "", err } defSubnet := strings.Fields(subnet1)[2] e2e.Logf("Get the default ipv6 subnet: %s", defSubnet) return defSubnet, nil } func findUnUsedIPv6(oc *exutil.CLI, cidr string, number int) ([]string, error) { ip, ipnet, err := net.ParseCIDR(cidr) if err != nil { return nil, err } number += 2 var ips []string var i = 0 for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { // Do not use the first two IPv6 addresses, such as 2620:52:0:4e:: and 2620:52:0:4e::1 if i == 0 || i == 1 { i++ continue } // Detect whether the IPv6 address is in use if i < number { pingCmd := "ping -c4 -t1 -6 " + ip.String() _, err := execCommandInNetworkingPod(oc, pingCmd) if err != nil { e2e.Logf("%s is not used!\n", ip) ips = append(ips, ip.String()) i++ } } else { break } } return ips, nil } func ipv6EchoServer(isIPv6 bool) string { if isIPv6 { return "[2620:52:0:4974:def4:1ff:fee7:8144]:8085" } return "10.73.116.56:8085" } func checkIPStackType(oc *exutil.CLI) string { svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 { return "dualstack" } else if strings.Count(svcNetwork, ":") >= 2 { return "ipv6single" } else if strings.Count(svcNetwork, ".") >= 2 { return "ipv4single" } return "" } func installSctpModule(oc *exutil.CLI, configFile string) { status, _ := oc.AsAdmin().Run("get").Args("machineconfigs").Output() if !strings.Contains(status, "load-sctp-module") { err := oc.WithoutNamespace().AsAdmin().Run("create").Args("-f", configFile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } } func checkSctpModule(oc *exutil.CLI, nodeName, namespace string) { defer exutil.RecoverNamespaceRestricted(oc, namespace) exutil.SetNamespacePrivileged(oc, namespace) err := wait.Poll(30*time.Second, 15*time.Minute, func() (bool, error) { // Check nodes status to make sure all nodes are up after rebooting caused by load-sctp-module nodesStatus, err := oc.AsAdmin().Run("get").Args("node").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("oc_get_nodes: %v", nodesStatus) status, _ := oc.AsAdmin().Run("debug").Args("node/"+nodeName, "--", "cat", "/sys/module/sctp/initstate").Output() if strings.Contains(status, "live") { e2e.Logf("sctp module is installed on %s", nodeName) return true, nil } return false, nil }) exutil.AssertWaitPollNoErr(err, "sctp module is not installed on the nodes") } func getPodIPv4(oc *exutil.CLI, namespace string, podName string) string { podIPv4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv4) return podIPv4 } func getPodIPv6(oc *exutil.CLI, namespace string, podName string, ipStack string) string { if ipStack == "ipv6single" { podIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv6) return podIPv6 } else if ipStack == "dualstack" { podIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[1].ip}").Output()
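// NOTE: this assumes an IPv4-primary dual-stack cluster, where
// .status.podIPs[1] is the IPv6 address; getPodIP further below instead
// detects the ordering explicitly with netutils.IsIPv6String.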
o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIPv6) return podIPv6 } return "" } // For normal user to create resources in the specified namespace from the file (not template) func createResourceFromFile(oc *exutil.CLI, ns, file string) { err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } func waitForPodWithLabelReady(oc *exutil.CLI, ns, label string) error { return wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output() e2e.Logf("the Ready status of pod is %v", status) if err != nil || status == "" { e2e.Logf("failed to get pod status: %v, retrying...", err) return false, nil } if strings.Contains(status, "False") { e2e.Logf("the pod Ready status not met; wanted True but got %v, retrying...", status) return false, nil } return true, nil }) } func waitForPodWithLabelGone(oc *exutil.CLI, ns, label string) error { errWait := wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) { podsOutput, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output() if strings.Contains(podsOutput, "NotFound") || strings.Contains(podsOutput, "No resources found") { e2e.Logf("the resource is deleted already") return true, nil } e2e.Logf("Wait for pods to be deleted, retrying...") return false, nil }) if errWait != nil { return fmt.Errorf("case: %v\nerror: %s", g.CurrentSpecReport().FullText(), fmt.Sprintf("pod with lable %v in ns %v is not gone", label, ns)) } return nil } func getSvcIPv4(oc *exutil.CLI, namespace string, svcName string) string { svcIPv4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[0]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IPv4 in namespace %s is %q", svcName, namespace, svcIPv4) return svcIPv4 } func getSvcIPv6(oc *exutil.CLI, namespace string, svcName string) string { svcIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[1]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IPv6 in namespace %s is %q", svcName, namespace, svcIPv6) return svcIPv6 } func getSvcIPv6SingleStack(oc *exutil.CLI, namespace string, svcName string) string { svcIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[0]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IPv6 in namespace %s is %q", svcName, namespace, svcIPv6) return svcIPv6 } func getSvcIPdualstack(oc *exutil.CLI, namespace string, svcName string) (string, string) { svcIPv4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[0]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IPv4 in namespace %s is %q", svcName, namespace, svcIPv4) svcIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[1]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IPv6 in namespace %s is %q", svcName, namespace, svcIPv6) return svcIPv4, svcIPv6 } // check if a configmap is created in specific namespace [usage: 
checkConfigMap(oc, namespace, configmapName)] func checkConfigMap(oc *exutil.CLI, ns, configmapName string) error { return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) { searchOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", ns).Output() if err != nil { e2e.Logf("failed to get configmap: %v", err) return false, nil } if o.Expect(searchOutput).To(o.ContainSubstring(configmapName)) { e2e.Logf("configmap %v found", configmapName) return true, nil } return false, nil }) } func sshRunCmd(host string, user string, cmd string) error { privateKey := os.Getenv("SSH_CLOUD_PRIV_KEY") if privateKey == "" { privateKey = "../internal/config/keys/openshift-qe.pem" } sshClient := exutil.SshClient{User: user, Host: host, Port: 22, PrivateKey: privateKey} return sshClient.Run(cmd) } // For Admin to patch a resource in the specified namespace func patchResourceAsAdmin(oc *exutil.CLI, resource, patch string, nameSpace ...string) { var cargs []string if len(nameSpace) > 0 { cargs = []string{resource, "-p", patch, "-n", nameSpace[0], "--type=merge"} } else { cargs = []string{resource, "-p", patch, "--type=merge"} } err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(cargs...).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } // Check network operator status in intervals until timeout func checkNetworkOperatorState(oc *exutil.CLI, interval int, timeout int) { errCheck := wait.Poll(time.Duration(interval)*time.Second, time.Duration(timeout)*time.Second, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "network").Output() if err != nil { e2e.Logf("Fail to get clusteroperator network, error:%s. Trying again", err) return false, nil } matched, _ := regexp.MatchString("True.*False.*False", output) e2e.Logf("Network operator state is:%s", output) o.Expect(matched).To(o.BeTrue()) return false, nil }) o.Expect(errCheck.Error()).To(o.ContainSubstring("timed out waiting for the condition")) } func getNodeIPv4(oc *exutil.CLI, namespace, nodeName string) string { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", oc.Namespace(), "node", nodeName, "-o=jsonpath={.status.addresses[?(@.type==\"InternalIP\")].address}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if err != nil { e2e.Logf("Cannot get node default interface ipv4 address, errors: %v", err) } // when egressIP is applied to a node, it would be listed as an internal IP for the node, thus, there could be more than one IP shown as internal IP // use a regexp to match out the first internal IP re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) nodeipv4 := re.FindAllString(output, -1)[0] e2e.Logf("The IPv4 of node's default interface is %q", nodeipv4) return nodeipv4 } // Return IPv6 and IPv4 in vars respectively for Dual Stack and IPv4/IPv6 in 2nd var for single stack Clusters, and var1 will be nil in those cases func getNodeIP(oc *exutil.CLI, nodeName string) (string, string) { ipStack := checkIPStackType(oc) if (ipStack == "ipv6single") || (ipStack == "ipv4single") { e2e.Logf("It's a Single Stack Cluster, either IPv4 or IPv6") InternalIP, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[?(@.type==\"InternalIP\")].address}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The node's Internal IP is %q", InternalIP) return "", InternalIP } e2e.Logf("It's a Dual Stack Cluster") InternalIP1, err := oc.AsAdmin().Run("get").Args("node", nodeName,
"-o=jsonpath={.status.addresses[0].address}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The node's 1st Internal IP is %q", InternalIP1) InternalIP2, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[1].address}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The node's 2nd Internal IP is %q", InternalIP2) if netutils.IsIPv6String(InternalIP1) { return InternalIP1, InternalIP2 } return InternalIP2, InternalIP1 } // get CLuster Manager's leader info func getLeaderInfo(oc *exutil.CLI, namespace string, cmName string, networkType string) string { if networkType == "ovnkubernetes" { linuxNodeList, err := exutil.GetAllNodesbyOSType(oc, "linux") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(linuxNodeList).NotTo(o.BeEmpty()) podName, getPodNameErr := exutil.GetPodName(oc, namespace, cmName, linuxNodeList[0]) o.Expect(getPodNameErr).NotTo(o.HaveOccurred()) o.Expect(podName).NotTo(o.BeEmpty()) return podName } output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "openshift-network-controller", "-n", namespace, "-o=jsonpath={.metadata.annotations.control-plane\\.alpha\\.kubernetes\\.io\\/leader}").Output() o.Expect(err).NotTo(o.HaveOccurred()) var sdnAnnotations map[string]interface{} json.Unmarshal([]byte(output), &sdnAnnotations) leaderNodeName := sdnAnnotations["holderIdentity"].(string) o.Expect(leaderNodeName).NotTo(o.BeEmpty()) ocGetPods, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-sdn", "pod", "-l app=sdn", "-o=wide").OutputToFile("ocgetpods.txt") defer os.RemoveAll(ocGetPods) o.Expect(podErr).NotTo(o.HaveOccurred()) rawGrepOutput, rawGrepErr := exec.Command("bash", "-c", "cat "+ocGetPods+" | grep "+leaderNodeName+" | awk '{print $1}'").Output() o.Expect(rawGrepErr).NotTo(o.HaveOccurred()) leaderPodName := strings.TrimSpace(string(rawGrepOutput)) e2e.Logf("The leader Pod's name: %v", leaderPodName) return leaderPodName } func checkSDNMetrics(oc *exutil.CLI, url string, metrics string) { var metricsOutput []byte var metricsLog []byte olmToken, err := exutil.GetSAToken(oc) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(olmToken).NotTo(o.BeEmpty()) metricsErr := wait.Poll(5*time.Second, 10*time.Second, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", olmToken), fmt.Sprintf("%s", url)).OutputToFile("metrics.txt") if err != nil { e2e.Logf("Can't get metrics and try again, the error is:%s", err) return false, nil } metricsLog, _ = exec.Command("bash", "-c", "cat "+output+" ").Output() metricsString := string(metricsLog) if strings.Contains(metricsString, "ovnkube_controller_pod") { metricsOutput, _ = exec.Command("bash", "-c", "cat "+output+" | grep "+metrics+" | awk 'NR==1{print $2}'").Output() } else { metricsOutput, _ = exec.Command("bash", "-c", "cat "+output+" | grep "+metrics+" | awk 'NR==3{print $2}'").Output() } metricsValue := strings.TrimSpace(string(metricsOutput)) if metricsValue != "" { e2e.Logf("The output of the metrics for %s is : %v", metrics, metricsValue) } else { e2e.Logf("Can't get metrics for %s:", metrics) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr)) } func getEgressCIDRs(oc *exutil.CLI, node string) string { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostsubnet", node, 
"-o=jsonpath={.egressCIDRs}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("egressCIDR for hostsubnet node %v is: %v", node, output) return output } // get egressIP from a node // When they are multiple egressIPs on the node, egressIp list is in format of ["10.0.247.116","10.0.156.51"] // as an example from the output of command "oc get hostsubnet <node> -o=jsonpath={.egressIPs}" // convert the iplist into an array of ip addresses func getEgressIPByKind(oc *exutil.CLI, kind string, kindName string, expectedNum int) ([]string, error) { var ip = []string{} iplist, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, kindName, "-o=jsonpath={.egressIPs}").Output() isIPListEmpty := (iplist == "" || iplist == "[]") if expectedNum == 0 { // Add waiting time for egressIP removed egressIPEmptyErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { iplist, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, kindName, "-o=jsonpath={.egressIPs}").Output() if iplist == "" || iplist == "[]" { e2e.Logf("EgressIP list is empty") return true, nil } e2e.Logf("EgressIP list is %s, not removed, or have err:%v, and try next round", iplist, err) return false, nil }) return ip, egressIPEmptyErr } if !isIPListEmpty && iplist != "[]" { ip = strings.Split(iplist[2:len(iplist)-2], "\",\"") } if isIPListEmpty || len(ip) < expectedNum || err != nil { err = wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { iplist, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, kindName, "-o=jsonpath={.egressIPs}").Output() if len(iplist) > 0 && iplist != "[]" { ip = strings.Split(iplist[2:len(iplist)-2], "\",\"") } if len(ip) < expectedNum || err != nil { e2e.Logf("only got %d egressIP, or have err:%v, and try next round", len(ip), err) return false, nil } if len(iplist) > 0 && len(ip) == expectedNum { e2e.Logf("Found egressIP list for %v %v is: %v", kind, kindName, iplist) return true, nil } return false, nil }) e2e.Logf("Only got %d egressIP, or have err:%v", len(ip), err) return ip, err } return ip, nil } func getPodName(oc *exutil.CLI, namespace string, label string) []string { var podName []string podNameAll, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pod", "-l", label, "-ojsonpath={.items..metadata.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) podName = strings.Split(podNameAll, " ") e2e.Logf("The pod(s) are %v ", podName) return podName } // starting from first node, compare its subnet with subnet of subsequent nodes in the list // until two nodes with same subnet found, otherwise, return false to indicate that no two nodes with same subnet found func findTwoNodesWithSameSubnet(oc *exutil.CLI, nodeList *v1.NodeList) (bool, [2]string) { var nodes [2]string for i := 0; i < (len(nodeList.Items) - 1); i++ { for j := i + 1; j < len(nodeList.Items); j++ { firstSub := getIfaddrFromNode(nodeList.Items[i].Name, oc) secondSub := getIfaddrFromNode(nodeList.Items[j].Name, oc) if firstSub == secondSub { e2e.Logf("Found nodes with same subnet.") nodes[0] = nodeList.Items[i].Name nodes[1] = nodeList.Items[j].Name return true, nodes } } } return false, nodes } func getSDNMetrics(oc *exutil.CLI, podName string) string { var metricsLog string metricsErr := wait.Poll(5*time.Second, 10*time.Second, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-sdn", fmt.Sprintf("%s", podName), "--", "curl", "localhost:29100/metrics").OutputToFile("metrics.txt") if err != nil { e2e.Logf("Can't get metrics and try again, 
the error is:%s", err) return false, nil } metricsLog = output return true, nil }) exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr)) return metricsLog } func getOVNMetrics(oc *exutil.CLI, url string) string { var metricsLog string olmToken, err := exutil.GetSAToken(oc) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(olmToken).NotTo(o.BeEmpty()) metricsErr := wait.Poll(5*time.Second, 10*time.Second, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", olmToken), fmt.Sprintf("%s", url)).OutputToFile("metrics.txt") if err != nil { e2e.Logf("Can't get metrics and try again, the error is:%s", err) return false, nil } metricsLog = output return true, nil }) exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr)) return metricsLog } func checkIPsec(oc *exutil.CLI) string { output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.defaultNetwork.ovnKubernetesConfig.ipsecConfig.mode}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if output == "" { // if have {} in 4.15+, that means it upgraded from previous version and with ipsec enabled. output, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.defaultNetwork.ovnKubernetesConfig.ipsecConfig}").Output() o.Expect(err).NotTo(o.HaveOccurred()) } e2e.Logf("The ipsec state is === %v ===", output) return output } func getAssignedEIPInEIPObject(oc *exutil.CLI, egressIPObject string) []map[string]string { timeout := estimateTimeoutForEgressIP(oc) var egressIPs string egressipErr := wait.Poll(10*time.Second, timeout, func() (bool, error) { egressIPStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressip", egressIPObject, "-ojsonpath={.status.items}").Output() if err != nil { e2e.Logf("Wait to get EgressIP object applied,try next round. %v", err) return false, nil } if egressIPStatus == "" { e2e.Logf("Wait to get EgressIP object applied,try next round. 
%v", err) return false, nil } egressIPs = egressIPStatus e2e.Logf("egressIPStatus: %v", egressIPs) return true, nil }) exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to apply egressIPs:%s", egressipErr)) var egressIPJsonMap []map[string]string json.Unmarshal([]byte(egressIPs), &egressIPJsonMap) e2e.Logf("egressIPJsonMap:%v", egressIPJsonMap) return egressIPJsonMap } func rebootNode(oc *exutil.CLI, nodeName string) { e2e.Logf("\nRebooting node %s....", nodeName) _, err1 := exutil.DebugNodeWithChroot(oc, nodeName, "shutdown", "-r", "+1") o.Expect(err1).NotTo(o.HaveOccurred()) } func checkNodeStatus(oc *exutil.CLI, nodeName string, expectedStatus string) { var expectedStatus1 string if expectedStatus == "Ready" { expectedStatus1 = "True" } else if expectedStatus == "NotReady" { expectedStatus1 = "Unknown" } else { err1 := fmt.Errorf("TBD supported node status") o.Expect(err1).NotTo(o.HaveOccurred()) } err := wait.Poll(5*time.Second, 15*time.Minute, func() (bool, error) { statusOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", nodeName, "-ojsonpath={.status.conditions[-1].status}").Output() if err != nil { e2e.Logf("\nGet node status with error : %v", err) return false, nil } e2e.Logf("Expect Node %s in state %v, kubelet status is %s", nodeName, expectedStatus, statusOutput) if statusOutput != expectedStatus1 { return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Node %s is not in expected status %s", nodeName, expectedStatus)) } func updateEgressIPObject(oc *exutil.CLI, egressIPObjectName string, egressIP string) { patchResourceAsAdmin(oc, "egressip/"+egressIPObjectName, "{\"spec\":{\"egressIPs\":[\""+egressIP+"\"]}}") egressipErr := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) { output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("egressip", egressIPObjectName, "-o=jsonpath={.status.items[*]}").Output() if err != nil { e2e.Logf("Wait to get EgressIP object applied,try next round. %v", err) return false, nil } if !strings.Contains(output, egressIP) { e2e.Logf("Wait for new IP %s applied,try next round.", egressIP) e2e.Logf(output) return false, nil } e2e.Logf(output) return true, nil }) exutil.AssertWaitPollNoErr(egressipErr, fmt.Sprintf("Failed to apply new egressIP %s:%v", egressIP, egressipErr)) } func getTwoNodesSameSubnet(oc *exutil.CLI, nodeList *v1.NodeList) (bool, []string) { var egressNodes []string if len(nodeList.Items) < 2 { e2e.Logf("Not enough nodes available for the test, skip the case!!") return false, nil } platform := exutil.CheckPlatform(oc) if strings.Contains(platform, "aws") { e2e.Logf("find the two nodes that have same subnet") check, nodes := findTwoNodesWithSameSubnet(oc, nodeList) if check { egressNodes = nodes[:2] } else { e2e.Logf("No more than 2 worker nodes in same subnet, skip the test!!!") return false, nil } } else { e2e.Logf("since worker nodes all have same subnet, just pick first two nodes as egress nodes") egressNodes = append(egressNodes, nodeList.Items[0].Name) egressNodes = append(egressNodes, nodeList.Items[1].Name) } return true, egressNodes } /* getSvcIP returns IPv6 and IPv4 in vars in order on dual stack respectively and main Svc IP in case of single stack (v4 or v6) in 1st var, and nil in 2nd var. 
LoadBalancer svc will return Ingress VIP in var1, v4 or v6 and NodePort svc will return Ingress SvcIP in var1 and NodePort in var2 */ func getSvcIP(oc *exutil.CLI, namespace string, svcName string) (string, string) { ipStack := checkIPStackType(oc) svctype, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.type}").Output() o.Expect(err).NotTo(o.HaveOccurred()) ipFamilyType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.ipFamilyPolicy}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if (svctype == "ClusterIP") || (svctype == "NodePort") { if (ipStack == "ipv6single") || (ipStack == "ipv4single") { svcIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[0]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if svctype == "ClusterIP" { e2e.Logf("The service %s IP in namespace %s is %q", svcName, namespace, svcIP) return svcIP, "" } nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The NodePort service %s IP and NodePort in namespace %s is %s %s", svcName, namespace, svcIP, nodePort) return svcIP, nodePort } else if (ipStack == "dualstack" && ipFamilyType == "PreferDualStack") || (ipStack == "dualstack" && ipFamilyType == "RequireDualStack") { ipFamilyPrecedence, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.ipFamilies[0]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) // if IPv4 is listed first in ipFamilies then clusterIPs allocation takes the order IPv4 first and then IPv6, else the reverse svcIPv4, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[0]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IP in namespace %s is %q", svcName, namespace, svcIPv4) svcIPv6, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[1]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IP in namespace %s is %q", svcName, namespace, svcIPv6) /* As stated, a NodePort type svc will return the node port value in the 2nd var.
We don't care which svc address comes in the 1st var, as we eventually get the node IPs later and use those for curl operations to node_ip:nodeport. */ if ipFamilyPrecedence == "IPv4" { e2e.Logf("The ipFamilyPrecedence is Ipv4, Ipv6") switch svctype { case "NodePort": nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The Dual Stack NodePort service %s IP and NodePort in namespace %s is %s %s", svcName, namespace, svcIPv4, nodePort) return svcIPv4, nodePort default: return svcIPv6, svcIPv4 } } else { e2e.Logf("The ipFamilyPrecedence is Ipv6, Ipv4") switch svctype { case "NodePort": nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.ports[*].nodePort}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The Dual Stack NodePort service %s IP and NodePort in namespace %s is %s %s", svcName, namespace, svcIPv6, nodePort) return svcIPv6, nodePort default: svcIPv4, svcIPv6 = svcIPv6, svcIPv4 return svcIPv6, svcIPv4 } } } else { // It's a Dual Stack Cluster with SingleStack ipFamilyPolicy svcIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.clusterIPs[0]}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The service %s IP in namespace %s is %q", svcName, namespace, svcIP) return svcIP, "" } } else { // LoadBalancer is supported here for single-stack IPv4 only, mostly for GCP and Azure. We can take further enhancements wrt metal platforms in the Metallb utils later e2e.Logf("The serviceType is LoadBalancer") platform := exutil.CheckPlatform(oc) var jsonString string if platform == "aws" { jsonString = "-o=jsonpath={.status.loadBalancer.ingress[0].hostname}" } else { jsonString = "-o=jsonpath={.status.loadBalancer.ingress[0].ip}" } err := wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) { svcIP, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, jsonString).Output() o.Expect(er).NotTo(o.HaveOccurred()) if svcIP == "" { e2e.Logf("Waiting for lb service IP assignment.
Trying again...") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to assign lb svc IP to %v", svcName)) lbSvcIP, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, jsonString).Output() e2e.Logf("The %s lb service Ingress VIP in namespace %s is %q", svcName, namespace, lbSvcIP) return lbSvcIP, "" } } // getPodIP returns IPv6 and IPv4 in vars in order on dual stack respectively and main IP in case of single stack (v4 or v6) in 1st var, and nil in 2nd var func getPodIP(oc *exutil.CLI, namespace string, podName string) (string, string) { ipStack := checkIPStackType(oc) if (ipStack == "ipv6single") || (ipStack == "ipv4single") { podIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod %s IP in namespace %s is %q", podName, namespace, podIP) return podIP, "" } else if ipStack == "dualstack" { podIP1, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[1].ip}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod's %s 1st IP in namespace %s is %q", podName, namespace, podIP1) podIP2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.podIPs[0].ip}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The pod's %s 2nd IP in namespace %s is %q", podName, namespace, podIP2) if netutils.IsIPv6String(podIP1) { e2e.Logf("This is IPv4 primary dual stack cluster") return podIP1, podIP2 } e2e.Logf("This is IPv6 primary dual stack cluster") return podIP2, podIP1 } return "", "" } // CurlPod2PodPass checks connectivity across pods regardless of network addressing type on cluster func CurlPod2PodPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) { podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst) if podIP2 != "" { _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080")) o.Expect(err).NotTo(o.HaveOccurred()) _, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080")) o.Expect(err).NotTo(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080")) o.Expect(err).NotTo(o.HaveOccurred()) } } // CurlPod2PodFail ensures no connectivity from a pod to pod regardless of network addressing type on cluster func CurlPod2PodFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) { podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst) if podIP2 != "" { _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080")) o.Expect(err).To(o.HaveOccurred()) _, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080")) o.Expect(err).To(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080")) o.Expect(err).To(o.HaveOccurred()) } } // CurlNode2PodPass checks node to pod connectivity regardless of network addressing type on cluster func CurlNode2PodPass(oc *exutil.CLI, nodeName string, namespace string, podName string) { //getPodIP returns IPv6 and IPv4 in 
order on dual stack in PodIP1 and PodIP2 respectively and main IP in case of single stack (v4 or v6) in PodIP1, and nil in PodIP2 podIP1, podIP2 := getPodIP(oc, namespace, podName) if podIP2 != "" { podv6URL := net.JoinHostPort(podIP1, "8080") podv4URL := net.JoinHostPort(podIP2, "8080") _, err := exutil.DebugNode(oc, nodeName, "curl", podv4URL, "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = exutil.DebugNode(oc, nodeName, "curl", podv6URL, "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } else { podURL := net.JoinHostPort(podIP1, "8080") _, err := exutil.DebugNode(oc, nodeName, "curl", podURL, "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } } // CurlNode2SvcPass checks node to svc connectivity regardless of network addressing type on cluster func CurlNode2SvcPass(oc *exutil.CLI, nodeName string, namespace string, svcName string) { svcIP1, svcIP2 := getSvcIP(oc, namespace, svcName) if svcIP2 != "" { svc6URL := net.JoinHostPort(svcIP1, "27017") svc4URL := net.JoinHostPort(svcIP2, "27017") _, err := exutil.DebugNode(oc, nodeName, "curl", svc4URL, "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = exutil.DebugNode(oc, nodeName, "curl", svc6URL, "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } else { svcURL := net.JoinHostPort(svcIP1, "27017") _, err := exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5") o.Expect(err).NotTo(o.HaveOccurred()) } } // CurlNode2SvcFail checks node to svc connectivity regardless of network addressing type on cluster func CurlNode2SvcFail(oc *exutil.CLI, nodeName string, namespace string, svcName string) { svcIP1, svcIP2 := getSvcIP(oc, namespace, svcName) if svcIP2 != "" { svc6URL := net.JoinHostPort(svcIP1, "27017") svc4URL := net.JoinHostPort(svcIP2, "27017") output, _ := exutil.DebugNode(oc, nodeName, "curl", svc4URL, "--connect-timeout", "5") o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("Failed"))) output, _ = exutil.DebugNode(oc, nodeName, "curl", svc6URL, "--connect-timeout", "5") o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("Failed"))) } else { svcURL := net.JoinHostPort(svcIP1, "27017") output, _ := exutil.DebugNode(oc, nodeName, "curl", svcURL, "--connect-timeout", "5") o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("Failed"))) } } // CurlPod2SvcPass checks pod to svc connectivity regardless of network addressing type on cluster func CurlPod2SvcPass(oc *exutil.CLI, namespaceSrc string, namespaceSvc string, podNameSrc string, svcName string) { svcIP1, svcIP2 := getSvcIP(oc, namespaceSvc, svcName) if svcIP2 != "" { _, err := e2eoutput.RunHostCmdWithRetries(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 3*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) _, err = e2eoutput.RunHostCmdWithRetries(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP2, "27017"), 3*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmdWithRetries(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017"), 3*time.Second, 15*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } } // CurlPod2SvcFail ensures no connectivity from a pod to svc regardless of network addressing type on cluster func CurlPod2SvcFail(oc *exutil.CLI, namespaceSrc string, namespaceSvc string, podNameSrc string, 
svcName string) { svcIP1, svcIP2 := getSvcIP(oc, namespaceSvc, svcName) if svcIP2 != "" { _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017")) o.Expect(err).To(o.HaveOccurred()) _, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP2, "27017")) o.Expect(err).To(o.HaveOccurred()) } else { _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP1, "27017")) o.Expect(err).To(o.HaveOccurred()) } } func checkProxy(oc *exutil.CLI) bool { httpProxy, err := doAction(oc, "get", true, true, "proxy", "cluster", "-o=jsonpath={.status.httpProxy}") o.Expect(err).NotTo(o.HaveOccurred()) httpsProxy, err := doAction(oc, "get", true, true, "proxy", "cluster", "-o=jsonpath={.status.httpsProxy}") o.Expect(err).NotTo(o.HaveOccurred()) if httpProxy != "" || httpsProxy != "" { return true } return false } // SDNHostwEgressIP finds out which egress node has the egressIP func SDNHostwEgressIP(oc *exutil.CLI, node []string, egressip string) string { var ip []string var foundHost string for i := 0; i < len(node); i++ { iplist, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostsubnet", node[i], "-o=jsonpath={.egressIPs}").Output() e2e.Logf("iplist for node %v: %v", node[i], iplist) if iplist != "" && iplist != "[]" { ip = strings.Split(iplist[2:len(iplist)-2], "\",\"") } if iplist == "" || iplist == "[]" || err != nil { err = wait.Poll(30*time.Second, 3*time.Minute, func() (bool, error) { iplist, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("hostsubnet", node[i], "-o=jsonpath={.egressIPs}").Output() if iplist != "" && iplist != "[]" { e2e.Logf("Found egressIP list for node %v is: %v", node[i], iplist) ip = strings.Split(iplist[2:len(iplist)-2], "\",\"") return true, nil } if err != nil { e2e.Logf("only got %d egressIP, or have err:%v, and try next round", len(ip), err) return false, nil } return false, nil }) } if isValueInList(egressip, ip) { foundHost = node[i] break } } return foundHost } func isValueInList(value string, list []string) bool { for _, v := range list { if v == value { return true } } return false } // getPodMultiNetwork is designed to get both v4 and v6 addresses from pod's secondary interface(net1) which is not in the cluster's SDN or OVN network func getPodMultiNetwork(oc *exutil.CLI, namespace string, podName string) (string, string) { cmd1 := "ip -o -4 addr show dev net1 | awk '$3 == \"inet\" {print $4}' | cut -d'/' -f1" cmd2 := "ip -o -6 addr show dev net1 | awk '$3 == \"inet6\" {print $4}' | head -1 | cut -d'/' -f1" podIPv4, err := e2eoutput.RunHostCmdWithRetries(namespace, podName, cmd1, 2*time.Second, 10*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("pod net1 ipv4 is: %s", podIPv4) o.Expect(podIPv4).NotTo(o.BeNil()) podipv4 := strings.TrimSpace(podIPv4) podIPv6, err1 := e2eoutput.RunHostCmdWithRetries(namespace, podName, cmd2, 2*time.Second, 10*time.Second) o.Expect(err1).NotTo(o.HaveOccurred()) e2e.Logf("pod net1 ipv6 is: %s", podIPv6) o.Expect(podIPv6).NotTo(o.BeNil()) podipv6 := strings.TrimSpace(podIPv6) e2e.Logf("The v4 address of %s is: %v", podName, podipv4) e2e.Logf("The v6 address of %s is: %v", podName, podipv6) return podipv4, podipv6 } // Pinging pod's secondary interfaces should pass func curlPod2PodMultiNetworkPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPv4 string, podIPv6 string) { // Poll to check IPv4 connectivity err :=
wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { msg, _ := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+podIPv4+":8080 --connect-timeout 5") if !strings.Contains(msg, "Hello OpenShift!") { e2e.Logf("The curl should pass but fail, and try next round") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Test fail with err:%s", err)) // Poll to check IPv6 connectivity err1 := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { msg1, _ := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -g -6 ["+podIPv6+"]:8080 --connect-timeout 5") if !strings.Contains(msg1, "Hello OpenShift!") { e2e.Logf("The curl should pass but fail, and try next round") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err1, fmt.Sprintf("Test fail with err:%s", err1)) } // Pinging pod's secondary interfaces should fail func curlPod2PodMultiNetworkFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPv4 string, podIPv6 string) { // Poll to check IPv4 connectivity err := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { msg, _ := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+podIPv4+":8080 --connect-timeout 5") if strings.Contains(msg, "Hello OpenShift!") { e2e.Logf("The curl should fail but pass, and try next round") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Test fail with err:%s", err)) // Poll to check IPv6 connectivity err1 := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { msg1, _ := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -g -6 ["+podIPv6+"]:8080 --connect-timeout 5") if strings.Contains(msg1, "Hello OpenShift!") { e2e.Logf("The curl should fail but pass, and try next round") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err1, fmt.Sprintf("Test fail with err:%s", err1)) } // This function is for testing MultiNetwork with IPBlock policy only func curlPod2PodMultiNetworkIPBlockPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPv4 string, podIPv6 string) { err := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { msg, _ := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+podIPv4+":8080 --connect-timeout 5") if !strings.Contains(msg, "Hello OpenShift!") { e2e.Logf("The curl should pass but fail, and try next round") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Test fail with err:%s", err)) } // This function is for testing MultiNetwork with IPBlock policy only func curlPod2PodMultiNetworkIPBlockFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, podIPv4 string, podIPv6 string) { err := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { msg, _ := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+podIPv4+":8080 --connect-timeout 5") if strings.Contains(msg, "Hello OpenShift!") { e2e.Logf("The curl should fail but pass, and try next round") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Test fail with err:%s", err)) } // This function will bring 2 namespaces, 5 pods and 2 NADs for all multus multinetworkpolicy 
cases func prepareMultinetworkTest(oc *exutil.CLI, ns1 string, ns2 string, patchInfo string) { buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy") netAttachDefFile1 := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-NAD1.yaml") netAttachDefFile2 := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-NAD2.yaml") pingPodTemplate := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-pod-template.yaml") patchSResource := "networks.operator.openshift.io/cluster" exutil.By("Enable MacvlanNetworkpolicy in the cluster") patchResourceAsAdmin(oc, patchSResource, patchInfo) waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False") waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False") exutil.By("Create MultiNetworkPolicy-NAD1 in ns1") err1 := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile1, "-n", ns1).Execute() o.Expect(err1).NotTo(o.HaveOccurred()) output, err2 := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns1).Output() o.Expect(err2).NotTo(o.HaveOccurred()) o.Expect(output).To(o.ContainSubstring("macvlan-nad1")) exutil.By("Create 1st pod in ns1") pod1ns1 := testPodMultinetwork{ name: "blue-pod-1", namespace: ns1, nodename: "worker-0", nadname: "macvlan-nad1", labelname: "blue-openshift", template: pingPodTemplate, } pod1ns1.createTestPodMultinetwork(oc) waitPodReady(oc, pod1ns1.namespace, pod1ns1.name) exutil.By("Create second pod in ns1") pod2ns1 := testPodMultinetwork{ name: "blue-pod-2", namespace: ns1, nodename: "worker-1", nadname: "macvlan-nad1", labelname: "blue-openshift", template: pingPodTemplate, } pod2ns1.createTestPodMultinetwork(oc) waitPodReady(oc, pod2ns1.namespace, pod2ns1.name) exutil.By("Create third pod in ns1") pod3ns1 := testPodMultinetwork{ name: "red-pod-1", namespace: ns1, nodename: "worker-0", nadname: "macvlan-nad1", labelname: "red-openshift", template: pingPodTemplate, } pod3ns1.createTestPodMultinetwork(oc) waitPodReady(oc, pod3ns1.namespace, pod3ns1.name) exutil.By("Create MultiNetworkPolicy-NAD2 in ns2") err4 := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", netAttachDefFile2, "-n", ns2).Execute() o.Expect(err4).NotTo(o.HaveOccurred()) output, err5 := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns2).Output() o.Expect(err5).NotTo(o.HaveOccurred()) o.Expect(output).To(o.ContainSubstring("macvlan-nad2")) exutil.By("Create 1st pod in ns2") pod1ns2 := testPodMultinetwork{ name: "blue-pod-3", namespace: ns2, nodename: "worker-0", nadname: "macvlan-nad2", labelname: "blue-openshift", template: pingPodTemplate, } pod1ns2.createTestPodMultinetwork(oc) waitPodReady(oc, pod1ns2.namespace, pod1ns2.name) exutil.By("Create second pod in ns2") pod2ns2 := testPodMultinetwork{ name: "red-pod-2", namespace: ns2, nodename: "worker-0", nadname: "macvlan-nad2", labelname: "red-openshift", template: pingPodTemplate, } pod2ns2.createTestPodMultinetwork(oc) waitPodReady(oc, pod2ns2.namespace, pod2ns2.name) } // check if an ip address is added to node's NIC, or removed from node's NIC func checkPrimaryNIC(oc *exutil.CLI, nodeName string, ip string, flag bool) { checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { output, err := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", "/usr/sbin/ip -4 -brief address show") if err != nil { e2e.Logf("Cannot get primary NIC interface, errors: %v, try again", err) return false, nil } if flag && !strings.Contains(output, ip) { e2e.Logf("egressIP has not been added to node's NIC correctly, try again") return false, nil } if !flag && 
strings.Contains(output, ip) { e2e.Logf("egressIP has not been removed from node's NIC correctly, try again") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Failed to get NIC on the host:%s", checkErr)) } func checkEgressIPonSDNHost(oc *exutil.CLI, node string, expectedEgressIP []string) { checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { ip, err := getEgressIPByKind(oc, "hostsubnet", node, len(expectedEgressIP)) if err != nil { e2e.Logf("\n got the error: %v\n, try again", err) return false, nil } if !unorderedEqual(ip, expectedEgressIP) { e2e.Logf("\n got egressIP as %v while expected egressIP is %v, try again", ip, expectedEgressIP) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Failed to get egressIP on the host:%s", checkErr)) } func unorderedEqual(first, second []string) bool { if len(first) != len(second) { return false } for _, value := range first { if !contains(second, value) { return false } } return true } func checkovnkubeMasterNetworkProgrammingetrics(oc *exutil.CLI, url string, metrics string) { var metricsOutput []byte olmToken, err := exutil.GetSAToken(oc) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(olmToken).NotTo(o.BeEmpty()) metricsErr := wait.Poll(5*time.Second, 10*time.Second, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", olmToken), fmt.Sprintf("%s", url)).OutputToFile("metrics.txt") if err != nil { e2e.Logf("Can't get metrics and try again, the error is:%s", err) return false, nil } metricsOutput, _ = exec.Command("bash", "-c", "cat "+output+" | grep "+metrics+" | awk 'NR==2{print $2}'").Output() metricsValue := strings.TrimSpace(string(metricsOutput)) if metricsValue != "" { e2e.Logf("The output of the metrics for %s is : %v", metrics, metricsValue) } else { e2e.Logf("Can't get metrics for %s:", metrics) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr)) } func getControllerManagerLeaderIP(oc *exutil.CLI) string { leaderPodName, leaderErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("lease", "openshift-master-controllers", "-n", "openshift-controller-manager", "-o=jsonpath={.spec.holderIdentity}").Output() o.Expect(leaderErr).NotTo(o.HaveOccurred()) o.Expect(leaderPodName).ShouldNot(o.BeEmpty(), "leader pod name is empty") e2e.Logf("The leader pod name is %s", leaderPodName) leaderPodIP := getPodIPv4(oc, "openshift-controller-manager", leaderPodName) return leaderPodIP } func describeCheckEgressIPByKind(oc *exutil.CLI, kind string, kindName string) string { output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args(kind, kindName).Output() o.Expect(err).NotTo(o.HaveOccurred()) egressIPReg, _ := regexp.Compile(".*Egress IPs.*") egressIPStr := egressIPReg.FindString(output) egressIPArr := strings.Split(egressIPStr, ":") //remove whitespace in front of the ip address ip := strings.TrimSpace(egressIPArr[1]) e2e.Logf("get egressIP from oc describe %v %v: --->%s<---", kind, kindName, ip) return ip } func findUnUsedIPsOnNodeOrFail(oc *exutil.CLI, nodeName, cidr string, expectedNum int) []string { freeIPs := findUnUsedIPsOnNode(oc, nodeName, cidr, expectedNum) if len(freeIPs) != expectedNum { g.Skip("Did not get enough free IPs for the test, skip the test.") } return freeIPs } 
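// Illustrative usage sketch (hypothetical helper, not wired into any test): a
// typical egressIP case first derives the node's egress CIDR and then reserves
// unused IPs from it via findUnUsedIPsOnNodeOrFail, which skips the test when
// the CIDR cannot supply them. The name exampleReserveEgressIPs is an example only.
func exampleReserveEgressIPs(oc *exutil.CLI, nodeName string) []string {
	// Derive the CIDR that egress IPs for this node must be taken from.
	sub := getEgressCIDRsForNode(oc, nodeName)
	// Ask for two free IPs in that CIDR; skips the test if fewer are available.
	return findUnUsedIPsOnNodeOrFail(oc, nodeName, sub, 2)
}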
func (pod *externalIPPod) createExternalIPPod(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create the externalIP pod %v", pod.name)) } func checkParameter(oc *exutil.CLI, namespace string, kind string, kindName string, parameter string) string { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", namespace, kind, kindName, parameter).Output() o.Expect(err).NotTo(o.HaveOccurred()) return output } func patchReplaceResourceAsAdmin(oc *exutil.CLI, resource, patch string, nameSpace ...string) { var cargs []string if len(nameSpace) > 0 { cargs = []string{resource, "-p", patch, "-n", nameSpace[0], "--type=json"} } else { cargs = []string{resource, "-p", patch, "--type=json"} } err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(cargs...).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } // For SingleStack function returns IPv6 or IPv4 hostsubnet in case OVN // For SDN plugin returns only IPv4 hostsubnet // Dual stack not supported on openshiftSDN // IPv6 single stack not supported on openshiftSDN // network can be "default" for the default network or UDN network name func getNodeSubnet(oc *exutil.CLI, nodeName string, network string) string { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.k8s\\.ovn\\.org/node-subnets}").Output() o.Expect(err).NotTo(o.HaveOccurred()) var data map[string]interface{} json.Unmarshal([]byte(output), &data) hostSubnets := data[network].([]interface{}) hostSubnet := hostSubnets[0].(string) return hostSubnet } func getNodeSubnetDualStack(oc *exutil.CLI, nodeName string, network string) (string, string) { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.k8s\\.ovn\\.org/node-subnets}").Output() o.Expect(err).NotTo(o.HaveOccurred()) var data map[string]interface{} json.Unmarshal([]byte(output), &data) hostSubnets := data[network].([]interface{}) hostSubnetIPv4 := hostSubnets[0].(string) hostSubnetIPv6 := hostSubnets[1].(string) e2e.Logf("Host subnet is %v and %v", hostSubnetIPv4, hostSubnetIPv6) return hostSubnetIPv4, hostSubnetIPv6 } func getIPv4Capacity(oc *exutil.CLI, nodeName string) string { ipv4Capacity := "" egressIPConfig, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.cloud\\.network\\.openshift\\.io/egress-ipconfig}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The egressipconfig is %v \n", egressIPConfig) switch exutil.CheckPlatform(oc) { case "aws": ipv4Capacity = strings.Split(strings.Split(egressIPConfig, ":")[5], ",")[0] case "gcp": ipv4Capacity = strings.Split(egressIPConfig, ":")[5] ipv4Capacity = ipv4Capacity[:len(ipv4Capacity)-3] default: e2e.Logf("Not support cloud provider for auto egressip cases for now.") g.Skip("Not support cloud provider for auto egressip cases for now.") } return ipv4Capacity } func (aclSettings *aclSettings) getJSONString() string { jsonACLSetting, _ := json.Marshal(aclSettings) annotationString := "k8s.ovn.org/acl-logging=" + string(jsonACLSetting) return annotationString } func enableACLOnNamespace(oc *exutil.CLI, namespace, denyLevel, allowLevel string) { 
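	// A sketch of the annotation applied below (values are examples only; the
	// JSON keys come from the aclSettings struct tags):
	//   k8s.ovn.org/acl-logging={"deny":"alert","allow":"notice"}
	// denyLevel and allowLevel are expected to be syslog-style severities such
	// as "alert", "warning", "notice", "info" or "debug".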
e2e.Logf("Enable ACL looging on the namespace %s", namespace) aclSettings := aclSettings{DenySetting: denyLevel, AllowSetting: allowLevel} err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("--overwrite", "ns", namespace, aclSettings.getJSONString()).Execute() o.Expect(err1).NotTo(o.HaveOccurred()) } func disableACLOnNamespace(oc *exutil.CLI, namespace string) { e2e.Logf("Disable ACL looging on the namespace %s", namespace) err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", namespace, "k8s.ovn.org/acl-logging-").Execute() o.Expect(err1).NotTo(o.HaveOccurred()) } func getNodeMacAddress(oc *exutil.CLI, nodeName string) string { var macAddress string output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.k8s\\.ovn\\.org/l3-gateway-config}").Output() o.Expect(err).NotTo(o.HaveOccurred()) var data map[string]interface{} json.Unmarshal([]byte(output), &data) l3GatewayConfigAnnotations := data["default"].(interface{}) l3GatewayConfigAnnotationsJSON := l3GatewayConfigAnnotations.(map[string]interface{}) macAddress = l3GatewayConfigAnnotationsJSON["mac-address"].(string) return macAddress } // check if an env is in a configmap in specific namespace [usage: checkConfigMap(oc, namesapce, configmapName, envString)] func checkEnvInConfigMap(oc *exutil.CLI, ns, configmapName string, envString string) error { err := checkConfigMap(oc, ns, configmapName) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("cm %v is not found in namespace %v", configmapName, ns)) checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", ns, configmapName, "-oyaml").Output() if err != nil { e2e.Logf("Failed to get configmap %v, error: %s. 
Trying again", configmapName, err) return false, nil } if !strings.Contains(output, envString) { e2e.Logf("Did not find %v in ovnkube-config configmap,try next round.", envString) return false, nil } return true, nil }) return checkErr } // check if certain log message is in a pod in specific namespace func checkLogMessageInPod(oc *exutil.CLI, namespace string, containerName string, podName string, filter string) (string, error) { var podLogs string var err, checkErr error checkErr = wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { podLogs, err = exutil.GetSpecificPodLogsCombinedOrNot(oc, namespace, containerName, podName, filter, true) if len(podLogs) == 0 || err != nil { e2e.Logf("did not get expected podLogs: %v, or have err:%v, try again", podLogs, err) return false, nil } return true, nil }) if checkErr != nil { return podLogs, fmt.Errorf(fmt.Sprintf("fail to get expected log in pod %v, err: %v", podName, err)) } return podLogs, nil } // get OVN-Kubernetes management interface (ovn-k8s-mp0) IP for the node func getOVNK8sNodeMgmtIPv4(oc *exutil.CLI, nodeName string) string { var output string var err error checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { output, err = exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", "/usr/sbin/ip -4 -brief address show | grep ovn-k8s-mp0") if output == "" || err != nil { e2e.Logf("Did not get node's management interface, errors: %v, try again", err) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("fail to get management interface for node %v, err: %v", nodeName, checkErr)) e2e.Logf("Match out the OVN-Kubernetes management IP address for the node") re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) nodeOVNK8sMgmtIP := re.FindAllString(output, -1)[0] e2e.Logf("Got ovn-k8s management interface IP for node %v as: %v", nodeName, nodeOVNK8sMgmtIP) return nodeOVNK8sMgmtIP } // findLogFromPod will search logs for a specific string in the specific container of the pod or just the pod func findLogFromPod(oc *exutil.CLI, searchString string, namespace string, podLabel string, podContainer ...string) bool { findLog := false podNames := getPodName(oc, namespace, podLabel) var cargs []string for _, podName := range podNames { if len(podContainer) > 0 { cargs = []string{podName, "-c", podContainer[0], "-n", namespace} } else { cargs = []string{podName, "-n", namespace} } output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(cargs...).OutputToFile("podlog") o.Expect(err).NotTo(o.HaveOccurred()) grepOutput, err := exec.Command("bash", "-c", "cat "+output+" | grep -i '"+searchString+"' | wc -l").Output() o.Expect(err).NotTo(o.HaveOccurred()) grepOutputString := strings.TrimSpace(string(grepOutput)) if grepOutputString != "0" { e2e.Logf("Found the '%s' string in %s number of lines.", searchString, grepOutputString) findLog = true break } } return findLog } // searchOVNDBForSpecCmd This is used for lr-policy-list and snat rules check in ovn db. 
func searchOVNDBForSpecCmd(oc *exutil.CLI, cmd, searchKeyword string, times int) error { ovnPod := getOVNKMasterOVNkubeNode(oc) o.Expect(ovnPod).ShouldNot(o.Equal("")) var cmdOutput string checkOVNDbErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { output, cmdErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, cmd) if cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try next ...,", cmdErr) return false, nil } cmdOutput = output if strings.Count(output, searchKeyword) == times { return true, nil } return false, nil }) if checkOVNDbErr != nil { e2e.Logf("The command check result in ovndb is not expected ! See below output \n %s ", cmdOutput) } return checkOVNDbErr } // waitEgressFirewallApplied Wait egressfirewall applied func waitEgressFirewallApplied(oc *exutil.CLI, efName, ns string) error { checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { output, efErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", "-n", ns, efName).Output() if efErr != nil { e2e.Logf("Failed to get egressfirewall %v, error: %s. Trying again", efName, efErr) return false, nil } if !strings.Contains(output, "EgressFirewall Rules applied") { e2e.Logf("The egressfirewall was not applied, trying again. \n %s", output) return false, nil } return true, nil }) return checkErr } // switchOVNGatewayMode will switch to requested mode, shared or local func switchOVNGatewayMode(oc *exutil.CLI, mode string) { currentMode := getOVNGatewayMode(oc) if currentMode == "local" && mode == "shared" { e2e.Logf("Migrating cluster to shared gateway mode") patchResourceAsAdmin(oc, "network.operator/cluster", "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"gatewayConfig\":{\"routingViaHost\": false}}}}}") } else if currentMode == "shared" && mode == "local" { e2e.Logf("Migrating cluster to Local gw mode") patchResourceAsAdmin(oc, "network.operator/cluster", "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"gatewayConfig\":{\"routingViaHost\": true}}}}}") } else { e2e.Logf("Cluster is already on requested gateway mode") } _, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", "openshift-ovn-kubernetes", "ds", "ovnkube-node").Output() o.Expect(err).NotTo(o.HaveOccurred()) //on OVN IC it takes upto 660 seconds for nodes ds to rollout so lets poll with timeout of 700 seconds waitForNetworkOperatorState(oc, 100, 18, "True.*False.*False") } // getOVNGatewayMode will return configured OVN gateway mode, shared or local func getOVNGatewayMode(oc *exutil.CLI) string { nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) if len(nodeList.Items) < 1 { g.Skip("This case requires at least one schedulable node") } output, err := oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("describe").Args("node", nodeList.Items[0].Name).Output() o.Expect(err).NotTo(o.HaveOccurred()) str := "local" modeString := strconv.Quote(str) if strings.Contains(output, modeString) { e2e.Logf("Cluster is running on OVN Local Gateway Mode") return str } return "shared" } func getEgressCIDRsForNode(oc *exutil.CLI, nodeName string) string { var sub1 string platform := exutil.CheckPlatform(oc) if strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") || strings.Contains(platform, "nutanix") || strings.Contains(platform, "powervs") { defaultSubnetV4, err := getDefaultSubnet(oc) 
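	// On these on-prem platforms there is no cloud egress-ipconfig annotation,
	// so the egress CIDR comes from the machine network's default IPv4 subnet
	// fetched above, normalized to its network-address form below.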
o.Expect(err).NotTo(o.HaveOccurred()) _, ipNet, err1 := net.ParseCIDR(defaultSubnetV4) o.Expect(err1).NotTo(o.HaveOccurred()) e2e.Logf("ipnet: %v", ipNet) sub1 = ipNet.String() e2e.Logf("\n\n\n sub1 as -->%v<--\n\n\n", sub1) } else { sub1 = getIfaddrFromNode(nodeName, oc) } return sub1 } // get routerID by node name func getRouterID(oc *exutil.CLI, nodeName string) (string, error) { // get the ovnkube-node pod on the node ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(podErr).NotTo(o.HaveOccurred()) o.Expect(ovnKubePod).ShouldNot(o.Equal("")) var cmdOutput, routerName, routerID string var cmdErr error routerName = "GR_" + nodeName cmd := "ovn-nbctl show | grep " + routerName + " | grep 'router '|awk '{print $2}'" checkOVNDbErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { cmdOutput, cmdErr = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKubePod, cmd) if cmdErr != nil { e2e.Logf("%v, waiting for expected result to be synced, try again ...", cmdErr) return false, nil } // Command output always has first line as: Defaulted container "northd" out of: northd, nbdb, kube-rbac-proxy, sbdb, ovnkube-master, ovn-dbchecker // Take result from the second line cmdOutputLines := strings.Split(cmdOutput, "\n") if len(cmdOutputLines) >= 2 { routerID = cmdOutputLines[1] return true, nil } e2e.Logf("Waiting for expected result to be synced, try again ...") return false, nil }) if checkOVNDbErr != nil { e2e.Logf("The command check result in ovndb is not expected ! See below output \n %s ", cmdOutput) } return routerID, checkOVNDbErr } func getSNATofEgressIP(oc *exutil.CLI, nodeName, egressIP string) ([]string, error) { // get the ovnkube-node pod on the node ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(podErr).NotTo(o.HaveOccurred()) o.Expect(ovnKubePod).ShouldNot(o.Equal("")) var cmdOutput string var cmdErr error var snatIP []string routerName := "GR_" + nodeName cmd := "ovn-nbctl lr-nat-list " + routerName + " | grep " + egressIP + " |awk '{print $3}'" checkOVNDbErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { cmdOutput, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubePod, "northd", cmd) if cmdErr != nil { e2e.Logf("%v, waiting for expected result to be synced, try again ...", cmdErr) return false, nil } if cmdOutput != "" { cmdOutputLines := strings.Split(cmdOutput, "\n") for i := 0; i < len(cmdOutputLines); i++ { snatIP = append(snatIP, cmdOutputLines[i]) } return true, nil } e2e.Logf("Waiting for expected result to be synced, try again ...") return false, nil }) if checkOVNDbErr != nil { e2e.Logf("The command check result in ovndb is not expected !
See below output \n %s ", cmdOutput) } return snatIP, checkOVNDbErr } // enableSCTPModuleOnNode Manual way to enable sctp in a cluster func enableSCTPModuleOnNode(oc *exutil.CLI, nodeName, role string) { e2e.Logf("This is %s worker node: %s", role, nodeName) checkSCTPCmd := "cat /sys/module/sctp/initstate" output, err := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", checkSCTPCmd) var installCmd string if err != nil || !strings.Contains(output, "live") { e2e.Logf("No sctp module installed, will enable sctp module!!!") if strings.EqualFold(role, "rhel") { // command for rhel nodes installCmd = "yum install -y kernel-modules-extra-`uname -r` && insmod /usr/lib/modules/`uname -r`/kernel/net/sctp/sctp.ko.xz" } else { // command for rhcos nodes installCmd = "modprobe sctp" } e2e.Logf("Install command is %s", installCmd) // Try 3 times to enable sctp o.Eventually(func() error { _, installErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", installCmd) if installErr != nil && strings.EqualFold(role, "rhel") { e2e.Logf("%v", installErr) g.Skip("Yum insall to enable sctp cannot work in a disconnected cluster, skip the test!!!") } return installErr }, "15s", "5s").ShouldNot(o.HaveOccurred(), fmt.Sprintf("Failed to install sctp module on node %s", nodeName)) // Wait for sctp applied o.Eventually(func() string { output, err := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", checkSCTPCmd) if err != nil { e2e.Logf("Wait for sctp applied, %v", err) } return output }, "60s", "10s").Should(o.ContainSubstring("live"), fmt.Sprintf("Failed to load sctp module on node %s", nodeName)) } else { e2e.Logf("sctp module is loaded on node %s\n%s", nodeName, output) } } func prepareSCTPModule(oc *exutil.CLI, sctpModule string) { nodesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(nodesOutput, "SchedulingDisabled") || strings.Contains(nodesOutput, "NotReady") { g.Skip("There are already some nodes in NotReady or SchedulingDisabled status in cluster, skip the test!!! 
") } workerNodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) if err != nil || len(workerNodeList.Items) == 0 { g.Skip("Can not find any woker nodes in the cluster") } // Will enable sctp by command rhelWorkers, err := exutil.GetAllWorkerNodesByOSID(oc, "rhel") o.Expect(err).NotTo(o.HaveOccurred()) if len(rhelWorkers) > 0 { e2e.Logf("There are %v number rhel workers in this cluster, will use manual way to load sctp module.", len(rhelWorkers)) for _, worker := range rhelWorkers { enableSCTPModuleOnNode(oc, worker, "rhel") } } rhcosWorkers, err := exutil.GetAllWorkerNodesByOSID(oc, "rhcos") o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("%v", rhcosWorkers) if len(rhcosWorkers) > 0 { for _, worker := range rhcosWorkers { enableSCTPModuleOnNode(oc, worker, "rhcos") } } } // getIPv4Gateway get ipv4 gateway address func getIPv4Gateway(oc *exutil.CLI, nodeName string) string { cmd := "ip -4 route | grep default | awk '{print $3}'" output, err := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd) o.Expect(err).NotTo(o.HaveOccurred()) re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) ips := re.FindAllString(output, -1) if len(ips) == 0 { return "" } e2e.Logf("The default gateway of node %s is %s", nodeName, ips[0]) return ips[0] } // getInterfacePrefix return the prefix of the primary interface IP func getInterfacePrefix(oc *exutil.CLI, nodeName string) string { defInf, err := getDefaultInterface(oc) o.Expect(err).NotTo(o.HaveOccurred()) cmd := fmt.Sprintf("ip -4 -brief a show %s | awk '{print $3}' ", defInf) output, err := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("IP address for default interface %s is %s", defInf, output) sli := strings.Split(output, "/") if len(sli) > 0 { return strings.Split(sli[1], "\n")[0] } return "24" } func excludeSriovNodes(oc *exutil.CLI) []string { // In rdu1 and rdu2 clusters, there are two sriov nodes with mlx nic, by default, egressrouter case cannot run on it // So here exclude sriov nodes in rdu1 and rdu2 clusters, just use the other common worker nodes var workers []string nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) for _, node := range nodeList.Items { _, ok := node.Labels["node-role.kubernetes.io/sriov"] if !ok { e2e.Logf("node %s is not sriov node,add it to worker list.", node.Name) workers = append(workers, node.Name) } } return workers } func getSriovNodes(oc *exutil.CLI) []string { // In rdu1 and rdu2 clusters, there are two sriov nodes with mlx nic var workers string workers, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/sriov", "--no-headers", "-o=custom-columns=NAME:.metadata.name").Output() o.Expect(err).NotTo(o.HaveOccurred()) return strings.Split(workers, "\n") } func checkClusterStatus(oc *exutil.CLI, expectedStatus string) { // get all master nodes masterNodes, getAllMasterNodesErr := exutil.GetClusterNodesBy(oc, "master") o.Expect(getAllMasterNodesErr).NotTo(o.HaveOccurred()) o.Expect(masterNodes).NotTo(o.BeEmpty()) // check master nodes status, expect Ready status for them for _, masterNode := range masterNodes { checkNodeStatus(oc, masterNode, "Ready") } // get all worker nodes workerNodes, getAllWorkerNodesErr := exutil.GetClusterNodesBy(oc, "worker") o.Expect(getAllWorkerNodesErr).NotTo(o.HaveOccurred()) o.Expect(workerNodes).NotTo(o.BeEmpty()) // 
check worker nodes status, expect Ready status for them for _, workerNode := range masterNodes { checkNodeStatus(oc, workerNode, "Ready") } } func getOVNKCtrlPlanePodOnHostedCluster(oc *exutil.CLI, namespace, cmName, hyperShiftMgmtNS string) string { // get leader ovnkube-control-plane pod on hypershift hosted cluster ovnkCtrlPlanePodLead, leaderErr := oc.AsGuestKubeconf().Run("get").Args("lease", "ovn-kubernetes-master", "-n", "openshift-ovn-kubernetes", "-o=jsonpath={.spec.holderIdentity}").Output() o.Expect(leaderErr).NotTo(o.HaveOccurred()) e2e.Logf("ovnkube-control-plane pod of the hosted cluster is %s", ovnkCtrlPlanePodLead) return ovnkCtrlPlanePodLead } func waitForPodWithLabelReadyOnHostedCluster(oc *exutil.CLI, ns, label string) error { return wait.Poll(15*time.Second, 10*time.Minute, func() (bool, error) { status, err := oc.AsAdmin().AsGuestKubeconf().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output() e2e.Logf("the Ready status of pod is %v", status) if err != nil || status == "" { e2e.Logf("failed to get pod status: %v, retrying...", err) return false, nil } if strings.Contains(status, "False") { e2e.Logf("the pod Ready status not met; wanted True but got %v, retrying...", status) return false, nil } return true, nil }) } func getPodNameOnHostedCluster(oc *exutil.CLI, namespace, label string) []string { var podName []string podNameAll, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "pod", "-l", label, "-ojsonpath={.items..metadata.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) podName = strings.Split(podNameAll, " ") e2e.Logf("The pod(s) are %v ", podName) return podName } func getReadySchedulableNodesOnHostedCluster(oc *exutil.CLI) ([]string, error) { output, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("node", "-ojsonpath={.items[*].metadata.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) var nodesOnHostedCluster, schedulableNodes []string nodesOnHostedCluster = strings.Split(output, " ") for _, nodeName := range nodesOnHostedCluster { err := wait.Poll(10*time.Second, 15*time.Minute, func() (bool, error) { statusOutput, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("nodes", nodeName, "-ojsonpath={.status.conditions[-1].status}").Output() if err != nil { e2e.Logf("\nGet node status with error : %v", err) return false, nil } if statusOutput != "True" { return false, nil } schedulableNodes = append(schedulableNodes, nodeName) return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Node %s is not in expected status %s", nodeName, "Ready")) } e2e.Logf("Scheduleable nodes on hosted cluster are: %v ", schedulableNodes) return schedulableNodes, nil } func checkLogMessageInPodOnHostedCluster(oc *exutil.CLI, namespace string, containerName string, podName string, filter string) (string, error) { var podLogs string var err, checkErr error checkErr = wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { podLogs, err = exutil.GetSpecificPodLogs(oc.AsAdmin().AsGuestKubeconf(), namespace, containerName, podName, filter) if len(podLogs) == 0 || err != nil { e2e.Logf("did not get expected podLog: %v, or have err:%v, try again", podLogs, err) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("fail to get expected log in pod %v, err: %v", podName, checkErr)) return podLogs, nil } // get OVN-Kubernetes management interface (ovn-k8s-mp0) IP for the node on hosted cluster func 
getOVNK8sNodeMgmtIPv4OnHostedCluster(oc *exutil.CLI, nodeName string) string { var output string var outputErr error defer exutil.RecoverNamespaceRestricted(oc.AsGuestKubeconf(), "default") exutil.SetNamespacePrivileged(oc.AsGuestKubeconf(), "default") checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { output, outputErr = oc.AsGuestKubeconf().WithoutNamespace().Run("debug").Args("-n", "default", "node/"+nodeName, "--", "chroot", "/host", "bash", "-c", "/usr/sbin/ip -4 -brief address show | grep ovn-k8s-mp0").Output() if output == "" || outputErr != nil { e2e.Logf("Did not get node's management interface on hosted cluster, errors: %v, try again", outputErr) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("fail to get management interface for node %v, err: %v", nodeName, checkErr)) e2e.Logf("Match out the OVN-Kubernetes management IP address for the node on hosted cluster") re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) nodeOVNK8sMgmtIPOnHostedCluster := re.FindAllString(output, -1)[0] e2e.Logf("Got ovn-k8s management interface IP for node on hosted cluster %v as: %v", nodeName, nodeOVNK8sMgmtIPOnHostedCluster) return nodeOVNK8sMgmtIPOnHostedCluster } // execute command on debug node with chroot on node of hosted cluster func execCmdOnDebugNodeOfHostedCluster(oc *exutil.CLI, nodeName string, cmdOptions []string) error { cargs := []string{"node/" + nodeName, "--", "chroot", "/host"} if len(cmdOptions) > 0 { cargs = append(cargs, cmdOptions...) } debugErr := oc.AsGuestKubeconf().WithoutNamespace().Run("debug").Args(cargs...).Execute() return debugErr } // check the cronjobs in the openshift-multus namespace func getMultusCronJob(oc *exutil.CLI) string { cronjobLog, cronjobErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("cronjobs", "-n", "openshift-multus").Output() o.Expect(cronjobErr).NotTo(o.HaveOccurred()) return cronjobLog } // get name of OVN egressIP object(s) func getOVNEgressIPObject(oc *exutil.CLI) []string { var egressIPObjects = []string{} egressIPObjectsAll, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressip", "-ojsonpath={.items..metadata.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if len(egressIPObjectsAll) > 0 { egressIPObjects = strings.Split(egressIPObjectsAll, " ") } e2e.Logf("egressIPObjects are %v ", egressIPObjects) return egressIPObjects } // Pod's secondary interface can be assigned an IPv4-only, IPv6-only or dualstack address. getPodMultiNetwork can get IPv4-only and dualstack addresses but not IPv6-only addresses // getPodMultiNetworkIPv6 is defined to get the IPv6-only address.
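// Example (illustrative only; pod and namespace names are placeholders):
//   v6 := getPodMultiNetworkIPv6(oc, ns, "sctp-server-pod")
//   then reach it from a client pod with "curl -g -6 [" + v6 + "]:8080 --connect-timeout 5"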
func getPodMultiNetworkIPv6(oc *exutil.CLI, namespace string, podName string) string { cmd1 := "ip a sho net1 | awk 'NR==3{print $2}' |grep -Eo '([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}'" podIPv6, err1 := e2eoutput.RunHostCmd(namespace, podName, cmd1) o.Expect(err1).NotTo(o.HaveOccurred()) MultiNetworkIPv6 := strings.TrimSpace(podIPv6) return MultiNetworkIPv6 } // get node that hosts the egressIP func getHostsubnetByEIP(oc *exutil.CLI, expectedEIP string) string { var nodeHostsEIP string nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) for i, v := range nodeList.Items { ip, err := getEgressIPByKind(oc, "hostsubnet", nodeList.Items[i].Name, 1) o.Expect(err).NotTo(o.HaveOccurred()) if ip[0] == expectedEIP { e2e.Logf("Found node %v hosting egressIP %v", v.Name, expectedEIP) nodeHostsEIP = nodeList.Items[i].Name break } } return nodeHostsEIP } // find the ovn-K cluster manager master pod func getOVNKMasterPod(oc *exutil.CLI) string { leaderCtrlPlanePod, leaderNodeLogerr := oc.AsAdmin().WithoutNamespace().Run("get").Args("lease", "ovn-kubernetes-master", "-n", "openshift-ovn-kubernetes", "-o=jsonpath={.spec.holderIdentity}").Output() o.Expect(leaderNodeLogerr).NotTo(o.HaveOccurred()) return leaderCtrlPlanePod } // find the cluster-manager's ovnkube-node for accessing master components func getOVNKMasterOVNkubeNode(oc *exutil.CLI) string { leaderPod, leaderNodeLogerr := oc.AsAdmin().WithoutNamespace().Run("get").Args("lease", "ovn-kubernetes-master", "-n", "openshift-ovn-kubernetes", "-o=jsonpath={.spec.holderIdentity}").Output() o.Expect(leaderNodeLogerr).NotTo(o.HaveOccurred()) leaderNodeName, getNodeErr := exutil.GetPodNodeName(oc, "openshift-ovn-kubernetes", leaderPod) o.Expect(getNodeErr).NotTo(o.HaveOccurred()) ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", leaderNodeName) o.Expect(podErr).NotTo(o.HaveOccurred()) return ovnKubePod } // enable multicast on specific namespace func enableMulticast(oc *exutil.CLI, ns string) { _, err := runOcWithRetry(oc.AsAdmin().WithoutNamespace(), "annotate", "namespace", ns, "k8s.ovn.org/multicast-enabled=true") o.Expect(err).NotTo(o.HaveOccurred()) } func getCNOStatusCondition(oc *exutil.CLI) string { CNOStatusCondition, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusteroperators", "network", "-o=jsonpath={.status.conditions}").Output() o.Expect(err).NotTo(o.HaveOccurred()) return CNOStatusCondition } // returns severity, expr and runbook of specific ovn alert in networking-rules func getOVNAlertNetworkingRules(oc *exutil.CLI, alertName string) (string, string, string) { // get all ovn alert names in networking-rules ns := "openshift-ovn-kubernetes" allAlerts, nameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "networking-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output() o.Expect(nameErr).NotTo(o.HaveOccurred()) e2e.Logf("The alerts are %v", allAlerts) if !strings.Contains(allAlerts, alertName) { e2e.Failf("Target alert %v is not found", alertName) return "", "", "" } else { var severity, expr string severity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\""+alertName+"\")].labels.severity}").Output() o.Expect(severityErr).NotTo(o.HaveOccurred()) e2e.Logf("The alert severity is %v", severity) expr, exprErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "networking-rules",
"-o=jsonpath={.spec.groups[*].rules[?(@.alert==\""+alertName+"\")].expr}").Output() o.Expect(exprErr).NotTo(o.HaveOccurred()) e2e.Logf("The alert expr is %v", expr) runbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "networking-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\""+alertName+"\")].annotations.runbook_url}").Output() o.Expect(runbookErr).NotTo(o.HaveOccurred()) e2e.Logf("The alert runbook is %v", runbook) return severity, expr, runbook } } // return severity, expr and runbook of specific ovn alert in master-rules func getOVNAlertMasterRules(oc *exutil.CLI, alertName string) (string, string, string) { // get all ovn alert names in networking-rules ns := "openshift-ovn-kubernetes" allAlerts, nameErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "master-rules", "-o=jsonpath={.spec.groups[*].rules[*].alert}").Output() o.Expect(nameErr).NotTo(o.HaveOccurred()) e2e.Logf("The alert are %v", allAlerts) if !strings.Contains(allAlerts, alertName) { e2e.Failf("Target alert %v is not found", alertName) return "", "", "" } else { var severity, expr string severity, severityErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\""+alertName+"\")].labels.severity}").Output() o.Expect(severityErr).NotTo(o.HaveOccurred()) e2e.Logf("The alert severity is %v", severity) expr, exprErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\""+alertName+"\")].expr}").Output() o.Expect(exprErr).NotTo(o.HaveOccurred()) e2e.Logf("The alert expr is %v", expr) runbook, runbookErr := oc.AsAdmin().Run("get").Args("prometheusrule", "-n", ns, "master-rules", "-o=jsonpath={.spec.groups[*].rules[?(@.alert==\""+alertName+"\")].annotations.runbook_url}").Output() o.Expect(runbookErr).NotTo(o.HaveOccurred()) e2e.Logf("The alert runbook is %v", runbook) return severity, expr, runbook } } // returns all the logical routers and switches on all the nodes func getOVNConstructs(oc *exutil.CLI, constructType string, nodeNames []string) []string { var ovnConstructs []string var matchStr string //var cmdOutput string getCmd := "ovn-nbctl --no-leader-only " + constructType ovnPod := getOVNKMasterOVNkubeNode(oc) o.Expect(ovnPod).ShouldNot(o.Equal("")) checkOVNDbErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { cmdOutput, cmdErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPod, getCmd) if cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try again ...,", cmdErr) return false, nil } o.Expect(cmdOutput).ShouldNot(o.Equal("")) for _, index := range strings.Split(cmdOutput, "\n") { for _, node := range nodeNames { if constructType == "ls-list" { matchStr = fmt.Sprintf("\\((%s\\))", node) } else { matchStr = fmt.Sprintf("\\((GR_%s\\))", node) } re := regexp.MustCompile(matchStr) if re.FindString(index) != "" { ovnConstruct := strings.Fields(index) ovnConstructs = append(ovnConstructs, ovnConstruct[0]) } } } return true, nil }) if checkOVNDbErr != nil { e2e.Logf("The result in ovndb is not expected ! 
See below output \n %s ", checkOVNDbErr) } return ovnConstructs } // Returns the logical router or logical switch on a node func (svcEndpontDetails *svcEndpontDetails) getOVNConstruct(oc *exutil.CLI, constructType string) string { var ovnConstruct string var matchStr string getCmd := "ovn-nbctl " + constructType checkOVNDbErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { cmdOutput, cmdErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", svcEndpontDetails.ovnKubeNodePod, getCmd) if cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try again ...,", cmdErr) return false, nil } if cmdOutput == "" { return true, nil } for _, index := range strings.Split(cmdOutput, "\n") { if constructType == "ls-list" { matchStr = fmt.Sprintf("\\((%s\\))", svcEndpontDetails.nodeName) } else { matchStr = fmt.Sprintf("\\((GR_%s\\))", svcEndpontDetails.nodeName) } re := regexp.MustCompile(matchStr) if re.FindString(index) != "" { matchedStr := strings.Fields(index) ovnConstruct = matchedStr[0] } } return true, nil }) if checkOVNDbErr != nil { e2e.Logf("The result in ovndb is not expected ! See below output \n %s ", checkOVNDbErr) } return ovnConstruct } // returns load balancer entries created for LB service type on routers or switches on all nodes func getOVNLBContructs(oc *exutil.CLI, constructType string, endPoint string, ovnConstruct []string) bool { var result bool ovnPod := getOVNKMasterOVNkubeNode(oc) o.Expect(ovnPod).ShouldNot(o.Equal("")) //only if the count for any of output is less than three the success will be false result = true for _, construct := range ovnConstruct { checkOVNDbErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { getCmd := "ovn-nbctl --no-leader-only " + constructType + " " + construct + " | grep " + endPoint cmdOutput, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnPod, "northd", getCmd) if cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try next ...,", cmdErr) result = false return false, nil } if len(strings.Split(cmdOutput, "\n")) >= 2 { e2e.Logf("Required entries %s were created for service on %s", constructType, construct) result = true } else { e2e.Logf("Required entries %s were not created for service on %s", constructType, construct) result = false } return true, nil }) if checkOVNDbErr != nil { e2e.Logf("The command check result in ovndb is not expected ! 
See below output \n %s ", checkOVNDbErr) result = false } } return result } // returns load balancer entries created for LB service type on routers or switches on a single node func (svcEndpontDetails *svcEndpontDetails) getOVNLBContruct(oc *exutil.CLI, constructType string, construct string) bool { var result bool //only if the count for any of output is less than three the success will be false result = true checkOVNDbErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { getCmd := "ovn-nbctl " + constructType + " " + construct + " | grep " + svcEndpontDetails.podIP cmdOutput, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", svcEndpontDetails.ovnKubeNodePod, "northd", getCmd) if cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try next ...,", cmdErr) result = false return false, nil } if len(strings.Split(cmdOutput, "\n")) >= 2 { e2e.Logf("Required entries %s were created for service on %s", constructType, construct) result = true } else { e2e.Logf("Required entries %s were not created for service on %s", constructType, construct) result = false } return true, nil }) if checkOVNDbErr != nil { e2e.Logf("The command check result in ovndb is not expected ! See below output \n %s ", checkOVNDbErr) result = false } return result } func getServiceEndpoints(oc *exutil.CLI, serviceName string, serviceNamespace string) string { serviceEndpoint, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ep", serviceName, "-n", serviceNamespace, "--no-headers").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(serviceEndpoint).ShouldNot(o.BeEmpty()) e2e.Logf("Service endpoint %v", serviceEndpoint) result := strings.Fields(serviceEndpoint) return result[1] } func getOVNMetricsInSpecificContainer(oc *exutil.CLI, containerName string, podName string, url string, metricName string) string { var metricValue string metricsErr := wait.Poll(5*time.Second, 10*time.Second, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-ovn-kubernetes", "-c", containerName, podName, "--", "curl", url).OutputToFile("metrics.txt") if err != nil { e2e.Logf("Can't get metrics and try again, the error is:%s", err) return false, nil } metricOutput, getMetricErr := exec.Command("bash", "-c", "cat "+output+" | grep -e '^"+metricName+" ' | awk 'END {print $2}'").Output() o.Expect(getMetricErr).NotTo(o.HaveOccurred()) metricValue = strings.TrimSpace(string(metricOutput)) o.Expect(metricValue).ShouldNot(o.BeEmpty()) e2e.Logf("The output of the %s is : %v", metricName, metricValue) return true, nil }) exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Fail to get metric and the error is:%s", metricsErr)) return metricValue } // CurlNodePortPass checks nodeport svc reacability from a node regardless of network addressing type on cluster func CurlNodePortPass(oc *exutil.CLI, nodeNameFrom string, nodeNameTo string, nodePort string) { nodeIP1, nodeIP2 := getNodeIP(oc, nodeNameTo) if nodeIP1 != "" { nodev6URL := net.JoinHostPort(nodeIP1, nodePort) nodev4URL := net.JoinHostPort(nodeIP2, nodePort) output, _ := exutil.DebugNode(oc, nodeNameFrom, "curl", nodev4URL, "-s", "--connect-timeout", "5") o.Expect(output).Should(o.ContainSubstring("Hello OpenShift")) output, _ = exutil.DebugNode(oc, nodeNameFrom, "curl", nodev6URL, "-s", "--connect-timeout", "5") o.Expect(output).Should(o.ContainSubstring("Hello OpenShift")) } else { nodeURL := net.JoinHostPort(nodeIP2, nodePort) output, _ := exutil.DebugNode(oc, 
nodeNameFrom, "curl", nodeURL, "-s", "--connect-timeout", "5") o.Expect(output).Should(o.ContainSubstring("Hello OpenShift")) } } // CurlNodePortFail checks nodeport svc unreacability from a node regardless of network addressing type on cluster func CurlNodePortFail(oc *exutil.CLI, nodeNameFrom string, nodeNameTo string, nodePort string) { nodeIP1, nodeIP2 := getNodeIP(oc, nodeNameTo) if nodeIP1 != "" { nodev6URL := net.JoinHostPort(nodeIP1, nodePort) nodev4URL := net.JoinHostPort(nodeIP2, nodePort) output, _ := exutil.DebugNode(oc, nodeNameFrom, "curl", nodev4URL, "--connect-timeout", "5") o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("timed out"), o.ContainSubstring("Connection refused"))) output, _ = exutil.DebugNode(oc, nodeNameFrom, "curl", nodev6URL, "--connect-timeout", "5") o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("timed out"), o.ContainSubstring("Connection refused"))) } else { nodeURL := net.JoinHostPort(nodeIP2, nodePort) output, _ := exutil.DebugNode(oc, nodeNameFrom, "curl", nodeURL, "--connect-timeout", "5") o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("timed out"), o.ContainSubstring("Connection refused"))) } } func CurlPod2NodePortPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, nodeNameTo string, nodePort string) { nodeIP1, nodeIP2 := getNodeIP(oc, nodeNameTo) if nodeIP1 != "" { nodev6URL := net.JoinHostPort(nodeIP1, nodePort) nodev4URL := net.JoinHostPort(nodeIP2, nodePort) output, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+nodev4URL+" --connect-timeout 5") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("Hello OpenShift")) output, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+nodev6URL+" --connect-timeout 5") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("Hello OpenShift")) } else { nodeURL := net.JoinHostPort(nodeIP2, nodePort) output, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+nodeURL+" --connect-timeout 5") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(output).Should(o.ContainSubstring("Hello OpenShift")) } } func CurlPod2NodePortFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, nodeNameTo string, nodePort string) { nodeIP1, nodeIP2 := getNodeIP(oc, nodeNameTo) if nodeIP1 != "" { nodev6URL := net.JoinHostPort(nodeIP1, nodePort) nodev4URL := net.JoinHostPort(nodeIP2, nodePort) _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+nodev4URL+" --connect-timeout 5") o.Expect(err).To(o.HaveOccurred()) _, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+nodev6URL+" --connect-timeout 5") o.Expect(err).To(o.HaveOccurred()) } else { nodeURL := net.JoinHostPort(nodeIP2, nodePort) _, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl "+nodeURL+" --connect-timeout 5") o.Expect(err).To(o.HaveOccurred()) } } // get primary NIC interface name func getPrimaryNICname(oc *exutil.CLI) string { masterNode, getMasterNodeErr := exutil.GetFirstMasterNode(oc) o.Expect(getMasterNodeErr).NotTo(o.HaveOccurred()) primary_int, err := exutil.DebugNodeWithChroot(oc, masterNode, "bash", "-c", "nmcli -g connection.interface-name c show ovs-if-phys0") o.Expect(err).NotTo(o.HaveOccurred()) primary_inf_name := strings.Split(primary_int, "\n") e2e.Logf("Primary Inteface name is : %s", primary_inf_name[0]) return primary_inf_name[0] } // get file contents to be modified for SCTP func getFileContentforSCTP(baseDir string, name string) (fileContent string) 
{ filePath := filepath.Join(exutil.FixturePath("testdata", "networking", baseDir), name) fileOpen, err := os.Open(filePath) if err != nil { e2e.Failf("Failed to open file: %s", filePath) } defer fileOpen.Close() fileRead, readErr := io.ReadAll(fileOpen) if readErr != nil { e2e.Failf("Failed to read file: %s", filePath) } return string(fileRead) } // get generic sctpclient pod yaml file, replace variables as per requirements func createSCTPclientOnNode(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) { PodGenericYaml := getFileContentforSCTP("sctp", "sctpclientspecificnode.yaml") for rep, value := range pod_pmtrs { PodGenericYaml = strings.ReplaceAll(PodGenericYaml, rep, value) } podFileName := "temp-sctp-client-pod-" + getRandomString() + ".yaml" defer os.Remove(podFileName) os.WriteFile(podFileName, []byte(PodGenericYaml), 0644) // create the sctp client pod on the specified node _, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", podFileName).Output() return err } // get generic sctpserver pod yaml file, replace variables as per requirements func createSCTPserverOnNode(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) { PodGenericYaml := getFileContentforSCTP("sctp", "sctpserverspecificnode.yaml") for rep, value := range pod_pmtrs { PodGenericYaml = strings.ReplaceAll(PodGenericYaml, rep, value) } podFileName := "temp-sctp-server-pod-" + getRandomString() + ".yaml" defer os.Remove(podFileName) os.WriteFile(podFileName, []byte(PodGenericYaml), 0644) // create the sctp server pod on the specified node _, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", podFileName).Output() return err } // configure IPSec at runtime, targetStatus can be full/disabled/external func configIPSecAtRuntime(oc *exutil.CLI, targetStatus string) (err error) { var targetConfig, currentStatus string ipsecState := checkIPsec(oc) if ipsecState == "{}" || ipsecState == "Full" { currentStatus = "full" } else if ipsecState == "Disabled" { currentStatus = "disabled" } else if ipsecState == "External" { currentStatus = "external" } if currentStatus == targetStatus { e2e.Logf("The IPSec is already in %v state", targetStatus) return } else if targetStatus == "full" { //In 4.15+, enabling/disabling ipsec would require nodes restart targetConfig = "true" e2e.Logf("Start to enable ipsec.") _, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("networks.operator.openshift.io", "cluster", "-p", "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipsecConfig\":{\"mode\":\"Full\"}}}}}", "--type=merge").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("Wait for the MC rollout to start") o.Eventually(func() error { err := exutil.AssertOrCheckMCP(oc, "master", 30*time.Second, 30*time.Second, false) return err }, "300s", "30s").ShouldNot(o.BeNil(), "MC rollout didn't start yet.") e2e.Logf("Wait for the MC to be applied to nodes") err = exutil.AssertOrCheckMCP(oc, "master", 60*time.Second, 30*time.Minute, false) o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.AssertOrCheckMCP(oc, "worker", 60*time.Second, 5*time.Minute, false) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("MC rollout done") e2e.Logf("Wait for ipsec pods to be running in openshift-ovn-kubernetes") for i := 0; i < 2; i++ { err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovn-ipsec") if err == nil { break } } o.Expect(err).NotTo(o.HaveOccurred()) ovnLeaderpod := getOVNKMasterOVNkubeNode(oc) removeResource(oc, true, true, "pod", ovnLeaderpod, "-n", "openshift-ovn-kubernetes") e2e.Logf("Wait for ovnkube-node pods to be running in openshift-ovn-kubernetes") err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node") o.Expect(err).NotTo(o.HaveOccurred()) } else if targetStatus == "disabled" { targetConfig = "false" _, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("networks.operator.openshift.io", "cluster", "-p", "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipsecConfig\":{\"mode\":\"Disabled\"}}}}}", "--type=merge").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("Wait for ovn-ipsec pods to disappear") err = waitForPodWithLabelGone(oc, "openshift-ovn-kubernetes", "app=ovn-ipsec") o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("Wait for the MC rollout to start") o.Eventually(func() error { err := exutil.AssertOrCheckMCP(oc, "master", 30*time.Second, 30*time.Second, false) return err }, "300s", "30s").ShouldNot(o.BeNil(), "MC rollout didn't start yet.") e2e.Logf("Wait for the MC to be applied to nodes") err = exutil.AssertOrCheckMCP(oc, "master", 60*time.Second, 30*time.Minute, false) o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.AssertOrCheckMCP(oc, "worker", 60*time.Second, 5*time.Minute, false) o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("MC rollout done") } checkErr := checkIPSecInDB(oc, targetConfig) exutil.AssertWaitPollNoErr(checkErr, "check IPSec configuration failed") return nil } // check IPSec configuration in northd, targetConfig should be "true" or "false" func checkIPSecInDB(oc *exutil.CLI, targetConfig string) error { ovnLeaderpod := getOVNKMasterOVNkubeNode(oc) return wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { getIPSec, getErr := execCommandInSpecificPod(oc, "openshift-ovn-kubernetes", ovnLeaderpod, "ovn-nbctl --no-leader-only get nb_global . ipsec") o.Expect(getErr).NotTo(o.HaveOccurred()) if strings.Contains(getIPSec, targetConfig) { return true, nil } e2e.Logf("Can't get expected ipsec configuration, trying again") return false, nil }) } // IsIPv4 checks if the string is an IPv4 address. func IsIPv4(str string) bool { ip := net.ParseIP(str) return ip != nil && strings.Contains(str, ".") }
func IsIPv6(str string) bool { ip := net.ParseIP(str) return ip != nil && strings.Contains(str, ":") } // checkSCTPResultPASS func checkSCTPResultPASS(oc *exutil.CLI, namespace, sctpServerPodName, sctpClientPodname, dstIP, dstPort string) { exutil.By("sctpserver pod start to wait for sctp traffic") _, _, _, err1 := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background() o.Expect(err1).NotTo(o.HaveOccurred()) time.Sleep(5 * time.Second) exutil.By("check sctp process enabled in the sctp server pod") msg, err2 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp") o.Expect(err2).NotTo(o.HaveOccurred()) o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue()) exutil.By("sctpclient pod start to send sctp traffic") _, err3 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+dstIP+" "+dstPort+" --sctp; }") o.Expect(err3).NotTo(o.HaveOccurred()) exutil.By("server sctp process will end after get sctp traffic from sctp client") time.Sleep(5 * time.Second) msg1, err4 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp") o.Expect(err4).NotTo(o.HaveOccurred()) o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp")) } func ovnkubeNodePod(oc *exutil.CLI, nodeName string) string { // get OVNkubeNode pod on specific node. ovnNodePod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-ovn-kubernetes", "pod", "-l app=ovnkube-node", "--field-selector", "spec.nodeName="+nodeName, "-o=jsonpath={.items[0].metadata.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) e2e.Logf("The ovnkube-node pod on node %s is %s", nodeName, ovnNodePod) o.Expect(ovnNodePod).NotTo(o.BeEmpty()) return ovnNodePod } func waitForNetworkOperatorState(oc *exutil.CLI, interval int, timeout int, expectedStatus string) { waitForClusterOperatorState(oc, "network", interval, timeout, expectedStatus) } func waitForClusterOperatorState(oc *exutil.CLI, co string, interval int, timeout int, expectedStatus string) { errCheck := wait.Poll(time.Duration(interval)*time.Second, time.Duration(timeout)*time.Minute, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", co).Output() if err != nil { e2e.Logf("Fail to get clusteroperator network, error:%s. 
Trying again", err) return false, nil } if matched, _ := regexp.MatchString(expectedStatus, output); !matched { e2e.Logf("Network operator state is:%s", output) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Timed out waiting for the expected condition")) } func enableIPForwardingOnSpecNodeNIC(oc *exutil.CLI, worker, secNIC string) { cmd := fmt.Sprintf("sysctl net.ipv4.conf.%s.forwarding", secNIC) output, debugNodeErr := exutil.DebugNode(oc, worker, "bash", "-c", cmd) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) if !strings.Contains(output, ".forwarding = 1") { e2e.Logf("Enable IP forwarding for NIC %s on node %s ...", secNIC, worker) enableCMD := fmt.Sprintf("sysctl -w net.ipv4.conf.%s.forwarding=1", secNIC) _, debugNodeErr = exutil.DebugNode(oc, worker, "bash", "-c", enableCMD) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) } e2e.Logf("IP forwarding was enabled for NIC %s on node %s!", secNIC, worker) } func disableIPForwardingOnSpecNodeNIC(oc *exutil.CLI, worker, secNIC string) { cmd := fmt.Sprintf("sysctl net.ipv4.conf.%s.forwarding", secNIC) output, debugNodeErr := exutil.DebugNode(oc, worker, "bash", "-c", cmd) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) if strings.Contains(output, ".forwarding = 1") { e2e.Logf("Disable IP forwarding for NIC %s on node %s ...", secNIC, worker) disableCMD := fmt.Sprintf("sysctl -w net.ipv4.conf.%s.forwarding=0", secNIC) _, debugNodeErr = exutil.DebugNode(oc, worker, "bash", "-c", disableCMD) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) } e2e.Logf("IP forwarding was disabled for NIC %s on node %s!", secNIC, worker) } func nbContructToMap(nbConstruct string) map[string]string { listKeyValues := strings.Split(nbConstruct, "\n") var tempMap map[string]string tempMap = make(map[string]string) for _, keyValPair := range listKeyValues { keyValItem := strings.SplitN(keyValPair, ":", 2) key := strings.Trim(keyValItem[0], " ") val := strings.TrimLeft(keyValItem[1], " ") tempMap[key] = val } return tempMap } // Create live migration job on Kubevirt cluster func (migrationjob *migrationDetails) createMigrationJob(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", migrationjob.template, "-p", "NAME="+migrationjob.name, "NAMESPACE="+migrationjob.namespace, "VMI="+migrationjob.virtualmachinesintance) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create migration job %v", migrationjob.name)) } // Delete migration job on Kubevirt cluster func (migrationjob *migrationDetails) deleteMigrationJob(oc *exutil.CLI) { removeResource(oc, true, true, "virtualmachineinstancemigration.kubevirt.io", migrationjob.name, "-n", migrationjob.namespace) } // Check all cluster operators status on the cluster func checkAllClusterOperatorsState(oc *exutil.CLI, interval int, timeout int) { operatorsString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "-o=jsonpath={.items[*].metadata.name}").Output() o.Expect(err).NotTo(o.HaveOccurred()) var clusterOperators []string if operatorsString != "" { clusterOperators = strings.Split(operatorsString, " ") } for _, clusterOperator := range clusterOperators { errCheck := wait.Poll(time.Duration(interval)*time.Second, time.Duration(timeout)*time.Minute, func() (bool, error) { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", 
clusterOperator).Output() if err != nil { e2e.Logf("Fail to get state for operator %s, error:%s. Trying again", clusterOperator, err) return false, err } if matched, _ := regexp.MatchString("True.*False.*False", output); !matched { e2e.Logf("Operator %s on hosted cluster is in state: %s", clusterOperator, output) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(errCheck, "Timed out waiting for the expected condition") } } // Check OVNK health: OVNK pods health and ovnkube-node DS health func checkOVNKState(oc *exutil.CLI) error { // check all OVNK pods waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node") if !exutil.IsHypershiftHostedCluster(oc) { waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane") } // check ovnkube-node ds rollout status and confirm if rollout has triggered return wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { status, err := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", "openshift-ovn-kubernetes", "ds", "ovnkube-node", "--timeout", "5m").Output() o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(status, "rollout to finish") && strings.Contains(status, "successfully rolled out") { e2e.Logf("ovnkube rollout was triggered and rolled out successfully") return true, nil } e2e.Logf("ovnkube rollout trigger hasn't happened yet. Trying again") return false, nil }) } func addDummyInferface(oc *exutil.CLI, nodeName, IP, nicName string) { e2e.Logf("Add a dummy interface %s on node %s \n", nicName, nodeName) cmd := fmt.Sprintf("ip link a %s type dummy && ip link set dev %s up && ip a add %s dev %s && ip a show %s", nicName, nicName, IP, nicName, nicName) output, debugNodeErr := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) e2e.Logf("The dummy interface was added. \n %s", output) } func addIPtoInferface(oc *exutil.CLI, nodeName, IP, nicName string) { e2e.Logf("Add IP address %s to interface %s on node %s \n", IP, nicName, nodeName) cmd := fmt.Sprintf("ip a show %s && ip a add %s dev %s", nicName, IP, nicName) _, debugNodeErr := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) } func delIPFromInferface(oc *exutil.CLI, nodeName, IP, nicName string) { e2e.Logf("Remove IP address %s from interface %s on node %s \n", IP, nicName, nodeName) cmd := fmt.Sprintf("ip a show %s && ip a del %s dev %s", nicName, IP, nicName) _, debugNodeErr := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) } func removeDummyInterface(oc *exutil.CLI, nodeName, nicName string) { e2e.Logf("Remove a dummy interface %s on node %s \n", nicName, nodeName) cmd := fmt.Sprintf("ip a show %s && ip link del %s type dummy", nicName, nicName) output, debugNodeErr := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd) nicNotExistStr := fmt.Sprintf("Device \"%s\" does not exist", nicName) if debugNodeErr != nil && strings.Contains(output, nicNotExistStr) { e2e.Logf("The dummy interface %s does not exist on node %s ! \n", nicName, nodeName) return } o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) e2e.Logf("The dummy interface %s was removed from node %s ! \n", nicName, nodeName) } func (kkPod *kubeletKillerPod) createKubeletKillerPodOnNode(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", kkPod.template, "-p", "NAME="+kkPod.name, "NAMESPACE="+kkPod.namespace, "NODENAME="+kkPod.nodename) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create Kubelet-Killer pod %v", kkPod.name)) } func getNodeNameByIPv4(oc *exutil.CLI, nodeIPv4 string) (nodeName string) { nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet) o.Expect(err).NotTo(o.HaveOccurred()) for _, node := range nodeList.Items { _, IPv4 := getNodeIP(oc, node.Name) if IPv4 == nodeIPv4 { nodeName = node.Name break } } return nodeName } // patch resource in specific namespace, this is useful when patching resource to hosted cluster that is in "-n clusters" namespace func patchResourceAsAdminNS(oc *exutil.CLI, ns, resource, patch string) { err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(resource, "-p", patch, "--type=merge", "-n", ns).Execute() o.Expect(err).NotTo(o.HaveOccurred()) } // get proxy IP and port of hosted cluster func getProxyIPandPortOnHostedCluster(oc *exutil.CLI, hostedClusterName, namespace string) (string, string) { output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", hostedClusterName, "-n", namespace, "-o=jsonpath={.spec.configuration.proxy.httpProxy}").Output() o.Expect(err).NotTo(o.HaveOccurred()) if len(output) != 0 { //match out the proxy IP re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) proxyIP := re.FindAllString(output, -1)[0] proxyPort := strings.Split(output, ":")[2] e2e.Logf("proxy IP is %s, proxy port is %s", proxyIP, proxyPort) return proxyIP, proxyPort } else { return "", "" } } // getMachineNamesFromMachinePoolOnROSA gets all Machines in a Machinepool on a classic ROSA cluster by label // This function is only applicable to classic ROSA, as there is no "machine" resource on ROSA hosted cluster func getMachineNamesFromMachinePoolOnROSA(oc *exutil.CLI, machineSetName string, machineAPINamespace string) []string { e2e.Logf("Getting all Machines in a Machineset by specific label ...") machineNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machine", "-o=jsonpath={.items[*].metadata.name}", "-l", "machine.openshift.io/cluster-api-machine-type="+machineSetName, "-n", machineAPINamespace).Output() o.Expect(err).NotTo(o.HaveOccurred()) if machineNames != "" { return strings.Split(machineNames, " ") } else { return nil } } // Wait for machine on a classic ROSA to be ready - this function is only applicable to classic ROSA, as there is no "machine" resource on ROSA hosted cluster func waitMachineOnROSAReady(oc *exutil.CLI, machineName string, namespace string) error { return wait.Poll(15*time.Second, 10*time.Minute, func() (bool, error) { status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machine", machineName, "-n", namespace, "-o=jsonpath={.status.phase}").Output() e2e.Logf("Machine %v status is %v", machineName, status) if err != nil || status == "" { e2e.Logf("Failed to get machine status: %v, retrying...", err) return false, nil } if !strings.Contains(status, "Running") { e2e.Logf("Machine %v is in %v, not in Running state, retrying...", machineName, status) return false, nil } return
true, nil }) } type apbStaticExternalRoute struct { name string labelkey string labelvalue string ip1 string ip2 string bfd bool template string } type apbDynamicExternalRoute struct { name string labelKey string labelValue string podLabelKey string podLabelValue string namespaceLabelKey string namespaceLabelValue string bfd bool template string } func (sgwpr *apbStaticExternalRoute) deleteAPBExternalRoute(oc *exutil.CLI) { removeResource(oc, true, true, "apbexternalroute", sgwpr.name) } func (sgwpr *apbStaticExternalRoute) createAPBExternalRoute(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sgwpr.template, "-p", "NAME="+sgwpr.name, "LABELKEY="+sgwpr.labelkey, "LABELVALUE="+sgwpr.labelvalue, "IP1="+sgwpr.ip1, "IP2="+sgwpr.ip2, "BFD="+strconv.FormatBool(sgwpr.bfd)) if err1 != nil { e2e.Logf("the err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create apbexternalroute %v", sgwpr.name)) } func (sgwpr *apbDynamicExternalRoute) createAPBDynamicExternalRoute(oc *exutil.CLI) { err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sgwpr.template, "-p", "NAME="+sgwpr.name, "LABELKEY="+sgwpr.labelKey, "LABELVALUE="+sgwpr.labelValue, "PODLABELKEY="+sgwpr.podLabelKey, "PODLABELVALUE="+sgwpr.podLabelValue, "NSLABELKEY="+sgwpr.namespaceLabelKey, "NSLABELVALUE="+sgwpr.namespaceLabelValue, "BFD="+strconv.FormatBool(sgwpr.bfd)) if err1 != nil { e2e.Logf("Could not create due to err:%v, and try next round", err1) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create APB External Route %s due to %v", sgwpr.name, err)) } func checkAPBExternalRouteStatus(oc *exutil.CLI, gwName string, expectedStatus string) error { checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { output, gwErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", gwName).Output() if gwErr != nil { e2e.Logf("Failed to get apbexternalroute %v, error: %s. Trying again", gwName, gwErr) return false, nil } if !strings.Contains(output, expectedStatus) { e2e.Logf("Expected status is %v, the apbexternalroute status is %v, trying again.", expectedStatus, output) return false, nil } return true, nil }) return checkErr } func checkEgressFWStatus(oc *exutil.CLI, fwName string, ns string, expectedStatus string) error { checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { output, fwErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", "-n", ns, fwName).Output() if fwErr != nil { e2e.Logf("Failed to get egressfirewall %v, error: %s. 
Trying again", fwName, fwErr) return false, nil } if !strings.Contains(output, expectedStatus) { e2e.Logf("Expected status is %v, the egressfirewall status is %v, trying again.", expectedStatus, output) return false, nil } return true, nil }) return checkErr } func checkNodeIdentityWebhook(oc *exutil.CLI) (string, error) { webhooks, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ValidatingWebhookConfiguration", "network-node-identity.openshift.io", "-o=jsonpath={.webhooks[*].name}").Output() return webhooks, err } func disableNodeIdentityWebhook(oc *exutil.CLI, namespace string, cmName string) (string, error) { _, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("configmap", cmName, "-n", namespace, "--from-literal=enabled=false").Output() o.Eventually(func() bool { result := true _, cmErr := oc.AsAdmin().Run("get").Args("configmap/"+cmName, "-n", namespace).Output() if cmErr != nil { e2e.Logf(fmt.Sprintf("Wait for configmap/%s to be created", cmName)) result = false } return result }, "60s", "5s").Should(o.BeTrue(), fmt.Sprintf("configmap/%sis not created", cmName)) return "", err } // get lr-policy-list from logical_router_policy table func getlrPolicyList(oc *exutil.CLI, nodeName, tableID string, expected bool) ([]string, error) { // get the ovnkube-node pod on the node ovnKubeNodePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(podErr).NotTo(o.HaveOccurred()) o.Expect(ovnKubeNodePod).ShouldNot(o.Equal("")) var lspOutput string var lspErr error var lrPolicyList []string lspCmd := "ovn-nbctl lr-policy-list ovn_cluster_router | grep '" + tableID + " '" checkLspErr := wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) { lspOutput, lspErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePod, "northd", lspCmd) if lspErr == nil && lspOutput != "" && expected { cmdOutputLines := strings.Split(lspOutput, "\n") for i := 0; i < len(cmdOutputLines); i++ { lrPolicyList = append(lrPolicyList, cmdOutputLines[i]) } return true, nil } // check lr-policy-list grep with tableID returned empty, usually there is "command terminated with exit code 1" to lspErr returned, so lspErr is not checked here if lspOutput != "ip4.src ==" && !expected { e2e.Logf("lr-policy-list of table %s is cleared up as expected", tableID) return true, nil } e2e.Logf("Waiting for expected result to be synced, try again ...") return false, nil }) if checkLspErr != nil { e2e.Logf("The command check result in ovndb is not expected ! 
See below output \n %s ", lspOutput) } return lrPolicyList, checkLspErr } // Create a kubeconfig that impersonates ovnkube-node func generateKubeConfigFileForContext(oc *exutil.CLI, nodeName string, ovnKubeNodePod string, kubeConfigFilePath string, userContext string) bool { var ( pemFile = "/etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem" certFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" clusterName = "default-cluster" userName = "default-user" ) baseDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns/cluster", "-o=jsonpath={.spec.baseDomain}").Output() if err != nil || baseDomain == "" { e2e.Logf("Base Domain could not retrieved") return false } e2e.Logf("Base Domain %v", baseDomain) apiServerFQDN := fmt.Sprintf("api.%s", baseDomain) setUpClusterCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl config set-cluster %s --server=https://%s:6443 --certificate-authority %s --embed-certs", kubeConfigFilePath, clusterName, apiServerFQDN, certFile) setUserCredentialsCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl config set-credentials %s --client-key %s --client-certificate %s --embed-certs", kubeConfigFilePath, userName, pemFile, pemFile) setContextCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl config set-context %s --cluster %s --user %s", kubeConfigFilePath, userContext, clusterName, userName) testContextCmd := fmt.Sprintf("export KUBECONFIG=%s; kubectl config use-context %s; oc get nodes", kubeConfigFilePath, userContext) cmdOutput, cmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePod, "ovnkube-controller", setUpClusterCmd) if cmdErr != nil || !strings.Contains(cmdOutput, "Cluster "+"\""+clusterName+"\""+" set.") { e2e.Logf("Setting cluster for impersonation failed %v.", cmdErr) return false } e2e.Logf("Cluster set - %v", cmdOutput) cmdOutput, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePod, "ovnkube-controller", setUserCredentialsCmd) if cmdErr != nil || !strings.Contains(cmdOutput, "User "+"\""+userName+"\""+" set.") { e2e.Logf("Setting user credentials for impersonation failed %v.", cmdErr) return false } e2e.Logf("User credentials set - %v", cmdOutput) cmdOutput, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePod, "ovnkube-controller", setContextCmd) if cmdErr != nil || !strings.Contains(cmdOutput, "Context "+"\""+userContext+"\""+" created.") { e2e.Logf("Context creation for impersonation failed %v.", cmdErr) return false } e2e.Logf("Context created - %v", cmdOutput) cmdOutput, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubeNodePod, "ovnkube-controller", testContextCmd) if cmdErr != nil || !strings.Contains(cmdOutput, "Switched to context "+"\""+userContext+"\"") || !strings.Contains(cmdOutput, nodeName) { e2e.Logf("Test command for impersonation failed %v.", cmdErr) return false } e2e.Logf("Successfully created and tested kubeconfig for impersonation") return true } func findNodesWithSameSubnet(oc *exutil.CLI, nodeList []string) (bool, []string) { sameSubNode := make(map[string][]string) for _, node := range nodeList { subNet := getNodeSubnet(oc, node, "default") if _, ok := sameSubNode[subNet]; ok { sameSubNode[subNet] = append(sameSubNode[subNet], node) if len(sameSubNode[subNet]) >= 2 { return true, sameSubNode[subNet] } } else { sameSubNode[subNet] = []string{node} } } return false, nil } // Get endpoints for service:port in northdb of the node func 
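// Illustrative usage sketch, not part of the original suite: picking two nodes that share a
// subnet before running a test that needs co-located node subnets; the function name is
// hypothetical.
func exampleFindNodePairOnSameSubnet(oc *exutil.CLI) {
	nodeList, err := exutil.GetAllNodesbyOSType(oc, "linux")
	o.Expect(err).NotTo(o.HaveOccurred())
	found, nodePair := findNodesWithSameSubnet(oc, nodeList)
	if !found {
		g.Skip("Did not find two nodes sharing the same subnet, skip the test")
	}
	e2e.Logf("Nodes %s and %s share the same subnet", nodePair[0], nodePair[1])
}
// Get endpoints for service:port in northdb of the node func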
getLBListEndpointsbySVCIPPortinNBDB(oc *exutil.CLI, nodeName, svcPort string) ([]string, error) { // get the ovnkube-node pod of the node ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(podErr).NotTo(o.HaveOccurred()) o.Expect(ovnKubePod).ShouldNot(o.Equal("")) var cmdOutput string var cmdErr error var endpoints []string lbCmd := "ovn-nbctl lb-list | grep \"" + svcPort + "\" | awk '{print $NF}'" checkOVNDbErr := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) { cmdOutput, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubePod, "northd", lbCmd) if cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try again ...,", cmdErr) return false, nil } if cmdOutput != "" { cmdOutputLines := strings.Split(cmdOutput, ",") for i := 0; i < len(cmdOutputLines); i++ { endpoints = append(endpoints, cmdOutputLines[i]) } return true, nil } e2e.Logf("Waiting for expected result to be synced, try again ...") return false, nil }) if checkOVNDbErr != nil { e2e.Logf("The command check result in ovndb is not expected ! See below output \n %s ", cmdOutput) } return endpoints, checkOVNDbErr } // Get all pods with same label and also are in same state func getAllPodsWithLabelAndCertainState(oc *exutil.CLI, namespace string, label string, podState string) []string { var allPodsWithCertainState []string allPodsWithLabel, getPodErr := exutil.GetAllPodsWithLabel(oc, namespace, label) o.Expect(getPodErr).NotTo(o.HaveOccurred()) o.Expect(len(allPodsWithLabel)).ShouldNot(o.Equal(0)) for _, eachPod := range allPodsWithLabel { podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, eachPod).Output() if strings.Contains(podStatus, podState) { allPodsWithCertainState = append(allPodsWithCertainState, eachPod) } } return allPodsWithCertainState } // Get OVN-Kubernetes management interface (ovn-k8s-mp0) IPv6 address for the node func getOVNK8sNodeMgmtIPv6(oc *exutil.CLI, nodeName string) string { var cmdOutput string var err error checkErr := wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) { cmdOutput, err = exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", "/usr/sbin/ip -o -6 addr show dev ovn-k8s-mp0 | awk '$3 == \"inet6\" && $6 == \"global\" {print $4}' | cut -d'/' -f1") if cmdOutput == "" || err != nil { e2e.Logf("Did not get node's IPv6 management interface, errors: %v, try again", err) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Failed to get IPv6 management interface for node %v, err: %v", nodeName, checkErr)) nodeOVNK8sMgmtIPv6 := strings.Split(cmdOutput, "\n")[0] return nodeOVNK8sMgmtIPv6 } // Get joint switch IP(s) by node name func getJoinSwitchIPofNode(oc *exutil.CLI, nodeName string) ([]string, []string) { // get the ovnkube-node pod on the node ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(podErr).NotTo(o.HaveOccurred()) o.Expect(ovnKubePod).ShouldNot(o.Equal("")) var cmdOutput string var joinSwitchIPv4s, joinSwitchIPv6s []string var cmdErr error cmd := "ovn-nbctl get logical_router_port rtoj-GR_" + nodeName + " networks" checkOVNDbErr := wait.Poll(3*time.Second, 2*time.Minute, func() (bool, error) { cmdOutput, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubePod, "northd", cmd) if cmdOutput == "" || cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try again 
...,", cmdErr) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkOVNDbErr, fmt.Sprintf("Failed to get join switch networks for node %v, err: %v", nodeName, checkOVNDbErr)) // output string would be something like: ["100.64.0.8/16", "fd98::8/64"] rightTrimed := strings.TrimRight(strings.TrimLeft(cmdOutput, "["), "]") //trim left [ and right ] from the output string outputs := strings.Split(rightTrimed, ", ") if len(outputs) > 0 { for _, str := range outputs { ipv4orv6 := strings.TrimRight(strings.TrimLeft(str, "\""), "\"") // trim left " and right " around IP address string if IsIPv4(ipv4orv6) { joinSwitchIPv4s = append(joinSwitchIPv4s, ipv4orv6) } if IsIPv6(ipv4orv6) { joinSwitchIPv6s = append(joinSwitchIPv6s, ipv4orv6) } } } return joinSwitchIPv4s, joinSwitchIPv6s } // Get host network IPs in NBDB of node func getHostNetworkIPsinNBDB(oc *exutil.CLI, nodeName string, externalID string) []string { // get the ovnkube-node pod on the node ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName) o.Expect(podErr).NotTo(o.HaveOccurred()) o.Expect(ovnKubePod).ShouldNot(o.Equal("")) var cmdOutput string var hostNetworkIPs []string var cmdErr error cmd := "ovn-nbctl --column address find address_set " + externalID checkOVNDbErr := wait.Poll(3*time.Second, 2*time.Minute, func() (bool, error) { cmdOutput, cmdErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKubePod, "northd", cmd) if cmdOutput == "" || cmdErr != nil { e2e.Logf("%v,Waiting for expected result to be synced, try again ...,", cmdErr) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkOVNDbErr, fmt.Sprintf("Failed to get host network IPs for node %v, err: %v", nodeName, checkOVNDbErr)) // two example outputs from "ovn-nbctl --column address find address_set <externalID>" command // addresses : ["10.128.0.2", "10.128.2.2", "10.129.0.2", "10.130.0.2", "10.130.2.2", "10.131.2.2", "100.64.0.2"] // addresses : ["fd01:0:0:1::2", "fd01:0:0:2::2", "fd01:0:0:3::2", "fd01:0:0:5::2", "fd01:0:0:7::2", "fd01:0:0:8::2"] // match out all IP (v4 or v6) addresses under " " re := regexp.MustCompile(`"[^",]+"`) ipStrs := re.FindAllString(cmdOutput, -1) for _, eachIpString := range ipStrs { ip := strings.TrimRight(strings.TrimLeft(eachIpString, "\""), "\"") //trim left " and right " from the string to get IP address hostNetworkIPs = append(hostNetworkIPs, ip) } return hostNetworkIPs } // Check if second array is a subset of first array func unorderedContains(first, second []string) bool { set := make(map[string]bool) for _, element := range first { set[element] = true } for _, element := range second { if !set[element] { return false } } return true } // Get all host CIDRs for a cluster node, including those for multiple interefaces func getAllHostCIDR(oc *exutil.CLI, nodeName string) ([]string, []string) { var allNodeIPsv4, allNodeIPsv6 []string outputString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.k8s\\.ovn\\.org\\/host-cidrs}").Output() o.Expect(err).NotTo(o.HaveOccurred()) // sample output from the command: ["172.22.0.237/24","192.168.111.25/24","fd2e:6f44:5dd8:c956::19/128"] hostCIDRsString := strings.TrimRight(strings.TrimLeft(outputString, "["), "]") // trim the left [ and right ] around the CIDRs string hostCIDRs := strings.Split(hostCIDRsString, ",") if len(hostCIDRs) != 0 { for _, eachCIDR := range hostCIDRs { ipString := 
strings.TrimRight(strings.TrimLeft(eachCIDR, "\""), "\"") // trim the left " and right "" around the IP string ip := strings.Split(ipString, "/")[0] //remove IP prefix, only get IP address if IsIPv4(ip) { allNodeIPsv4 = append(allNodeIPsv4, ip) } if IsIPv6(ip) { allNodeIPsv6 = append(allNodeIPsv6, ip) } } } e2e.Logf("\n cluster ipStackType: %s, for node %s, got all its v4 CIDRs: %v, v6 CIDRs: %v\n", checkIPStackType(oc), nodeName, allNodeIPsv4, allNodeIPsv6) return allNodeIPsv4, allNodeIPsv6 } // Check a node can be accessed from any of its host interface from a pod func checkNodeAccessibilityFromAPod(oc *exutil.CLI, nodeName, ns, podName string) bool { // Get all host IPs of the node ipStackType := checkIPStackType(oc) allNodeIPsv4, allNodeIPsv6 := getAllHostCIDR(oc, nodeName) if ipStackType == "dualstack" || ipStackType == "ipv4single" { for _, nodeIPv4Addr := range allNodeIPsv4 { _, err := e2eoutput.RunHostCmd(ns, podName, "ping -c 2 "+nodeIPv4Addr) if err != nil { e2e.Logf(fmt.Sprintf("Access to node %s failed at interface %s", nodeName, nodeIPv4Addr)) return false } } } if ipStackType == "dualstack" || ipStackType == "ipv6single" { for _, nodeIPv6Addr := range allNodeIPsv6 { _, err := e2eoutput.RunHostCmd(ns, podName, "ping -c 2 "+nodeIPv6Addr) if err != nil { e2e.Logf(fmt.Sprintf("Access to node %s failed at interface %s", nodeName, nodeIPv6Addr)) return false } } } return true } func verifySctpConnPod2IP(oc *exutil.CLI, namespace, sctpServerPodIP, sctpServerPodName, sctpClientPodname string, pass bool) { e2e.Logf("sctpserver pod start to wait for sctp traffic") msg, err := e2eoutput.RunHostCmdWithRetries(namespace, sctpServerPodName, "ps aux | grep sctp", 3*time.Second, 30*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp") { e2e.Logf("sctpserver pod is already listening on port 30102.") } else { cmdNcat, _, _, _ := oc.AsAdmin().Run("exec").Args("-n", namespace, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background() defer cmdNcat.Process.Kill() e2e.Logf("check sctp process enabled in the sctp server pod") o.Eventually(func() string { msg, err := e2eoutput.RunHostCmdWithRetries(namespace, sctpServerPodName, "ps aux | grep sctp", 3*time.Second, 30*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) return msg }, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod") } e2e.Logf("sctpclient pod start to send sctp traffic") e2eoutput.RunHostCmd(namespace, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }") e2e.Logf("server sctp process will end after get sctp traffic from sctp client") if pass { o.Eventually(func() string { msg, err := e2eoutput.RunHostCmdWithRetries(namespace, sctpServerPodName, "ps aux | grep sctp", 3*time.Second, 30*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) return msg }, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client") } else { msg, err := e2eoutput.RunHostCmd(namespace, sctpServerPodName, "ps aux | grep sctp") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(msg).Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process ended after get sctp traffic from sctp client") } } // Get apiVIP or ingessVIP on the cluster (vSphere or BM) func GetVIPOnCluster(oc *exutil.CLI, platform string, vipType string) []string { if 
!strings.Contains(platform, "baremetal") && !strings.Contains(platform, "vsphere") { g.Skip("Skip for non-vSphere/non-Baremetal cluster") } var cmdOutput, jsonpathstr string var err error var vips []string switch vipType { case "apiVIP": jsonpathstr = "-o=jsonpath={.status.platformStatus." + platform + ".apiServerInternalIPs}" case "ingressVIP": jsonpathstr = "-o=jsonpath={.status.platformStatus." + platform + ".ingressIPs}" default: e2e.Failf("VIP Type only can be apiVIP or ingressVIP") } checkErr := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 10*time.Second, false, func(ctx context.Context) (bool, error) { cmdOutput, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", jsonpathstr).Output() if cmdOutput == "" || err != nil { e2e.Logf("Did not get %s, errors: %v, try again", vipType, err) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Failed to get %s on this platform %v, err: %v", vipType, platform, checkErr)) // match out all IP (v4 or v6) addresses under " " re := regexp.MustCompile(`"[^",]+"`) ipStrs := re.FindAllString(cmdOutput, -1) for _, eachIpString := range ipStrs { ip := strings.TrimRight(strings.TrimLeft(eachIpString, "\""), "\"") //trim left " and right " from the string to get IP address vips = append(vips, ip) } return vips } // Find apiVIP or ingressVIP node on vSphere or BM func FindVIPNode(oc *exutil.CLI, vip string) string { nodeList, err := exutil.GetAllNodesbyOSType(oc, "linux") o.Expect(err).NotTo(o.HaveOccurred()) defaultInt, _ := getDefaultInterface(oc) for _, node := range nodeList { output, err := exutil.DebugNode(oc, node, "bash", "-c", "ip add show "+defaultInt) o.Expect(err).NotTo(o.HaveOccurred()) if strings.Contains(output, vip) { e2e.Logf("Node %s is VIP node", node) return node } } return "" } // Return IPv4 address and IPv4 address with prefix func getIPv4AndIPWithPrefixForNICOnNode(oc *exutil.CLI, node, nic string) (string, string) { cmd := fmt.Sprintf("ip -4 -brief a show %s | awk '{print $3}' ", nic) output, debugNodeErr := exutil.DebugNode(oc, node, "bash", "-c", cmd) o.Expect(debugNodeErr).NotTo(o.HaveOccurred()) pattern := `(\d+\.\d+\.\d+\.\d+/\d+)` re := regexp.MustCompile(pattern) matches := re.FindStringSubmatch(output) o.Expect(len(matches) > 1).Should(o.BeTrue()) ipAddressWithPrefix := matches[1] e2e.Logf("IP address with prefix:", ipAddressWithPrefix) ipParts := strings.Split(ipAddressWithPrefix, "/") ipAddress := ipParts[0] e2e.Logf("The IPv4 of interface %s on node %s is %s and ipAddressWithPrefix is %s", nic, node, ipAddress, ipAddressWithPrefix) return ipAddress, ipAddressWithPrefix } // check respective config availability for IPsec NS on external host specific to Beijing BM host. // this func might be scaled up in future if we comes down to support net2net as well func applyConfigTypeExtHost(leftPublicIP, configType string) error { switch configType { case "host2hostTransportRDU2": err := sshRunCmd(leftPublicIP, "core", "sudo cp /home/core/nstest_host2host_transport.conf.bak.rdu2 /etc/ipsec.d/nstest.conf && sudo systemctl restart ipsec") if err != nil { return fmt.Errorf("Could not apply host2host config. Check External Host %v", err) } case "host2hostTunnelRDU2": err := sshRunCmd(leftPublicIP, "core", "sudo cp /home/core/nstest_host2host_tunnel.conf.bak.rdu2 /etc/ipsec.d/nstest.conf && sudo systemctl restart ipsec") if err != nil { return fmt.Errorf("Could not apply host2host config. 
Check External Host %v", err) } case "host2netTransportRDU2": err := sshRunCmd(leftPublicIP, "core", "sudo cp /home/core/nstest_host2net_transport.conf.rdu2 /etc/ipsec.d/nstest.conf && sudo systemctl restart ipsec") if err != nil { return fmt.Errorf("Could not apply host2net config. Check External Host %v", err) } case "host2netTunnelRDU2": err := sshRunCmd(leftPublicIP, "core", "sudo cp /home/core/nstest_host2net_tunnel.conf.rdu2 /etc/ipsec.d/nstest.conf && sudo systemctl restart ipsec") if err != nil { return fmt.Errorf("Could not apply host2net config. Check External Host %v", err) } } return nil } // get hostname for LB service, this fuction is likely to be useful only for AWS, other public cloud platforms may not give LB service hostname func getLBSVCHostname(oc *exutil.CLI, namespace, svc string) string { var LBSVCHostname string var cmdErr error platform := exutil.CheckPlatform(oc) if !strings.Contains(platform, "aws") { g.Skip("Skip for non-AWS cluster") } e2e.Logf("Getting the Load Balancer service hostname ...") getLBSVCHostnameErr := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { LBSVCHostname, cmdErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", svc, "-n", namespace, "-o=jsonpath={.status.loadBalancer.ingress[0].hostname}").Output() if cmdErr != nil || LBSVCHostname == "pending" || LBSVCHostname == "" { e2e.Logf("%v,Waiting for expected result to be synced, try again ...,", cmdErr) return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(getLBSVCHostnameErr, fmt.Sprintf("Could not get LB service's hostname, err: %v", getLBSVCHostnameErr)) return LBSVCHostname } // get IP address of LB service func getLBSVCIP(oc *exutil.CLI, namespace string, svcName string) string { var svcExternalIP string var cmdErr error checkErr := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) { svcExternalIP, cmdErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.status.loadBalancer.ingress[0].ip}").Output() if svcExternalIP == "" || cmdErr != nil { e2e.Logf("Waiting for lb service IP assignment. Trying again...") return false, nil } return true, nil }) exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Failed to get externalIP to the externalIP service %s", svcName)) return svcExternalIP } func getNetworkDiagnosticsAvailable(oc *exutil.CLI) string { statusOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.config.openshift.io/cluster", "-o=jsonpath={.status.conditions[?(@.type == \"NetworkDiagnosticsAvailable\")].status}").Output() o.Expect(err).NotTo(o.HaveOccurred()) statusOutput = strings.ToLower(statusOutput) e2e.Logf("NetworkDiagnosticsAvailable status is %s", statusOutput) return statusOutput } func verifyDesitnationAccess(oc *exutil.CLI, podName, podNS, domainName string, passOrFail bool) { curlCmd := fmt.Sprintf("curl -s -I %s --connect-timeout 5 ", domainName) if passOrFail { _, err := e2eoutput.RunHostCmdWithRetries(podNS, podName, curlCmd, 10*time.Second, 20*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) ipStackType := checkIPStackType(oc) if ipStackType == "dualstack" { curlCmd = fmt.Sprintf("curl -s -6 -I %s --connect-timeout 5", domainName) _, err := e2eoutput.RunHostCmdWithRetries(podNS, podName, curlCmd, 10*time.Second, 20*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) } } else { o.Eventually(func() error { _, err := e2eoutput.RunHostCmd(podNS, podName, curlCmd) return err }, "20s", "10s").Should(o.HaveOccurred()) } } // First ip is ipv4, secondary is ipv6. 
func getIPFromDnsName(dnsName string) (string, string) {
	ips, err := net.LookupIP(dnsName)
	o.Expect(err).NotTo(o.HaveOccurred())
	var ipv4, ipv6 string
	for _, ip := range ips {
		if ip.To4() != nil && ipv4 == "" {
			ipv4 = ip.String()
		} else if strings.Contains(ip.String(), ":") && ipv6 == "" {
			ipv6 = ip.String()
		}
		if ipv4 != "" && ipv6 != "" {
			break
		}
	}
	e2e.Logf("The resolved IPv4, IPv6 addresses for dns name %s are %s,%s", dnsName, ipv4, ipv6)
	return ipv4, ipv6
}

// verifyDstIPAccess checks whether a pod can (or cannot) curl the given destination IP
func verifyDstIPAccess(oc *exutil.CLI, podName, podNS, ip string, passOrFail bool) {
	var curlCmd string
	if strings.Contains(ip, ":") {
		e2e.Logf("The IP %s is an IPv6 address.", ip)
		curlCmd = fmt.Sprintf("curl -s -6 -I [%s] --connect-timeout 5 ", ip)
	} else {
		e2e.Logf("The IP %s is an IPv4 address.", ip)
		curlCmd = fmt.Sprintf("curl -s -I %s --connect-timeout 5 ", ip)
	}
	if passOrFail {
		_, err := e2eoutput.RunHostCmdWithRetries(podNS, podName, curlCmd, 10*time.Second, 120*time.Second)
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		o.Eventually(func() error {
			_, err := e2eoutput.RunHostCmd(podNS, podName, curlCmd)
			return err
		}, "20s", "10s").Should(o.HaveOccurred())
	}
}

// GetAPIVIPOnCluster obtains the API VIP on a baremetal cluster
func GetAPIVIPOnCluster(oc *exutil.CLI) string {
	apiVIP := ""
	var err error
	o.Eventually(func() error {
		apiVIP, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.baremetal.apiServerInternalIP}").Output()
		return err
	}, "60s", "5s").ShouldNot(o.HaveOccurred())
	return apiVIP
}

func (pod *httpserverPodResourceNode) createHttpservePodNodeByAdmin(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "CONTAINERPORT="+strconv.Itoa(int(pod.containerport)), "HOSTPORT="+strconv.Itoa(int(pod.hostport)), "NODENAME="+pod.nodename)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}

// CurlPod2NodePass checks connectivity from a pod to a node that has an httpserverPod on it
func CurlPod2NodePass(oc *exutil.CLI, namespaceSrc, podNameSrc, nodeNameDst, DstHostPort string) {
	nodeIP2, nodeIP1 := getNodeIP(oc, nodeNameDst)
	if nodeIP2 != "" {
		_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
		o.Expect(err).NotTo(o.HaveOccurred())
		_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP2, DstHostPort))
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}

// CurlPod2NodeFail ensures there is no connectivity from a pod to a node that has an httpserverPod on it
func CurlPod2NodeFail(oc *exutil.CLI, namespaceSrc, podNameSrc, nodeNameDst, DstHostPort string) {
	nodeIP2, nodeIP1 := getNodeIP(oc, nodeNameDst)
	if nodeIP2 != "" {
		_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
		o.Expect(err).To(o.HaveOccurred())
		_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP2, DstHostPort))
		o.Expect(err).To(o.HaveOccurred())
	} else {
		_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
		o.Expect(err).To(o.HaveOccurred())
	}
}

// CurlPod2HostPass checks connectivity from a pod to a host that has an httpserverPod on it
func CurlPod2HostPass(oc *exutil.CLI, namespaceSrc, podNameSrc, hostip, DstHostPort string) {
	_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(hostip, DstHostPort))
	o.Expect(err).NotTo(o.HaveOccurred())
}

// CurlPod2HostFail ensures there is no connectivity from a pod to a host that has an httpserverPod on it
func CurlPod2HostFail(oc *exutil.CLI, namespaceSrc, podNameSrc, hostip, DstHostPort string) {
	_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(hostip, DstHostPort))
	o.Expect(err).To(o.HaveOccurred())
}

// checkFips checks whether the cluster is FIPS enabled
func checkFips(oc *exutil.CLI) bool {
	node, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "--selector=node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o=jsonpath={.items[0].metadata.name}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	fipsInfo, err := exutil.DebugNodeWithChroot(oc, node, "bash", "-c", "fips-mode-setup --check")
	o.Expect(err).NotTo(o.HaveOccurred())
	if strings.Contains(fipsInfo, "FIPS mode is disabled.") {
		e2e.Logf("FIPS is not enabled.")
		return false
	}
	e2e.Logf("FIPS is enabled.")
	return true
}

// checkIPv6PublicAccess checks whether the cluster can reach the public Internet over IPv6
func checkIPv6PublicAccess(oc *exutil.CLI) bool {
	workNode, err := exutil.GetFirstWorkerNode(oc)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	curlCMD := "curl -6 www.google.com --connect-timeout 5 -I"
	output, err := exutil.DebugNode(oc, workNode, "bash", "-c", curlCMD)
	if !strings.Contains(output, "HTTP") || err != nil {
		e2e.Logf(output)
		e2e.Logf("Unable to access the public Internet with IPv6 from the cluster.")
		return false
	}
	e2e.Logf("Successfully connected to the public Internet with IPv6 from the cluster.")
	return true
}

// forceRebootNode force-reboots the given node and waits for the network operator to settle
func forceRebootNode(oc *exutil.CLI, nodeName string) {
	e2e.Logf("\nRebooting node %s....", nodeName)
	runCmd, _, _, runCmdErr := oc.AsAdmin().Run("debug").Args("node/"+nodeName, "--", "chroot", "/host", "reboot", "--force").Background()
	// verify the debug command started before deferring cleanup, to avoid a nil dereference
	o.Expect(runCmdErr).NotTo(o.HaveOccurred())
	defer runCmd.Process.Kill()
	waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
}

// createResourceFromFileWithError creates resources in the specified namespace from a file (not a template),
// returning the error so callers can assert on expected failures
func createResourceFromFileWithError(oc *exutil.CLI, ns, file string) error {
	return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", ns).Execute()
}

// Struct to create a pod with a customized response
type customResponsePodResource struct {
	name        string
	namespace   string
	labelKey    string
	labelVal    string
	responseStr string
	template    string
}

func (pod *customResponsePodResource) createCustomResponsePod(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABELKEY="+pod.labelKey, "LABELVAL="+pod.labelVal, "RESPONSESTR="+pod.responseStr)
		if err1 != nil {
			e2e.Logf("the err:%v, and try again...", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create pod %s due to %v", pod.name, err))
}

// Struct to create a service with session affinity
type sessionAffinityServiceResource struct {
	name           string
	namespace      string
	ipFamilyPolicy string
	selLabelKey    string
	SelLabelVal    string
	template       string
}

func (svc *sessionAffinityServiceResource) createSessionAffiniltyService(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", svc.template, "-p", "NAME="+svc.name, "NAMESPACE="+svc.namespace, "IPFAMILYPOLICY="+svc.ipFamilyPolicy, "SELLABELKEY="+svc.selLabelKey, "SELLABELVAL="+svc.SelLabelVal)
		if err1 != nil {
			e2e.Logf("the err:%v, and try again...", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create service %s due to %v", svc.name, err))
}

// getEnabledFeatureGates returns the list of featuregates enabled on the cluster
func getEnabledFeatureGates(oc *exutil.CLI) ([]string, error) {
	enabledFeatureGates, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.status.featureGates[0].enabled[*].name}").Output()
	if err != nil {
		return nil, err
	}
	return strings.Split(enabledFeatureGates, " "), nil
}

// IsFeaturegateEnabled checks whether a featuregate is enabled
func IsFeaturegateEnabled(oc *exutil.CLI, featuregate string) (bool, error) {
	enabledFeatureGates, err := getEnabledFeatureGates(oc)
	if err != nil {
		return false, err
	}
	for _, f := range enabledFeatureGates {
		if f == featuregate {
			return true, nil
		}
	}
	return false, nil
}

// SkipIfNoFeatureGate skips the test when the given featuregate is not enabled
func SkipIfNoFeatureGate(oc *exutil.CLI, featuregate string) {
	enabled, err := IsFeaturegateEnabled(oc, featuregate)
	o.Expect(err).NotTo(o.HaveOccurred(), "Error getting enabled featuregates")
	if !enabled {
		g.Skip(fmt.Sprintf("Featuregate %s is not enabled in this cluster", featuregate))
	}
}

// createVRF creates a VRF on a node through NMState
func (vrf *VRFResource) createVRF(oc *exutil.CLI) error {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vrf.template, "-p", "NAME="+vrf.name, "INTFNAME="+vrf.intfname, "NODENAME="+vrf.nodename, "TABLEID="+strconv.Itoa(int(vrf.tableid)))
		if err1 != nil {
			e2e.Logf("Creating VRF on the node failed :%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("fail to create VRF on the node %v", vrf.name)
	}
	return nil
}

// createNamedPortPod creates a pod that exposes a named container port, from a template
func (namedPortPod *namedPortPodResource) createNamedPortPod(oc *exutil.CLI) {
	exutil.By("Creating named port pod from template")
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", namedPortPod.template, "-p", "NAME="+namedPortPod.name, "NAMESPACE="+namedPortPod.namespace, "PODLABELKEY="+namedPortPod.podLabelKey, "PODLABELVAL="+namedPortPod.podLabelVal, "PORTNAME="+namedPortPod.portname, "CONTAINERPORT="+strconv.Itoa(int(namedPortPod.containerport)))
		if err1 != nil {
			e2e.Logf("Error creating resource:%v, and trying again", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create named port pod %v", namedPortPod.name))
}

// getTcpdumpOnNodeCmdFromPod runs tcpdump on a node while curling from a pod, and returns the captured output
func getTcpdumpOnNodeCmdFromPod(oc *exutil.CLI, nodeName, tcpdumpCmd, namespace, podname, cmdOnPod string) string {
	exutil.By("Enable tcpdump on node")
	cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("-n", "default", "node/"+nodeName, "--", "bash", "-c", tcpdumpCmd).Background()
	defer cmdTcpdump.Process.Kill()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Wait 5 seconds to let tcpdump get ready to capture traffic
	time.Sleep(5 * time.Second)
	exutil.By("Curl external host:port from test pods")
	var tcpdumpErr error
	checkErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) {
		_, curlErr := e2eoutput.RunHostCmd(namespace, podname, cmdOnPod)
		if curlErr == nil {
			tcpdumpErr = cmdTcpdump.Wait()
			e2e.Logf("The captured tcpdump output is: \n%s\n", cmdOutput.String())
		}
		if curlErr != nil || tcpdumpErr != nil {
			e2e.Logf("Getting error at executing curl command: %v or at waiting for tcpdump: %v, try again ...", curlErr, tcpdumpErr)
			return false, nil
		}
		if cmdOutput.String() == "" {
			e2e.Logf("Did not capture tcpdump packets, try again ...")
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Unable to get tcpdump when curling from pod: %s from namespace: %s", podname, namespace))
	cmdTcpdump.Process.Kill()
	return cmdOutput.String()
}

// collectMustGather runs `oc adm must-gather` with optional destination dir, image stream, and extra parameters
func collectMustGather(oc *exutil.CLI, dstDir string, imageStream string, parameters []string) (string, error) {
	args := []string{"must-gather"}
	if dstDir != "" {
		args = append(args, "--dest-dir="+dstDir)
	}
	if imageStream != "" {
		args = append(args, "--image-stream="+imageStream)
	}
	if len(parameters) > 0 {
		args = append(args, "--")
		args = append(args, parameters...)
	}
	output, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args(args...).Output()
	if err != nil {
		e2e.Logf("collect must-gather failed, err: %v", err)
		return "", err
	}
	return output, nil
}

// verifyPodConnCrossNodes checks pod-to-pod connectivity across nodes using a hello-pod daemonset
func verifyPodConnCrossNodes(oc *exutil.CLI) bool {
	buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
	helloDaemonset := filepath.Join(buildPruningBaseDir, "hello-pod-daemonset.yaml")
	pass := true
	exutil.By("Create a temporary project for pods-to-pods connection checking.")
	oc.SetupProject()
	ns := oc.Namespace()
	exutil.By("Create hello-pod-daemonset in namespace.")
	createResourceFromFile(oc, ns, helloDaemonset)
	err := waitForPodWithLabelReady(oc, ns, "name=hello-pod")
	exutil.AssertWaitPollNoErr(err, "hello-pod daemonset pods are not ready")
	exutil.By("Checking pods connection")
	pods := getPodName(oc, ns, "name=hello-pod")
	for _, srcPod := range pods {
		for _, targetPod := range pods {
			if targetPod != srcPod {
				podIP1, podIP2 := getPodIP(oc, ns, targetPod)
				e2e.Logf("Curling from pod: %s with IP: %s\n", srcPod, podIP1)
				_, err := e2eoutput.RunHostCmd(ns, srcPod, "curl --connect-timeout 10 -s "+net.JoinHostPort(podIP1, "8080"))
				if err != nil {
					e2e.Logf("pods connection failed from %s to %s:8080", srcPod, podIP1)
					srcNode, err := exutil.GetPodNodeName(oc, ns, srcPod)
					o.Expect(err).NotTo(o.HaveOccurred())
					dstNode, err := exutil.GetPodNodeName(oc, ns, targetPod)
					o.Expect(err).NotTo(o.HaveOccurred())
					e2e.Logf("pods connection failed between nodes %s and %s", srcNode, dstNode)
					pass = false
				}
				if podIP2 != "" {
					e2e.Logf("Curling from pod: %s with IP: %s\n", srcPod, podIP2)
					_, err := e2eoutput.RunHostCmd(ns, srcPod, "curl --connect-timeout 10 -s "+net.JoinHostPort(podIP2, "8080"))
					if err != nil {
						e2e.Logf("pods connection failed from %s to %s:8080", srcPod, podIP2)
						srcNode, err := exutil.GetPodNodeName(oc, ns, srcPod)
						o.Expect(err).NotTo(o.HaveOccurred())
						dstNode, err := exutil.GetPodNodeName(oc, ns, targetPod)
						o.Expect(err).NotTo(o.HaveOccurred())
						e2e.Logf("pods connection failed between nodes %s and %s", srcNode, dstNode)
						pass = false
					}
				}
			}
		}
	}
	e2e.Logf("The pods connection pass check is %v ", pass)
	return pass
}

// waitForPodsCount polls until the number of pods matching labelSelector in the namespace equals expectedCount
func waitForPodsCount(oc *exutil.CLI, namespace, labelSelector string, expectedCount int, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, namespace, labelSelector)
		if getPodErr != nil {
			e2e.Logf("Error fetching pods: %v, retrying...", getPodErr)
			return false, nil
		}
		if len(allPods) == expectedCount {
			return true, nil // condition met, exit polling
		}
		e2e.Logf("Expected %d pods, but found %d. Retrying...", expectedCount, len(allPods))
		return false, nil
	})
}
package networking
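A minimal usage sketch (not from the source) for two of the helpers above, assuming an initialized oc *exutil.CLI inside a test and a namespace ns already running pods labeled name=hello-pod; the label, count, and IP are illustrative:

// Wait until exactly 2 matching pods exist, polling every 5s for up to 2m.
err := waitForPodsCount(oc, ns, "name=hello-pod", 2, 5*time.Second, 120*time.Second)
exutil.AssertWaitPollNoErr(err, "expected pod count was not reached")

// Verify the first pod can curl a destination IP (IPv4 vs IPv6 is auto-detected).
pods := getPodName(oc, ns, "name=hello-pod")
verifyDstIPAccess(oc, pods[0], ns, "10.0.0.10", true)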
function
openshift/openshift-tests-private
81e8e4d3-7a63-41c2-9243-4ec856a2a607
createPingPod
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['pingPodResource']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (pod *pingPodResource) createPingPod(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
networking
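A minimal usage sketch (not in the source record), assuming it runs inside a test with an initialized oc *exutil.CLI; the template path is hypothetical but follows the testdata/networking fixture convention used elsewhere in this document:

buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pod := pingPodResource{
	name:      "hello-pod",
	namespace: oc.Namespace(),
	template:  filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml"), // hypothetical template file
}
pod.createPingPod(oc)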
function
openshift/openshift-tests-private
dd3087c0-9424-4046-ae97-2486a4621132
createPingPodNode
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['pingPodResourceNode']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (pod *pingPodResourceNode) createPingPodNode(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "NODENAME="+pod.nodename)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
networking
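The node-pinned variant only adds a NODENAME parameter; a sketch under the same assumptions, using exutil.GetFirstWorkerNode (seen in the helpers above) to pick a node:

nodeName, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
podOnNode := pingPodResourceNode{
	name:      "hello-pod-node",
	namespace: oc.Namespace(),
	nodename:  nodeName,
	template:  pingPodNodeTemplate, // hypothetical template path
}
podOnNode.createPingPodNode(oc)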
function
openshift/openshift-tests-private
9fdf98f9-e1aa-4eba-8c9c-12188bbb2a3e
createPingPodWinNode
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['pingPodResourceWinNode']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (pod *pingPodResourceWinNode) createPingPodWinNode(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "IMAGE="+pod.image, "NODENAME="+pod.nodename)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
networking
function
openshift/openshift-tests-private
1511bd9c-6f98-49c2-b452-8827c471f015
createTestPodMultinetwork
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['testPodMultinetwork']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (pod *testPodMultinetwork) createTestPodMultinetwork(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "NODENAME="+pod.nodename, "LABELNAME="+pod.labelname, "NADNAME="+pod.nadname)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
networking
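A sketch for the multinetwork pod helper; the label and NetworkAttachmentDefinition name are illustrative and assume the NAD already exists in the namespace:

multiPod := testPodMultinetwork{
	name:      "multinetwork-pod",
	namespace: oc.Namespace(),
	nodename:  nodeName,                // a worker node name obtained earlier
	labelname: "blue-pod",              // illustrative label
	nadname:   "nad-blue",              // assumes this NetworkAttachmentDefinition exists
	template:  multinetworkPodTemplate, // hypothetical template path
}
multiPod.createTestPodMultinetwork(oc)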
function
openshift/openshift-tests-private
29e98055-36f7-4990-9f8f-216696033752
applyResourceFromTemplate
['"encoding/json"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
	var configFile string
	err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
		output, err := oc.Run("process").Args(parameters...).OutputToFile(getRandomString() + "ping-pod.json")
		if err != nil {
			e2e.Logf("the err:%v, and try next round", err)
			return false, nil
		}
		configFile = output
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
	e2e.Logf("the file of resource is %s", configFile)
	return oc.WithoutNamespace().Run("apply").Args("-f", configFile).Execute()
}
networking
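A minimal usage sketch (not from the source record): callers pass `oc process`-style arguments, as the wrapper helpers above do; podTemplate is a hypothetical template path and the -p parameter names must match the template's:

err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podTemplate,
	"-p", "NAME=hello-pod", "NAMESPACE="+oc.Namespace())
o.Expect(err).NotTo(o.HaveOccurred())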
function
openshift/openshift-tests-private
1cc3671f-f8a2-4328-9a2f-b340d4b0552a
createEgressIPObject1
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['egressIPResource1']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (egressIP *egressIPResource1) createEgressIPObject1(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressIP.template, "-p", "NAME="+egressIP.name, "EGRESSIP1="+egressIP.egressIP1, "EGRESSIP2="+egressIP.egressIP2)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressIP %v", egressIP.name))
}
networking
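A sketch constructing the two-IP EgressIP object; the IPs are illustrative and would need to be free addresses on the egress nodes' subnet:

egressip := egressIPResource1{
	name:      "egressip-example", // illustrative
	template:  egressIPTemplate,   // hypothetical template path
	egressIP1: "10.0.0.100",
	egressIP2: "10.0.0.101",
}
egressip.createEgressIPObject1(oc)
defer egressip.deleteEgressIPObject1(oc)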
function
openshift/openshift-tests-private
dee41fdb-981a-417c-8e40-fa59520f7de1
deleteEgressIPObject1
['egressIPResource1']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (egressIP *egressIPResource1) deleteEgressIPObject1(oc *exutil.CLI) {
	removeResource(oc, true, true, "egressip", egressIP.name)
}
networking
function
openshift/openshift-tests-private
d98c2914-2156-4700-a580-eaaa715cd6e6
createEgressIPObject2
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['egressIPResource1']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (egressIP *egressIPResource1) createEgressIPObject2(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressIP.template, "-p", "NAME="+egressIP.name, "EGRESSIP1="+egressIP.egressIP1, "NSLABELKEY="+egressIP.nsLabelKey, "NSLABELVALUE="+egressIP.nsLabelValue, "PODLABELKEY="+egressIP.podLabelKey, "PODLABELVALUE="+egressIP.podLabelValue)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressIP %v", egressIP.name))
}
networking
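The second variant selects namespaces and pods by label instead of taking two IPs; all label keys and values here are illustrative:

egressip2 := egressIPResource1{
	name:          "egressip-labeled",
	template:      egressIP2Template, // hypothetical template path
	egressIP1:     "10.0.0.102",      // illustrative free IP
	nsLabelKey:    "org",
	nsLabelValue:  "qe",
	podLabelKey:   "color",
	podLabelValue: "pink",
}
egressip2.createEgressIPObject2(oc)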
function
openshift/openshift-tests-private
aea3c8e3-c20d-4bbf-8a65-dad8f29f9dd8
createEgressFWObject1
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['egressFirewall1']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (egressFirewall *egressFirewall1) createEgressFWObject1(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressFirewall.template, "-p", "NAME="+egressFirewall.name, "NAMESPACE="+egressFirewall.namespace)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressFW %v", egressFirewall.name))
}
networking
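A sketch for the EgressFirewall helper; OVN-Kubernetes expects the object to be named "default" per namespace, and the template path is hypothetical:

egressFW := egressFirewall1{
	name:      "default",
	namespace: oc.Namespace(),
	template:  egressFWTemplate, // hypothetical template path
}
egressFW.createEgressFWObject1(oc)
defer egressFW.deleteEgressFWObject1(oc)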
function
openshift/openshift-tests-private
ec87e7c1-ea1a-4c1f-98c4-d9eb5a42573a
deleteEgressFWObject1
['egressFirewall1']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (egressFirewall *egressFirewall1) deleteEgressFWObject1(oc *exutil.CLI) {
	removeResource(oc, true, true, "egressfirewall", egressFirewall.name, "-n", egressFirewall.namespace)
}
networking
function
openshift/openshift-tests-private
2a5cc478-83a8-487f-8b8c-01be68b11604
createEgressFW2Object
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['egressFirewall2']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (egressFirewall *egressFirewall2) createEgressFW2Object(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", egressFirewall.template, "-p", "NAME="+egressFirewall.name, "NAMESPACE="+egressFirewall.namespace, "RULETYPE="+egressFirewall.ruletype, "CIDR="+egressFirewall.cidr)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressFW2 %v", egressFirewall.name))
}
networking
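The second variant parameterizes a single rule type and CIDR; a deny-all sketch with illustrative values:

egressFW2 := egressFirewall2{
	name:      "default",
	namespace: oc.Namespace(),
	ruletype:  "Deny",
	cidr:      "0.0.0.0/0",
	template:  egressFW2Template, // hypothetical template path
}
egressFW2.createEgressFW2Object(oc)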
function
openshift/openshift-tests-private
6a2c89ad-2ccd-4411-b5a7-22d01d7b4fa7
createEgressFW5Object
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['egressFirewall5']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (EFW *egressFirewall5) createEgressFW5Object(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		parameters := []string{"--ignore-unknown-parameters=true", "-f", EFW.template, "-p", "NAME=" + EFW.name, "NAMESPACE=" + EFW.namespace, "RULETYPE1=" + EFW.ruletype1, "RULENAME1=" + EFW.rulename1, "RULEVALUE1=" + EFW.rulevalue1, "PROTOCOL1=" + EFW.protocol1, "PORTNUMBER1=" + strconv.Itoa(EFW.portnumber1), "RULETYPE2=" + EFW.ruletype2, "RULENAME2=" + EFW.rulename2, "RULEVALUE2=" + EFW.rulevalue2, "PROTOCOL2=" + EFW.protocol2, "PORTNUMBER2=" + strconv.Itoa(EFW.portnumber2)}
		err1 := applyResourceFromTemplateByAdmin(oc, parameters...)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create EgressFW5 %v", EFW.name))
}
networking
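The five-field variant renders two rules; the rule names and values below are illustrative guesses at template parameters, not confirmed by the source:

egressFW5 := egressFirewall5{
	name:        "default",
	namespace:   oc.Namespace(),
	ruletype1:   "Allow",
	rulename1:   "dnsName", // illustrative
	rulevalue1:  "www.example.com",
	protocol1:   "TCP",
	portnumber1: 443,
	ruletype2:   "Deny",
	rulename2:   "cidrSelector", // illustrative
	rulevalue2:  "0.0.0.0/0",
	protocol2:   "TCP",
	portnumber2: 80,
	template:    egressFW5Template, // hypothetical template path
}
egressFW5.createEgressFW5Object(oc)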
function
openshift/openshift-tests-private
40ea9015-7b2a-4ce1-8e3a-5bf956b119c9
createEgressNetworkPolicyObj
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['egressNetworkpolicy']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (eNPL *egressNetworkpolicy) createEgressNetworkPolicyObj(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		parameters := []string{"--ignore-unknown-parameters=true", "-f", eNPL.template, "-p", "NAME=" + eNPL.name, "NAMESPACE=" + eNPL.namespace, "RULETYPE=" + eNPL.ruletype, "RULENAME=" + eNPL.rulename, "RULEVALUE=" + eNPL.rulevalue}
		err1 := applyResourceFromTemplateByAdmin(oc, parameters...)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create EgressNetworkPolicy %v in Namespace %v", eNPL.name, eNPL.namespace))
}
networking
function
openshift/openshift-tests-private
cb426f91-7c15-4715-861a-2e1dfd498e11
createipBlockCIDRObjectDual
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['ipBlockCIDRsDual']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (ipBlock_policy *ipBlockCIDRsDual) createipBlockCIDRObjectDual(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_policy.template, "-p", "NAME="+ipBlock_policy.name, "NAMESPACE="+ipBlock_policy.namespace, "cidrIpv6="+ipBlock_policy.cidrIpv6, "cidrIpv4="+ipBlock_policy.cidrIpv4)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_policy.name))
}
networking
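A sketch for the dual-stack ipBlock network policy; the CIDRs are illustrative pod-network ranges:

ipBlockPolicy := ipBlockCIDRsDual{
	name:      "ipblock-dual-cidrs", // illustrative
	namespace: oc.Namespace(),
	cidrIpv4:  "10.128.0.0/14",
	cidrIpv6:  "fd01::/48",
	template:  ipBlockDualTemplate, // hypothetical template path
}
ipBlockPolicy.createipBlockCIDRObjectDual(oc)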
function
openshift/openshift-tests-private
167e6911-3175-4f51-ae03-34294876b581
createipBlockCIDRObjectSingle
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['ipBlockCIDRsSingle']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (ipBlock_policy *ipBlockCIDRsSingle) createipBlockCIDRObjectSingle(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_policy.template, "-p", "NAME="+ipBlock_policy.name, "NAMESPACE="+ipBlock_policy.namespace, "CIDR="+ipBlock_policy.cidr)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_policy.name))
}
networking
function
openshift/openshift-tests-private
b2f1ce24-b58a-47d6-bd5b-875660556368
createipBlockExceptObjectDual
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['ipBlockCIDRsExceptDual']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (ipBlock_except_policy *ipBlockCIDRsExceptDual) createipBlockExceptObjectDual(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		policyApplyError := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_except_policy.template, "-p", "NAME="+ipBlock_except_policy.name, "NAMESPACE="+ipBlock_except_policy.namespace, "CIDR_IPv6="+ipBlock_except_policy.cidrIpv6, "EXCEPT_IPv6="+ipBlock_except_policy.cidrIpv6Except, "CIDR_IPv4="+ipBlock_except_policy.cidrIpv4, "EXCEPT_IPv4="+ipBlock_except_policy.cidrIpv4Except)
		if policyApplyError != nil {
			e2e.Logf("the err:%v, and try next round", policyApplyError)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_except_policy.name))
}
networking
function
openshift/openshift-tests-private
247a3c06-f5bd-49bb-aaf5-a1e142301634
createipBlockExceptObjectSingle
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['ipBlockCIDRsExceptSingle']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
// Note: the except flag is accepted but not currently used; the except value comes from the struct field.
func (ipBlock_except_policy *ipBlockCIDRsExceptSingle) createipBlockExceptObjectSingle(oc *exutil.CLI, except bool) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		policyApplyError := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_except_policy.template, "-p", "NAME="+ipBlock_except_policy.name, "NAMESPACE="+ipBlock_except_policy.namespace, "CIDR="+ipBlock_except_policy.cidr, "EXCEPT="+ipBlock_except_policy.except)
		if policyApplyError != nil {
			e2e.Logf("the err:%v, and try next round", policyApplyError)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_except_policy.name))
}
networking
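A sketch for the single-stack except variant; note the boolean argument is accepted but, as the code above shows, not currently used by the helper:

exceptPolicy := ipBlockCIDRsExceptSingle{
	name:      "ipblock-cidr-except", // illustrative
	namespace: oc.Namespace(),
	cidr:      "10.128.0.0/14",       // illustrative
	except:    "10.128.2.0/24",       // illustrative excluded subnet
	template:  ipBlockExceptTemplate, // hypothetical template path
}
exceptPolicy.createipBlockExceptObjectSingle(oc, true)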
function
openshift/openshift-tests-private
4a7f74f8-da41-4e0f-9170-0951ce996991
createIPBlockMultipleCIDRsObjectDual
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
['ipBlockCIDRsDual']
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
func (ipBlock_cidrs_policy *ipBlockCIDRsDual) createIPBlockMultipleCIDRsObjectDual(oc *exutil.CLI) {
	err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
		err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipBlock_cidrs_policy.template, "-p", "NAME="+ipBlock_cidrs_policy.name, "NAMESPACE="+ipBlock_cidrs_policy.namespace, "cidrIpv6="+ipBlock_cidrs_policy.cidrIpv6, "cidrIpv4="+ipBlock_cidrs_policy.cidrIpv4, "cidr2Ipv4="+ipBlock_cidrs_policy.cidr2Ipv4, "cidr2Ipv6="+ipBlock_cidrs_policy.cidr2Ipv6, "cidr3Ipv4="+ipBlock_cidrs_policy.cidr3Ipv4, "cidr3Ipv6="+ipBlock_cidrs_policy.cidr3Ipv6)
		if err1 != nil {
			e2e.Logf("the err:%v, and try next round", err1)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create network policy %v", ipBlock_cidrs_policy.name))
}
networking
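A sketch for the multiple-CIDRs dual-stack variant, reusing ipBlockCIDRsDual with all six CIDR fields set; every CIDR is illustrative:

multiCIDRPolicy := ipBlockCIDRsDual{
	name:      "ipblock-dual-multiple-cidrs",
	namespace: oc.Namespace(),
	cidrIpv4:  "10.128.0.0/16",
	cidr2Ipv4: "10.129.0.0/16",
	cidr3Ipv4: "10.130.0.0/16",
	cidrIpv6:  "fd01:0:0:1::/64",
	cidr2Ipv6: "fd01:0:0:2::/64",
	cidr3Ipv6: "fd01:0:0:3::/64",
	template:  multiCIDRTemplate, // hypothetical template path
}
multiCIDRPolicy.createIPBlockMultipleCIDRsObjectDual(oc)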