element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
de11ef7d-9e02-4483-b430-1c809bdba6da
|
createMultusNADforUshift
|
['"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func createMultusNADforUshift(oc *exutil.CLI, pod_pmtrs map[string]string, MultusNADGenericYaml string) (err error) {
for rep, value := range pod_pmtrs {
MultusNADGenericYaml = strings.ReplaceAll(MultusNADGenericYaml, rep, value)
}
MultusNADFileName := "MultusNAD-" + getRandomString() + ".yaml"
defer os.Remove(MultusNADFileName)
if err = os.WriteFile(MultusNADFileName, []byte(MultusNADGenericYaml), 0644); err != nil {
return err
}
// create multus NAD for MicroShift
_, err = oc.WithoutNamespace().Run("create").Args("-f", MultusNADFileName).Output()
return err
}
|
networking
| ||||
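The helper above renders the NAD manifest by plain string substitution: every key in pod_pmtrs is treated as a literal placeholder token and replaced before the file is written and applied. A minimal, self-contained sketch of that substitution step; the placeholder names and YAML are illustrative, not the repo's actual templates:

package main

import (
	"fmt"
	"strings"
)

// renderTemplate applies the same ReplaceAll loop the helper uses:
// each map key is a literal token substituted into the YAML text.
func renderTemplate(tmpl string, params map[string]string) string {
	for token, value := range params {
		tmpl = strings.ReplaceAll(tmpl, token, value)
	}
	return tmpl
}

func main() {
	// Hypothetical template; the real multus NAD template ships with the test data.
	tmpl := `apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: $nadname
  namespace: $namespace`
	fmt.Println(renderTemplate(tmpl, map[string]string{
		"$nadname":   "bridge-nad",
		"$namespace": "test-ns",
	}))
}

Plain ReplaceAll keeps the templates free of a templating dependency, at the cost of no warning when a placeholder goes unreplaced.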
function
|
openshift/openshift-tests-private
|
6164a786-0c64-433b-8a1e-6e228ddf92e7
|
createMultusPodforUshift
|
['"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func createMultusPodforUshift(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) {
MultusPodGenericYaml := getFileContentforUshift("microshift", "multus-pod-generic.yaml")
//replace all variables as per createMultusPodforUshift() arguments
for rep, value := range pod_pmtrs {
MultusPodGenericYaml = strings.ReplaceAll(MultusPodGenericYaml, rep, value)
}
MultusPodFileName := "MultusPod-" + getRandomString() + ".yaml"
defer os.Remove(MultusPodFileName)
if err = os.WriteFile(MultusPodFileName, []byte(MultusPodGenericYaml), 0644); err != nil {
return err
}
// create MultusPod for MicroShift
_, err = oc.WithoutNamespace().Run("create").Args("-f", MultusPodFileName).Output()
return err
}
|
networking
| ||||
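createMultusNADforUshift and createMultusPodforUshift share the same render-write-create sequence and differ only in the template source and file-name prefix. A hedged refactoring sketch: applyRenderedManifest is a hypothetical name, while every call inside it (getRandomString, os.WriteFile, oc.WithoutNamespace().Run) appears verbatim in the two helpers above:

// applyRenderedManifest is a hypothetical shared helper: it substitutes
// params into yamlText, writes the result to a temp file, and applies it.
func applyRenderedManifest(oc *exutil.CLI, prefix, yamlText string, params map[string]string) error {
	for token, value := range params {
		yamlText = strings.ReplaceAll(yamlText, token, value)
	}
	fileName := prefix + "-" + getRandomString() + ".yaml"
	defer os.Remove(fileName)
	if err := os.WriteFile(fileName, []byte(yamlText), 0644); err != nil {
		return err
	}
	_, err := oc.WithoutNamespace().Run("create").Args("-f", fileName).Output()
	return err
}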
function
|
openshift/openshift-tests-private
|
f52a8163-ea5b-4dd8-a2d3-d13c887f070a
|
enableDHCPforCNI
|
['"fmt"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func enableDHCPforCNI(oc *exutil.CLI, nodeName string) {
cmdAddlink := "ip link add testbr1 type bridge"
_, cmdAddlinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdAddlink)
o.Expect(cmdAddlinkErr).NotTo(o.HaveOccurred())
cmdAddIPv4 := "ip address add 88.8.8.2/24 dev testbr1"
_, cmdAddIPv4Err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdAddIPv4)
o.Expect(cmdAddIPv4Err).NotTo(o.HaveOccurred())
cmdAddIPv6 := "ip address add fd00:dead:beef:10::2/64 dev testbr1"
_, cmdAddIPv6Err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdAddIPv6)
o.Expect(cmdAddIPv6Err).NotTo(o.HaveOccurred())
cmdUplink := "ip link set up testbr1"
_, cmdUplinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdUplink)
o.Expect(cmdUplinkErr).NotTo(o.HaveOccurred())
cmdShowIP := "ip add show testbr1"
cmdShowIPOutput, cmdShowIPErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdShowIP)
o.Expect(cmdShowIPErr).NotTo(o.HaveOccurred())
o.Expect(cmdShowIPOutput).To(o.ContainSubstring("88.8.8.2"))
dnsmasqFile := "/etc/dnsmasq.conf"
cmdConfigdnsmasq := fmt.Sprintf(`cat > %v << EOF
no-resolv
expand-hosts
bogus-priv
domain=mydomain.net
local=/mydomain.net/
interface=testbr1
dhcp-range=88.8.8.10,88.8.8.250,24h
enable-ra
dhcp-range=tag:testbr1,::1,constructor:testbr1,ra-names,12h
bind-interfaces
EOF`, dnsmasqFile)
_, cmdConfigdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdConfigdnsmasq)
o.Expect(cmdConfigdnsmasqErr).NotTo(o.HaveOccurred())
cmdRestartdnsmasq := "systemctl restart dnsmasq --now"
_, cmdRestartdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdRestartdnsmasq)
o.Expect(cmdRestartdnsmasqErr).NotTo(o.HaveOccurred())
cmdCheckdnsmasq := "systemctl status dnsmasq"
cmdCheckdnsmasqOutput, cmdCheckdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdCheckdnsmasq)
o.Expect(cmdCheckdnsmasqErr).NotTo(o.HaveOccurred())
o.Expect(cmdCheckdnsmasqOutput).To(o.ContainSubstring("active (running)"))
addDHCPFirewall := "firewall-cmd --add-service=dhcp"
_, addDHCPFirewallErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", addDHCPFirewall)
o.Expect(addDHCPFirewallErr).NotTo(o.HaveOccurred())
}
|
networking
| ||||
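The node-side setup above has two halves: bring up a testbr1 bridge with static IPv4/IPv6 addresses, then point dnsmasq at it (interface=testbr1 and bind-interfaces scope the daemon to the bridge; the two dhcp-range lines serve IPv4 leases and router-advertised IPv6). A self-contained sketch of the same bridge bring-up run locally instead of through DebugNodeWithChroot; it requires root and the iproute2 tools, and reuses the test's addresses:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Same command sequence the helper issues on the MicroShift node.
	steps := [][]string{
		{"ip", "link", "add", "testbr1", "type", "bridge"},
		{"ip", "address", "add", "88.8.8.2/24", "dev", "testbr1"},
		{"ip", "address", "add", "fd00:dead:beef:10::2/64", "dev", "testbr1"},
		{"ip", "link", "set", "up", "testbr1"},
	}
	for _, s := range steps {
		if out, err := exec.Command(s[0], s[1:]...).CombinedOutput(); err != nil {
			fmt.Printf("%v failed: %v (%s)\n", s, err, out)
			return
		}
	}
	fmt.Println("testbr1 is up with IPv4 and IPv6 addresses")
}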
function
|
openshift/openshift-tests-private
|
e4c61750-d1eb-4d25-bbdc-92c3d7ecbe81
|
disableDHCPforCNI
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func disableDHCPforCNI(oc *exutil.CLI, nodeName string) {
cmdDelIP := "ip address del 88.8.8.2/24 dev testbr1"
_, cmdDelIPErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDelIP)
o.Expect(cmdDelIPErr).NotTo(o.HaveOccurred())
cmdDelIPv6 := "ip address del fd00:dead:beef:10::2/64 dev testbr1"
_, cmdDelIPv6Err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDelIPv6)
o.Expect(cmdDelIPv6Err).NotTo(o.HaveOccurred())
cmdDownlink := "ip link set down testbr1"
_, cmdDownlinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDownlink)
o.Expect(cmdDownlinkErr).NotTo(o.HaveOccurred())
cmdDellink := "ip link delete testbr1"
_, cmdDellinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDellink)
o.Expect(cmdDellinkErr).NotTo(o.HaveOccurred())
cmdStopdnsmasq := "systemctl stop dnsmasq --now"
_, cmdStopdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdStopdnsmasq)
o.Expect(cmdStopdnsmasqErr).NotTo(o.HaveOccurred())
cmdDeldnsmasqFile := "rm /etc/dnsmasq.conf"
_, cmdDeldnsmasqFileErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDeldnsmasqFile)
o.Expect(cmdDeldnsmasqFileErr).NotTo(o.HaveOccurred())
remDHCPFirewall := "firewall-cmd --remove-service=dhcp"
_, remDHCPFirewallErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", remDHCPFirewall)
o.Expect(remDHCPFirewallErr).NotTo(o.HaveOccurred())
}
|
networking
| |||||
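disableDHCPforCNI undoes enableDHCPforCNI step by step (addresses, link, dnsmasq, firewall rule), so the pair fits defer-based cleanup. A hedged usage sketch, assuming the oc and nodeName variables of a surrounding Ginkgo test body:

enableDHCPforCNI(oc, nodeName)
// tear the bridge, dnsmasq config, and firewall rule back down even if the test fails
defer disableDHCPforCNI(oc, nodeName)
// ... exercise the DHCP-backed CNI configuration here ...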
function
|
openshift/openshift-tests-private
|
05eab857-96b0-41a5-b534-195d1bc9ed69
|
getMicroshiftPodMultiNetworks
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func getMicroshiftPodMultiNetworks(oc *exutil.CLI, namespace string, podName string, netName string) (string, string) {
cmd1 := "ip a sho " + netName + " | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmd2 := "ip a sho " + netName + " | awk 'NR==7{print $2}' |grep -Eo '([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}'"
podv4Output, err := e2eoutput.RunHostCmd(namespace, podName, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
podIPv4 := strings.TrimSpace(podv4Output)
podv6Output, err1 := e2eoutput.RunHostCmd(namespace, podName, cmd2)
o.Expect(err1).NotTo(o.HaveOccurred())
podIPv6 := strings.TrimSpace(podv6Output)
return podIPv4, podIPv6
}
|
networking
| ||||
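The extraction above depends on fixed line numbers (NR==3 and NR==7 in awk), which breaks silently if the interface gains or loses an address. A self-contained sketch of a position-independent parse of ip a show output using the standard net package; the sample output is illustrative:

package main

import (
	"fmt"
	"net"
	"strings"
)

// parseIPs scans `ip a show <dev>` output and returns the first global
// IPv4 and IPv6 addresses found, regardless of line position.
func parseIPs(output string) (ipv4, ipv6 string) {
	for _, line := range strings.Split(output, "\n") {
		fields := strings.Fields(strings.TrimSpace(line))
		if len(fields) < 2 || (fields[0] != "inet" && fields[0] != "inet6") {
			continue
		}
		ip, _, err := net.ParseCIDR(fields[1])
		if err != nil {
			continue
		}
		if ip.To4() != nil && ipv4 == "" {
			ipv4 = ip.String()
		} else if ip.To4() == nil && ipv6 == "" && ip.IsGlobalUnicast() {
			ipv6 = ip.String()
		}
	}
	return ipv4, ipv6
}

func main() {
	sample := `2: net1: <BROADCAST,MULTICAST,UP> mtu 1500
    inet 192.168.10.5/24 brd 192.168.10.255 scope global net1
    inet6 fd00:dead:beef:10::5/64 scope global
    inet6 fe80::1/64 scope link`
	v4, v6 := parseIPs(sample)
	fmt.Println(v4, v6) // 192.168.10.5 fd00:dead:beef:10::5
}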
function
|
openshift/openshift-tests-private
|
8fe2460b-1cdd-4c47-aebf-5e98d133f07b
|
checkMicroshiftIPStackType
|
['"io"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func checkMicroshiftIPStackType(oc *exutil.CLI) string {
podNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("pod", "-n", "openshift-dns", "-l", "dns.operator.openshift.io/daemonset-node-resolver",
"-o=jsonpath='{ .items[*].status.podIPs[*].ip }'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("pod network is %v", podNetwork)
if strings.Count(podNetwork, ":") >= 2 && strings.Count(podNetwork, ".") >= 2 {
return "dualstack"
} else if strings.Count(podNetwork, ":") >= 2 {
return "ipv6single"
} else if strings.Count(podNetwork, ".") >= 2 {
return "ipv4single"
}
return ""
}
|
networking
| ||||
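checkMicroshiftIPStackType classifies the cluster by counting '.' and ':' characters in the space-joined podIPs string rather than parsing addresses; at least two of each means both families are present. A self-contained sketch of the same heuristic on sample jsonpath outputs:

package main

import (
	"fmt"
	"strings"
)

// stackType mirrors the helper's counting heuristic on the raw
// single-quoted, space-joined podIPs string the jsonpath query returns.
func stackType(podNetwork string) string {
	switch {
	case strings.Count(podNetwork, ":") >= 2 && strings.Count(podNetwork, ".") >= 2:
		return "dualstack"
	case strings.Count(podNetwork, ":") >= 2:
		return "ipv6single"
	case strings.Count(podNetwork, ".") >= 2:
		return "ipv4single"
	}
	return ""
}

func main() {
	fmt.Println(stackType("'10.42.0.5'"))         // ipv4single
	fmt.Println(stackType("'fd01::5'"))           // ipv6single
	fmt.Println(stackType("'10.42.0.5 fd01::5'")) // dualstack
}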
function
|
openshift/openshift-tests-private
|
f91ac8f5-447e-46db-b6ff-8b966a948424
|
getMicroshiftNodeIPV6
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func getMicroshiftNodeIPV6(oc *exutil.CLI) string {
ipStack := checkMicroshiftIPStackType(oc)
o.Expect(ipStack).ShouldNot(o.BeEmpty())
o.Expect(ipStack).NotTo(o.Equal("ipv4single"))
nodeName := getMicroshiftNodeName(oc)
if ipStack == "ipv6single" {
e2e.Logf("Its a Single Stack Cluster, either IPv4 or IPv6")
InternalIP, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[?(@.type==\"InternalIP\")].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's Internal IP is %q", InternalIP)
return InternalIP
}
if ipStack == "dualstack" {
e2e.Logf("Its a Dual Stack Cluster")
InternalIP1, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[0].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 1st Internal IP is %q", InternalIP1)
InternalIP2, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[1].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 2nd Internal IP is %q", InternalIP2)
if netutils.IsIPv6String(InternalIP1) {
return InternalIP1
}
return InternalIP2
}
return ""
}
|
networking
| |||||
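netutils.IsIPv6String here comes from k8s.io/utils/net; the same family check can be done with the standard library, since an address that parses but has no 4-byte form is IPv6. A minimal stdlib sketch:

package main

import (
	"fmt"
	"net"
)

// isIPv6 reports whether s parses as an IPv6 address, mirroring
// what netutils.IsIPv6String is used for above.
func isIPv6(s string) bool {
	ip := net.ParseIP(s)
	return ip != nil && ip.To4() == nil
}

func main() {
	fmt.Println(isIPv6("fd00:dead:beef:10::2")) // true
	fmt.Println(isIPv6("88.8.8.2"))             // false
}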
function
|
openshift/openshift-tests-private
|
600e312c-5a33-40fd-a1c6-ec6cccb84250
|
getMicroshiftNodeIP
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func getMicroshiftNodeIP(oc *exutil.CLI, nodeName string) (string, string) {
ipStack := checkMicroshiftIPStackType(oc)
if (ipStack == "ipv6single") || (ipStack == "ipv4single") {
e2e.Logf("Its a Single Stack Cluster, either IPv4 or IPv6")
InternalIP, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[?(@.type==\"InternalIP\")].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's Internal IP is %q", InternalIP)
return "", InternalIP
}
e2e.Logf("Its a Dual Stack Cluster")
InternalIP1, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[0].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 1st Internal IP is %q", InternalIP1)
InternalIP2, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[1].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 2nd Internal IP is %q", InternalIP2)
if netutils.IsIPv6String(InternalIP1) {
return InternalIP1, InternalIP2
}
return InternalIP2, InternalIP1
}
|
networking
| |||||
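For dual-stack nodes, getMicroshiftNodeIP normalizes its return to (IPv6, IPv4) regardless of the order of the two InternalIP entries in the node status. A self-contained sketch of that normalization:

package main

import (
	"fmt"
	"net"
)

// orderV6V4 returns the address pair as (IPv6, IPv4) no matter
// which of the two node InternalIP entries came first.
func orderV6V4(a, b string) (string, string) {
	if ip := net.ParseIP(a); ip != nil && ip.To4() == nil {
		return a, b
	}
	return b, a
}

func main() {
	fmt.Println(orderV6V4("10.0.0.4", "fd00::4")) // fd00::4 10.0.0.4
	fmt.Println(orderV6V4("fd00::4", "10.0.0.4")) // fd00::4 10.0.0.4
}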
function
|
openshift/openshift-tests-private
|
39127c47-8c57-4beb-a590-cb6be310f6ef
|
getMicroshiftNodeName
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func getMicroshiftNodeName(oc *exutil.CLI) string {
nodeName, err := oc.AsAdmin().Run("get").Args("nodes", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return nodeName
}
|
networking
| |||||
test
|
openshift/openshift-tests-private
|
274bebf5-f9db-44d8-b33e-271bec38cb91
|
multicast_udn
|
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_udn.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = g.Describe("[sig-networking] SDN udn/default multicast", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-udn", exutil.KubeConfigPath())
testDataDirMcast = exutil.FixturePath("testdata", "networking/multicast")
)
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
g.It("Author:yingwang-High-78447-udn pods should/should not receive multicast traffic when enable/disable multicast.", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns string
ipStackType = checkIPStackType(oc)
ipv4List []string
ipv6List []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
port = "4321"
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("############# test multicast on pods with udn primary interface")
exutil.By("1. create udn namespace")
oc.CreateNamespaceUDN()
ns = oc.Namespace()
exutil.By("2. Create CRD for UDN")
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
createGeneralUDNCRD(oc, ns, "udn-78447", ipv4cidr, ipv6cidr, cidr, "layer3")
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns = oc.Namespace()
}
exutil.By("3. Create 3 multicast testing pods")
mcastPodRc := networkingRes{
name: "mcastpod-rc",
namespace: ns,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc.kind, mcastPodRc.name, "-n", mcastPodRc.namespace)
mcastPodRc.create(oc, "RCNAME="+mcastPodRc.name, "-n", mcastPodRc.namespace)
err := waitForPodWithLabelReady(oc, ns, "name="+mcastPodRc.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc.name+" not ready")
mcastPodList := getPodName(oc, ns, "name="+mcastPodRc.name)
exutil.By("4. check multicast traffic without enable multicast in ns")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List = getPodIPv4UDNList(oc, ns, mcastPodList)
} else {
ipv4List = getPodIPv4List(oc, ns, mcastPodList)
}
chkRes1 := chkMcastTraffic(oc, ns, mcastPodList, ipv4List, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeFalse())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List = getPodIPv6UDNList(oc, ns, mcastPodList)
} else {
ipv6List = getPodIPv6List(oc, ns, mcastPodList)
}
chkRes2 := chkMcastTraffic(oc, ns, mcastPodList, ipv6List, mcastipv6, port)
o.Expect(chkRes2).Should(o.BeFalse())
}
exutil.By("5. enable multicast and check multicast traffic again")
enableMulticast(oc, ns)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
chkRes1 := chkMcastTraffic(oc, ns, mcastPodList, ipv4List, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
chkRes2 := chkMcastTraffic(oc, ns, mcastPodList, ipv6List, mcastipv6, port)
o.Expect(chkRes2).Should(o.BeTrue())
}
exutil.By("6. disable multicast and check multicast traffic again")
disableMulticast(oc, ns)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
chkRes1 := chkMcastTraffic(oc, ns, mcastPodList, ipv4List, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeFalse())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
chkRes2 := chkMcastTraffic(oc, ns, mcastPodList, ipv6List, mcastipv6, port)
o.Expect(chkRes2).Should(o.BeFalse())
}
}
})
/*** multicast layer2 cases failed due to bug OCPBUGS-48731; will recommit related cases once it is fixed.
g.It("Author:yingwang-High-78446-Delete/add udn pods should not affect other pods to receive multicast traffic (layer 2).", func() {
var (
udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_dualstack_template.yaml")
udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_singlestack_template.yaml")
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns string
ipStackType = checkIPStackType(oc)
ipv4List []string
ipv6List []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
intf string
port = "4321"
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("############# test multicast on pods with udn primary interface")
exutil.By("###1. create udn namespace")
oc.CreateNamespaceUDN()
ns = oc.Namespace()
intf = "ovn-udn1"
exutil.By("###2. Create CRD for UDN")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-78446",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDdualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-78446",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDSingleStack,
}
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns = oc.Namespace()
intf = "eth0"
}
exutil.By("###3. Create 3 multicast testing pods")
mcastPodRc := networkingRes{
name: "mcastpod-rc",
namespace: ns,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc.kind, mcastPodRc.name, "-n", ns)
mcastPodRc.create(oc, "RCNAME="+mcastPodRc.name, "-n", mcastPodRc.namespace)
err := waitForPodWithLabelReady(oc, ns, "name="+mcastPodRc.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc.name+" not ready")
mcastPodList := getPodName(oc, ns, "name="+mcastPodRc.name)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List = getPodIPv4UDNList(oc, ns, mcastPodList)
} else {
ipv4List = getPodIPv4List(oc, ns, mcastPodList)
}
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List = getPodIPv6UDNList(oc, ns, mcastPodList)
} else {
ipv6List = getPodIPv6List(oc, ns, mcastPodList)
}
}
exutil.By("###4. enable multicast and check multicast traffic")
enableMulticast(oc, ns)
//delete one pod while traffic is being sent, and check that the remaining 2 pods can still receive multicast traffic
pktFile1 := "/tmp/" + getRandomString() + ".txt"
pktFile2 := "/tmp/" + getRandomString() + ".txt"
pktFile3 := "/tmp/" + getRandomString() + ".txt"
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
startMcastTrafficOnPod(oc, ns, mcastPodList[0], ipv4List, pktFile1, mcastipv4, port)
startMcastTrafficOnPod(oc, ns, mcastPodList[1], ipv4List, pktFile2, mcastipv4, port)
//add sleep time to make sure traffic started.
time.Sleep(5 * time.Second)
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv4)
chkMcastAddress(oc, ns, mcastPodList[1], intf, mcastipv4)
//startMcastTrafficOnPod(oc, ns, mcastPodList[2], ipv4List, pktFile3)
removeResource(oc, true, true, "pod", mcastPodList[2], "-n", ns)
//add sleep time to make sure traffic completed.
time.Sleep(20 * time.Second)
chkRes1 := chkMcatRcvOnPod(oc, ns, mcastPodList[0], ipv4List[0], ipv4List, mcastipv4, pktFile1)
chkRes2 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv4List[1], ipv4List, mcastipv4, pktFile2)
o.Expect(chkRes1).Should(o.BeTrue())
o.Expect(chkRes2).Should(o.BeTrue())
}
if ipStackType == "dualstack" {
//tested the new rc pod for ipv6
mcastPodList = getPodName(oc, ns, "name="+mcastPodRc.name)
if i == 0 {
ipv6List = getPodIPv6UDNList(oc, ns, mcastPodList)
} else {
ipv6List = getPodIPv6List(oc, ns, mcastPodList)
}
startMcastTrafficOnPod(oc, ns, mcastPodList[0], ipv6List, pktFile1, mcastipv6, port)
startMcastTrafficOnPod(oc, ns, mcastPodList[1], ipv6List, pktFile2, mcastipv6, port)
startMcastTrafficOnPod(oc, ns, mcastPodList[2], ipv6List, pktFile3, mcastipv6, port)
//add sleep time to make sure traffic started.
time.Sleep(5 * time.Second)
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv6)
chkMcastAddress(oc, ns, mcastPodList[1], intf, mcastipv6)
chkMcastAddress(oc, ns, mcastPodList[2], intf, mcastipv6)
removeResource(oc, true, true, "pod", mcastPodList[2], "-n", ns)
//add sleep time to make sure traffic completed.
time.Sleep(20 * time.Second)
chkRes1 := chkMcatRcvOnPod(oc, ns, mcastPodList[0], ipv6List[0], ipv6List, mcastipv6, pktFile1)
chkRes2 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv6List[1], ipv6List, mcastipv6, pktFile2)
chkRes3 := chkMcatRcvOnPod(oc, ns, mcastPodList[2], ipv6List[2], ipv6List, mcastipv6, pktFile3)
o.Expect(chkRes1).Should(o.BeTrue())
o.Expect(chkRes2).Should(o.BeTrue())
o.Expect(chkRes3).Should(o.BeTrue())
}
}
})
g.It("Author:yingwang-High-78381-CUDN pods should be able to subscribe send and receive multicast traffic (layer 2).", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns string
key = "test.cudn.layer2"
ipStackType = checkIPStackType(oc)
ipv4List []string
ipv6List []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
port = "4321"
crdName = "cudn-network-78381"
values = []string{"value-78381-1", "value-78381-2"}
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("1. create 2 namespaces for CUDN")
oc.CreateNamespaceUDN()
ns = oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", key)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", key, values[0])).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2. create CUDN in cudnNS")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
_, err = createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer2", values)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns = oc.Namespace()
}
exutil.By("3. Create 3 multicast testing pods")
mcastPodRc := networkingRes{
name: "mcastpod-rc",
namespace: ns,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc.kind, mcastPodRc.name, "-n", ns)
mcastPodRc.create(oc, "RCNAME="+mcastPodRc.name, "-n", ns)
err := waitForPodWithLabelReady(oc, ns, "name="+mcastPodRc.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc.name+" not ready")
mcastPodList := getPodName(oc, ns, "name="+mcastPodRc.name)
exutil.By("4. enable mulitcast and send multicast traffic")
enableMulticast(oc, ns)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List = getPodIPv4UDNList(oc, ns, mcastPodList)
} else {
ipv4List = getPodIPv4List(oc, ns, mcastPodList)
}
chkRes := chkMcastTraffic(oc, ns, mcastPodList, ipv4List, mcastipv4, port)
o.Expect(chkRes).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List = getPodIPv6UDNList(oc, ns, mcastPodList)
} else {
ipv6List = getPodIPv6List(oc, ns, mcastPodList)
}
chkRes1 := chkMcastTraffic(oc, ns, mcastPodList, ipv6List, mcastipv6, port)
o.Expect(chkRes1).Should(o.BeTrue())
}
}
})
***/
g.It("Author:yingwang-High-78448-udn pods can join different multicast groups at same time.", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns string
ipStackType = checkIPStackType(oc)
ipv4List []string
ipv6List []string
mcastipv4 = []string{"232.43.211.234", "232.43.211.235", "232.43.211.236"}
mcastipv6 = []string{"ff3e::4321:1234", "ff3e::4321:1235", "ff3e::4321:1236"}
intf string
port = []string{"4321", "4322", "4323"}
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for j := 0; j < 2; j++ {
if j == 0 {
exutil.By("############# test multicast on pods with udn primary interface")
exutil.By("###1. create udn namespace")
oc.CreateNamespaceUDN()
ns = oc.Namespace()
intf = "ovn-udn1"
exutil.By("###2. Create CRD for UDN")
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
createGeneralUDNCRD(oc, ns, "udn-78448", ipv4cidr, ipv6cidr, cidr, "layer3")
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns = oc.Namespace()
intf = "eth0"
}
exutil.By("###3. Create 3 multicast testing pods")
mcastPodRc := networkingRes{
name: "mcastpod-rc",
namespace: ns,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc.kind, mcastPodRc.name, "-n", ns)
mcastPodRc.create(oc, "RCNAME="+mcastPodRc.name, "-n", mcastPodRc.namespace)
err := waitForPodWithLabelReady(oc, ns, "name="+mcastPodRc.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc.name+" not ready")
mcastPodList := getPodName(oc, ns, "name="+mcastPodRc.name)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if j == 0 {
ipv4List = getPodIPv4UDNList(oc, ns, mcastPodList)
} else {
ipv4List = getPodIPv4List(oc, ns, mcastPodList)
}
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if j == 0 {
ipv6List = getPodIPv6UDNList(oc, ns, mcastPodList)
} else {
ipv6List = getPodIPv6List(oc, ns, mcastPodList)
}
}
exutil.By("###4. enable multicast and check multicast traffic")
enableMulticast(oc, ns)
//send multicast traffic to join different multicast groups at the same time
pktFile1 := make([]string, len(mcastPodList))
pktFile2 := make([]string, len(mcastPodList))
pktFile3 := make([]string, len(mcastPodList))
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
for i, podName := range mcastPodList {
pktFile1[i] = "/tmp/" + getRandomString() + ".txt"
pktFile2[i] = "/tmp/" + getRandomString() + ".txt"
pktFile3[i] = "/tmp/" + getRandomString() + ".txt"
startMcastTrafficOnPod(oc, ns, podName, ipv4List, pktFile1[i], mcastipv4[0], port[0])
startMcastTrafficOnPod(oc, ns, podName, ipv4List, pktFile2[i], mcastipv4[1], port[1])
startMcastTrafficOnPod(oc, ns, podName, ipv4List, pktFile3[i], mcastipv4[2], port[2])
}
//add sleep time to make sure traffic started
time.Sleep(5 * time.Second)
//choose one pod to check the multicast ip address
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv4[0])
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv4[1])
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv4[2])
//add sleep time to make sure traffic completed.
time.Sleep(20 * time.Second)
//choose one pod to check the received multicast packets
chkRes1 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv4List[1], ipv4List, mcastipv4[0], pktFile1[1])
chkRes2 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv4List[1], ipv4List, mcastipv4[1], pktFile2[1])
chkRes3 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv4List[1], ipv4List, mcastipv4[2], pktFile3[1])
o.Expect(chkRes1).Should(o.BeTrue())
o.Expect(chkRes2).Should(o.BeTrue())
o.Expect(chkRes3).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
for i, podName := range mcastPodList {
pktFile1[i] = "/tmp/" + getRandomString() + ".txt"
pktFile2[i] = "/tmp/" + getRandomString() + ".txt"
pktFile3[i] = "/tmp/" + getRandomString() + ".txt"
startMcastTrafficOnPod(oc, ns, podName, ipv6List, pktFile1[i], mcastipv6[0], port[0])
startMcastTrafficOnPod(oc, ns, podName, ipv6List, pktFile2[i], mcastipv6[1], port[1])
startMcastTrafficOnPod(oc, ns, podName, ipv6List, pktFile3[i], mcastipv6[2], port[2])
}
//add sleep time to make sure traffic started.
time.Sleep(5 * time.Second)
//choose one pod to check the multicast ipv6 address
chkMcastAddress(oc, ns, mcastPodList[2], intf, mcastipv6[0])
chkMcastAddress(oc, ns, mcastPodList[2], intf, mcastipv6[1])
chkMcastAddress(oc, ns, mcastPodList[2], intf, mcastipv6[2])
//add sleep time to make sure traffic completed.
time.Sleep(20 * time.Second)
//choose one pod to check the received multicast packets
chkRes1 := chkMcatRcvOnPod(oc, ns, mcastPodList[2], ipv6List[2], ipv6List, mcastipv6[0], pktFile1[2])
chkRes2 := chkMcatRcvOnPod(oc, ns, mcastPodList[2], ipv6List[2], ipv6List, mcastipv6[1], pktFile2[2])
chkRes3 := chkMcatRcvOnPod(oc, ns, mcastPodList[2], ipv6List[2], ipv6List, mcastipv6[2], pktFile3[2])
o.Expect(chkRes1).Should(o.BeTrue())
o.Expect(chkRes2).Should(o.BeTrue())
o.Expect(chkRes3).Should(o.BeTrue())
}
}
})
g.It("Author:yingwang-High-78450-Same multicast groups can be created in multiple namespaces with udn configured.", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns1 string
ns2 string
ipStackType = checkIPStackType(oc)
ipv4List1 []string
ipv6List1 []string
ipv4List2 []string
ipv6List2 []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
port = "4321"
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("############# test multicast on pods with udn primary interface")
exutil.By("1. create 2 udn namespaces")
oc.CreateNamespaceUDN()
ns1 = oc.Namespace()
oc.CreateNamespaceUDN()
ns2 = oc.Namespace()
exutil.By("2. Create CRD for UDNs")
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
createGeneralUDNCRD(oc, ns1, "udn-78450-1", ipv4cidr, ipv6cidr, cidr, "layer3")
createGeneralUDNCRD(oc, ns2, "udn-78450-2", ipv4cidr, ipv6cidr, cidr, "layer3")
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns1 = oc.Namespace()
oc.SetupProject()
ns2 = oc.Namespace()
}
exutil.By("3. Create 3 multicast testing pods")
mcastPodRc1 := networkingRes{
name: "mcastpod-rc-1",
namespace: ns1,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
mcastPodRc2 := networkingRes{
name: "mcastpod-rc-2",
namespace: ns2,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc1.kind, mcastPodRc1.name, "-n", ns1)
mcastPodRc1.create(oc, "RCNAME="+mcastPodRc1.name, "-n", ns1)
defer removeResource(oc, true, true, mcastPodRc2.kind, mcastPodRc2.name, "-n", ns2)
mcastPodRc2.create(oc, "RCNAME="+mcastPodRc2.name, "-n", ns2)
err := waitForPodWithLabelReady(oc, ns1, "name="+mcastPodRc1.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc1.name+" not ready")
err = waitForPodWithLabelReady(oc, ns2, "name="+mcastPodRc2.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc2.name+" not ready")
mcastPodList1 := getPodName(oc, ns1, "name="+mcastPodRc1.name)
mcastPodList2 := getPodName(oc, ns2, "name="+mcastPodRc2.name)
exutil.By("4. enable mulitcast and send multicast traffic in different ns to join a same multicast group")
enableMulticast(oc, ns1)
enableMulticast(oc, ns2)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List1 = getPodIPv4UDNList(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4UDNList(oc, ns2, mcastPodList2)
} else {
ipv4List1 = getPodIPv4List(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4List(oc, ns2, mcastPodList2)
}
chkRes1 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv4List1, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeTrue())
chkRes2 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv4List2, mcastipv4, port)
o.Expect(chkRes2).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List1 = getPodIPv6UDNList(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6UDNList(oc, ns2, mcastPodList2)
} else {
ipv6List1 = getPodIPv6List(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6List(oc, ns2, mcastPodList2)
}
chkRes3 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv6List1, mcastipv6, port)
o.Expect(chkRes3).Should(o.BeTrue())
chkRes4 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv6List2, mcastipv6, port)
o.Expect(chkRes4).Should(o.BeTrue())
}
}
})
g.It("Author:yingwang-High-78382-check CUDN pods should not be able to receive multicast traffic from other pods in different namespace which sharing a same CUDN (layer 3).", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns1 string
ns2 string
key = "test.cudn.layer3"
ipStackType = checkIPStackType(oc)
ipv4List1 []string
ipv6List1 []string
ipv4List2 []string
ipv6List2 []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
port = "4321"
crdName = "cudn-network-78382"
values = []string{"value-78382-1", "value-78382-2"}
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("###1. create 2 namespaces for CUDN")
oc.CreateNamespaceUDN()
ns1 = oc.Namespace()
oc.CreateNamespaceUDN()
ns2 = oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, fmt.Sprintf("%s-", key)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, fmt.Sprintf("%s=%s", key, values[0])).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, fmt.Sprintf("%s-", key)).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, fmt.Sprintf("%s=%s", key, values[1])).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("####2. create CUDN in cudnNS")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
_, err = createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer3", values)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns1 = oc.Namespace()
oc.SetupProject()
ns2 = oc.Namespace()
}
exutil.By("####3. Create 3 multicast testing pods")
mcastPodRc1 := networkingRes{
name: "mcastpod-rc-1",
namespace: ns1,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
mcastPodRc2 := networkingRes{
name: "mcastpod-rc-2",
namespace: ns2,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc1.kind, mcastPodRc1.name, "-n", ns1)
mcastPodRc1.create(oc, "RCNAME="+mcastPodRc1.name, "-n", ns1)
defer removeResource(oc, true, true, mcastPodRc2.kind, mcastPodRc2.name, "-n", ns2)
mcastPodRc2.create(oc, "RCNAME="+mcastPodRc2.name, "-n", ns2)
err := waitForPodWithLabelReady(oc, ns1, "name="+mcastPodRc1.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc1.name+" not ready")
err = waitForPodWithLabelReady(oc, ns2, "name="+mcastPodRc2.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc2.name+" not ready")
mcastPodList1 := getPodName(oc, ns1, "name="+mcastPodRc1.name)
mcastPodList2 := getPodName(oc, ns2, "name="+mcastPodRc2.name)
exutil.By("###4. enable mulitcast and send multicast traffic in different ns to join a same multicast group")
enableMulticast(oc, ns1)
enableMulticast(oc, ns2)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List1 = getPodIPv4UDNList(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4UDNList(oc, ns2, mcastPodList2)
} else {
ipv4List1 = getPodIPv4List(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4List(oc, ns2, mcastPodList2)
}
chkRes1 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv4List1, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeTrue())
chkRes2 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv4List2, mcastipv4, port)
o.Expect(chkRes2).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List1 = getPodIPv6UDNList(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6UDNList(oc, ns2, mcastPodList2)
} else {
ipv6List1 = getPodIPv6List(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6List(oc, ns2, mcastPodList2)
}
chkRes3 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv6List1, mcastipv6, port)
o.Expect(chkRes3).Should(o.BeTrue())
chkRes4 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv6List2, mcastipv6, port)
o.Expect(chkRes4).Should(o.BeTrue())
}
exutil.By("###5. send multicast traffic accross different ns to join a same multicast group")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
var podIPv4_1, podIPv4_2 string
if i == 0 {
podIPv4_1 = getPodIPUDNv4(oc, ns1, mcastPodList1[0], "ovn-udn1")
podIPv4_2 = getPodIPUDNv4(oc, ns2, mcastPodList2[0], "ovn-udn1")
} else {
podIPv4_1 = getPodIPv4(oc, ns1, mcastPodList1[0])
podIPv4_2 = getPodIPv4(oc, ns2, mcastPodList2[0])
}
ipv4List := []string{podIPv4_1, podIPv4_2}
pktFile1 := "/tmp/" + getRandomString() + ".txt"
pktFile2 := "/tmp/" + getRandomString() + ".txt"
//send multicast traffic across different namespaces
startMcastTrafficOnPod(oc, ns1, mcastPodList1[0], ipv4List, pktFile1, mcastipv4, port)
startMcastTrafficOnPod(oc, ns2, mcastPodList2[0], ipv4List, pktFile2, mcastipv4, port)
//add sleep time to make sure traffic completed.
time.Sleep(30 * time.Second)
chkRes1 := chkMcatRcvOnPod(oc, ns1, mcastPodList1[0], podIPv4_1, ipv4List, mcastipv4, pktFile1)
chkRes2 := chkMcatRcvOnPod(oc, ns2, mcastPodList2[0], podIPv4_2, ipv4List, mcastipv4, pktFile2)
o.Expect(chkRes1).Should(o.BeFalse())
o.Expect(chkRes2).Should(o.BeFalse())
}
if ipStackType == "dualstack" || ipStackType == "dualstack" {
var podIPv6_1, podIPv6_2 string
if i == 0 {
podIPv6_1 = getPodIPUDNv6(oc, ns1, mcastPodList1[0], "ovn-udn1")
podIPv6_2 = getPodIPUDNv6(oc, ns2, mcastPodList2[0], "ovn-udn1")
} else {
podIPv6_1 = getPodIPv6(oc, ns1, mcastPodList1[0], ipStackType)
podIPv6_2 = getPodIPv6(oc, ns2, mcastPodList2[0], ipStackType)
}
ipv6List := []string{podIPv6_1, podIPv6_2}
pktFile1 := "/tmp/" + getRandomString() + ".txt"
pktFile2 := "/tmp/" + getRandomString() + ".txt"
//send multicast traffic across different namespaces
startMcastTrafficOnPod(oc, ns1, mcastPodList1[0], ipv6List, pktFile1, mcastipv6, port)
startMcastTrafficOnPod(oc, ns2, mcastPodList2[0], ipv6List, pktFile2, mcastipv6, port)
//add sleep time to make sure traffic completed.
time.Sleep(30 * time.Second)
chkRes1 := chkMcatRcvOnPod(oc, ns1, mcastPodList1[0], podIPv6_1, ipv6List, mcastipv6, pktFile1)
chkRes2 := chkMcatRcvOnPod(oc, ns2, mcastPodList2[0], podIPv6_2, ipv6List, mcastipv6, pktFile2)
o.Expect(chkRes1).Should(o.BeFalse())
o.Expect(chkRes2).Should(o.BeFalse())
}
}
})
})
|
package networking
| ||||
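The chkMcastTraffic/startMcastTrafficOnPod/chkMcatRcvOnPod helpers these cases rely on reduce to one primitive: join a multicast group on a pod and report whether datagrams sent to the group arrive. A self-contained sketch of that primitive with the standard library, using the test's IPv4 group and port; run one process with the send argument and one without to see traffic (on OVN-Kubernetes the receive side only works once the namespace has multicast enabled, which is what enableMulticast toggles):

package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

const group = "232.43.211.234:4321" // same IPv4 group/port the tests use

func main() {
	if len(os.Args) > 1 && os.Args[1] == "send" {
		// Sender: fire a few datagrams at the multicast group.
		conn, err := net.Dial("udp4", group)
		if err != nil {
			panic(err)
		}
		defer conn.Close()
		for i := 0; i < 5; i++ {
			conn.Write([]byte("mcast-hello"))
			time.Sleep(time.Second)
		}
		return
	}
	// Receiver: join the group on the default interface and wait.
	addr, err := net.ResolveUDPAddr("udp4", group)
	if err != nil {
		panic(err)
	}
	conn, err := net.ListenMulticastUDP("udp4", nil, addr)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
	buf := make([]byte, 1500)
	n, src, err := conn.ReadFromUDP(buf)
	if err != nil {
		fmt.Println("no multicast traffic received:", err)
		return
	}
	fmt.Printf("received %q from %v\n", buf[:n], src)
}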
test case
|
openshift/openshift-tests-private
|
a70addad-20d5-465a-9567-a84638a88ca9
|
Author:yingwang-High-78447-udn pods should/should not receive multicast traffic when enable/disable multicast.
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_udn.go
|
g.It("Author:yingwang-High-78447-udn pods should/should not receive multicast traffic when enable/disable multicast.", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns string
ipStackType = checkIPStackType(oc)
ipv4List []string
ipv6List []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
port = "4321"
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("############# test multicast on pods with udn primary interface")
exutil.By("1. create udn namespace")
oc.CreateNamespaceUDN()
ns = oc.Namespace()
exutil.By("2. Create CRD for UDN")
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
createGeneralUDNCRD(oc, ns, "udn-78447", ipv4cidr, ipv6cidr, cidr, "layer3")
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns = oc.Namespace()
}
exutil.By("3. Create 3 multicast testing pods")
mcastPodRc := networkingRes{
name: "mcastpod-rc",
namespace: ns,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc.kind, mcastPodRc.name, "-n", mcastPodRc.namespace)
mcastPodRc.create(oc, "RCNAME="+mcastPodRc.name, "-n", mcastPodRc.namespace)
err := waitForPodWithLabelReady(oc, ns, "name="+mcastPodRc.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc.name+" not ready")
mcastPodList := getPodName(oc, ns, "name="+mcastPodRc.name)
exutil.By("4. check multicast traffic without enable multicast in ns")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List = getPodIPv4UDNList(oc, ns, mcastPodList)
} else {
ipv4List = getPodIPv4List(oc, ns, mcastPodList)
}
chkRes1 := chkMcastTraffic(oc, ns, mcastPodList, ipv4List, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeFalse())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List = getPodIPv6UDNList(oc, ns, mcastPodList)
} else {
ipv6List = getPodIPv6List(oc, ns, mcastPodList)
}
chkRes2 := chkMcastTraffic(oc, ns, mcastPodList, ipv6List, mcastipv6, port)
o.Expect(chkRes2).Should(o.BeFalse())
}
exutil.By("5. enable multicast and check multicast traffic again")
enableMulticast(oc, ns)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
chkRes1 := chkMcastTraffic(oc, ns, mcastPodList, ipv4List, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
chkRes2 := chkMcastTraffic(oc, ns, mcastPodList, ipv6List, mcastipv6, port)
o.Expect(chkRes2).Should(o.BeTrue())
}
exutil.By("6. disable multicast and check multicast traffic again")
disableMulticast(oc, ns)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
chkRes1 := chkMcastTraffic(oc, ns, mcastPodList, ipv4List, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeFalse())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
chkRes2 := chkMcastTraffic(oc, ns, mcastPodList, ipv6List, mcastipv6, port)
o.Expect(chkRes2).Should(o.BeFalse())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
b6a88579-d5b9-4659-ad00-ea6bf11b8d50
|
Author:yingwang-High-78448-udn pods can join different multicast groups at same time.
|
['"context"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_udn.go
|
g.It("Author:yingwang-High-78448-udn pods can join different multicast groups at same time.", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns string
ipStackType = checkIPStackType(oc)
ipv4List []string
ipv6List []string
mcastipv4 = []string{"232.43.211.234", "232.43.211.235", "232.43.211.236"}
mcastipv6 = []string{"ff3e::4321:1234", "ff3e::4321:1235", "ff3e::4321:1236"}
intf string
port = []string{"4321", "4322", "4323"}
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for j := 0; j < 2; j++ {
if j == 0 {
exutil.By("############# test multicast on pods with udn primary interface")
exutil.By("###1. create udn namespace")
oc.CreateNamespaceUDN()
ns = oc.Namespace()
intf = "ovn-udn1"
exutil.By("###2. Create CRD for UDN")
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
createGeneralUDNCRD(oc, ns, "udn-78448", ipv4cidr, ipv6cidr, cidr, "layer3")
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns = oc.Namespace()
intf = "eth0"
}
exutil.By("###3. Create 3 multicast testing pods")
mcastPodRc := networkingRes{
name: "mcastpod-rc",
namespace: ns,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc.kind, mcastPodRc.name, "-n", ns)
mcastPodRc.create(oc, "RCNAME="+mcastPodRc.name, "-n", mcastPodRc.namespace)
err := waitForPodWithLabelReady(oc, ns, "name="+mcastPodRc.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc.name+" not ready")
mcastPodList := getPodName(oc, ns, "name="+mcastPodRc.name)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if j == 0 {
ipv4List = getPodIPv4UDNList(oc, ns, mcastPodList)
} else {
ipv4List = getPodIPv4List(oc, ns, mcastPodList)
}
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if j == 0 {
ipv6List = getPodIPv6UDNList(oc, ns, mcastPodList)
} else {
ipv6List = getPodIPv6List(oc, ns, mcastPodList)
}
}
exutil.By("###4. enable multicast and check multicast traffic")
enableMulticast(oc, ns)
//send multicast traffic to join different multicast groups at the same time
pktFile1 := make([]string, len(mcastPodList))
pktFile2 := make([]string, len(mcastPodList))
pktFile3 := make([]string, len(mcastPodList))
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
for i, podName := range mcastPodList {
pktFile1[i] = "/tmp/" + getRandomString() + ".txt"
pktFile2[i] = "/tmp/" + getRandomString() + ".txt"
pktFile3[i] = "/tmp/" + getRandomString() + ".txt"
startMcastTrafficOnPod(oc, ns, podName, ipv4List, pktFile1[i], mcastipv4[0], port[0])
startMcastTrafficOnPod(oc, ns, podName, ipv4List, pktFile2[i], mcastipv4[1], port[1])
startMcastTrafficOnPod(oc, ns, podName, ipv4List, pktFile3[i], mcastipv4[2], port[2])
}
//add sleep time to make sure traffic started
time.Sleep(5 * time.Second)
//choose one pod to check the multicast ip address
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv4[0])
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv4[1])
chkMcastAddress(oc, ns, mcastPodList[0], intf, mcastipv4[2])
//add sleep time to make sure traffic completed.
time.Sleep(20 * time.Second)
//choose one pod to check the received multicast packets
chkRes1 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv4List[1], ipv4List, mcastipv4[0], pktFile1[1])
chkRes2 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv4List[1], ipv4List, mcastipv4[1], pktFile2[1])
chkRes3 := chkMcatRcvOnPod(oc, ns, mcastPodList[1], ipv4List[1], ipv4List, mcastipv4[2], pktFile3[1])
o.Expect(chkRes1).Should(o.BeTrue())
o.Expect(chkRes2).Should(o.BeTrue())
o.Expect(chkRes3).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
for i, podName := range mcastPodList {
pktFile1[i] = "/tmp/" + getRandomString() + ".txt"
pktFile2[i] = "/tmp/" + getRandomString() + ".txt"
pktFile3[i] = "/tmp/" + getRandomString() + ".txt"
startMcastTrafficOnPod(oc, ns, podName, ipv6List, pktFile1[i], mcastipv6[0], port[0])
startMcastTrafficOnPod(oc, ns, podName, ipv6List, pktFile2[i], mcastipv6[1], port[1])
startMcastTrafficOnPod(oc, ns, podName, ipv6List, pktFile3[i], mcastipv6[2], port[2])
}
//add sleep time to make sure traffic started.
time.Sleep(5 * time.Second)
//choose one pod to check the multicast ipv6 address
chkMcastAddress(oc, ns, mcastPodList[2], intf, mcastipv6[0])
chkMcastAddress(oc, ns, mcastPodList[2], intf, mcastipv6[1])
chkMcastAddress(oc, ns, mcastPodList[2], intf, mcastipv6[2])
//add sleep time to make sure traffic completed.
time.Sleep(20 * time.Second)
//choose one pod to check the received multicast packets
chkRes1 := chkMcatRcvOnPod(oc, ns, mcastPodList[2], ipv6List[2], ipv6List, mcastipv6[0], pktFile1[2])
chkRes2 := chkMcatRcvOnPod(oc, ns, mcastPodList[2], ipv6List[2], ipv6List, mcastipv6[1], pktFile2[2])
chkRes3 := chkMcatRcvOnPod(oc, ns, mcastPodList[2], ipv6List[2], ipv6List, mcastipv6[2], pktFile3[2])
o.Expect(chkRes1).Should(o.BeTrue())
o.Expect(chkRes2).Should(o.BeTrue())
o.Expect(chkRes3).Should(o.BeTrue())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d969917f-e270-4749-bf9c-cd32be7da9e2
|
Author:yingwang-High-78450-Same multicast groups can be created in multiple namespaces with udn configured.
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_udn.go
|
g.It("Author:yingwang-High-78450-Same multicast groups can be created in multiple namespaces with udn configured.", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns1 string
ns2 string
ipStackType = checkIPStackType(oc)
ipv4List1 []string
ipv6List1 []string
ipv4List2 []string
ipv6List2 []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
port = "4321"
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("############# test multicast on pods with udn primary interface")
exutil.By("1. create 2 udn namespaces")
oc.CreateNamespaceUDN()
ns1 = oc.Namespace()
oc.CreateNamespaceUDN()
ns2 = oc.Namespace()
exutil.By("2. Create CRD for UDNs")
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
createGeneralUDNCRD(oc, ns1, "udn-78450-1", ipv4cidr, ipv6cidr, cidr, "layer3")
createGeneralUDNCRD(oc, ns2, "udn-78450-2", ipv4cidr, ipv6cidr, cidr, "layer3")
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns1 = oc.Namespace()
oc.SetupProject()
ns2 = oc.Namespace()
}
exutil.By("3. Create 3 multicast testing pods")
mcastPodRc1 := networkingRes{
name: "mcastpod-rc-1",
namespace: ns1,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
mcastPodRc2 := networkingRes{
name: "mcastpod-rc-2",
namespace: ns2,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc1.kind, mcastPodRc1.name, "-n", ns1)
mcastPodRc1.create(oc, "RCNAME="+mcastPodRc1.name, "-n", ns1)
defer removeResource(oc, true, true, mcastPodRc2.kind, mcastPodRc2.name, "-n", ns2)
mcastPodRc2.create(oc, "RCNAME="+mcastPodRc2.name, "-n", ns2)
err := waitForPodWithLabelReady(oc, ns1, "name="+mcastPodRc1.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc1.name+" not ready")
err = waitForPodWithLabelReady(oc, ns2, "name="+mcastPodRc2.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc2.name+" not ready")
mcastPodList1 := getPodName(oc, ns1, "name="+mcastPodRc1.name)
mcastPodList2 := getPodName(oc, ns2, "name="+mcastPodRc2.name)
exutil.By("4. enable mulitcast and send multicast traffic in different ns to join a same multicast group")
enableMulticast(oc, ns1)
enableMulticast(oc, ns2)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List1 = getPodIPv4UDNList(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4UDNList(oc, ns2, mcastPodList2)
} else {
ipv4List1 = getPodIPv4List(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4List(oc, ns2, mcastPodList2)
}
chkRes1 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv4List1, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeTrue())
chkRes2 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv4List2, mcastipv4, port)
o.Expect(chkRes2).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List1 = getPodIPv6UDNList(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6UDNList(oc, ns2, mcastPodList2)
} else {
ipv6List1 = getPodIPv6List(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6List(oc, ns2, mcastPodList2)
}
chkRes3 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv6List1, mcastipv6, port)
o.Expect(chkRes3).Should(o.BeTrue())
chkRes4 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv6List2, mcastipv6, port)
o.Expect(chkRes4).Should(o.BeTrue())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
fa8c550c-b366-4268-9a45-8b4c6deb1c87
|
Author:yingwang-High-78382-check CUDN pods should not be able to receive multicast traffic from other pods in different namespace which sharing a same CUDN (layer 3).
|
['"context"', '"fmt"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_udn.go
|
g.It("Author:yingwang-High-78382-check CUDN pods should not be able to receive multicast traffic from other pods in different namespace which sharing a same CUDN (layer 3).", func() {
var (
mcastPodTemplate = filepath.Join(testDataDirMcast, "multicast-rc.json")
ns1 string
ns2 string
key = "test.cudn.layer3"
ipStackType = checkIPStackType(oc)
ipv4List1 []string
ipv6List1 []string
ipv4List2 []string
ipv6List2 []string
mcastipv4 = "232.43.211.234"
mcastipv6 = "ff3e::4321:1234"
port = "4321"
crdName = "cudn-network-78382"
values = []string{"value-78382-1", "value-78382-2"}
)
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This test requires at least 2 worker nodes which is not fulfilled. ")
}
//cover both udn and default network. when i ==0, test udn, when i ==1, test default
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("###1. create 2 namespaces for CUDN")
oc.CreateNamespaceUDN()
ns1 = oc.Namespace()
oc.CreateNamespaceUDN()
ns2 = oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, fmt.Sprintf("%s-", key)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, fmt.Sprintf("%s=%s", key, values[0])).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, fmt.Sprintf("%s-", key)).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, fmt.Sprintf("%s=%s", key, values[1])).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("####2. create CUDN in cudnNS")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
_, err = createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer3", values)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
exutil.By("############# test multicast on pods with default interface")
oc.SetupProject()
ns1 = oc.Namespace()
oc.SetupProject()
ns2 = oc.Namespace()
}
exutil.By("####3. Create 3 multicast testing pods")
mcastPodRc1 := networkingRes{
name: "mcastpod-rc-1",
namespace: ns1,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
mcastPodRc2 := networkingRes{
name: "mcastpod-rc-2",
namespace: ns2,
kind: "ReplicationController",
tempfile: mcastPodTemplate,
}
defer removeResource(oc, true, true, mcastPodRc1.kind, mcastPodRc1.name, "-n", ns1)
mcastPodRc1.create(oc, "RCNAME="+mcastPodRc1.name, "-n", ns1)
defer removeResource(oc, true, true, mcastPodRc2.kind, mcastPodRc2.name, "-n", ns2)
mcastPodRc2.create(oc, "RCNAME="+mcastPodRc2.name, "-n", ns2)
err := waitForPodWithLabelReady(oc, ns1, "name="+mcastPodRc1.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc1.name+" not ready")
err = waitForPodWithLabelReady(oc, ns2, "name="+mcastPodRc2.name)
exutil.AssertWaitPollNoErr(err, "pod with label name="+mcastPodRc2.name+" not ready")
mcastPodList1 := getPodName(oc, ns1, "name="+mcastPodRc1.name)
mcastPodList2 := getPodName(oc, ns2, "name="+mcastPodRc2.name)
exutil.By("###4. enable mulitcast and send multicast traffic in different ns to join a same multicast group")
enableMulticast(oc, ns1)
enableMulticast(oc, ns2)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
if i == 0 {
ipv4List1 = getPodIPv4UDNList(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4UDNList(oc, ns2, mcastPodList2)
} else {
ipv4List1 = getPodIPv4List(oc, ns1, mcastPodList1)
ipv4List2 = getPodIPv4List(oc, ns2, mcastPodList2)
}
chkRes1 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv4List1, mcastipv4, port)
o.Expect(chkRes1).Should(o.BeTrue())
chkRes2 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv4List2, mcastipv4, port)
o.Expect(chkRes2).Should(o.BeTrue())
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
if i == 0 {
ipv6List1 = getPodIPv6UDNList(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6UDNList(oc, ns2, mcastPodList2)
} else {
ipv6List1 = getPodIPv6List(oc, ns1, mcastPodList1)
ipv6List2 = getPodIPv6List(oc, ns2, mcastPodList2)
}
chkRes3 := chkMcastTraffic(oc, ns1, mcastPodList1, ipv6List1, mcastipv6, port)
o.Expect(chkRes3).Should(o.BeTrue())
chkRes4 := chkMcastTraffic(oc, ns2, mcastPodList2, ipv6List2, mcastipv6, port)
o.Expect(chkRes4).Should(o.BeTrue())
}
exutil.By("###5. send multicast traffic accross different ns to join a same multicast group")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
var podIPv4_1, podIPv4_2 string
if i == 0 {
podIPv4_1 = getPodIPUDNv4(oc, ns1, mcastPodList1[0], "ovn-udn1")
podIPv4_2 = getPodIPUDNv4(oc, ns2, mcastPodList2[0], "ovn-udn1")
} else {
podIPv4_1 = getPodIPv4(oc, ns1, mcastPodList1[0])
podIPv4_2 = getPodIPv4(oc, ns2, mcastPodList2[0])
}
ipv4List := []string{podIPv4_1, podIPv4_2}
pktFile1 := "/tmp/" + getRandomString() + ".txt"
pktFile2 := "/tmp/" + getRandomString() + ".txt"
// send multicast traffic across the two namespaces
startMcastTrafficOnPod(oc, ns1, mcastPodList1[0], ipv4List, pktFile1, mcastipv4, port)
startMcastTrafficOnPod(oc, ns2, mcastPodList2[0], ipv4List, pktFile2, mcastipv4, port)
// wait long enough for the omping run to complete.
time.Sleep(30 * time.Second)
chkRes1 := chkMcatRcvOnPod(oc, ns1, mcastPodList1[0], podIPv4_1, ipv4List, mcastipv4, pktFile1)
chkRes2 := chkMcatRcvOnPod(oc, ns2, mcastPodList2[0], podIPv4_2, ipv4List, mcastipv4, pktFile2)
o.Expect(chkRes1).Should(o.BeFalse())
o.Expect(chkRes2).Should(o.BeFalse())
}
if ipStackType == "dualstack" || ipStackType == "dualstack" {
var podIPv6_1, podIPv6_2 string
if i == 0 {
podIPv6_1 = getPodIPUDNv6(oc, ns1, mcastPodList1[0], "ovn-udn1")
podIPv6_2 = getPodIPUDNv6(oc, ns2, mcastPodList2[0], "ovn-udn1")
} else {
podIPv6_1 = getPodIPv6(oc, ns1, mcastPodList1[0], ipStackType)
podIPv6_2 = getPodIPv6(oc, ns2, mcastPodList2[0], ipStackType)
}
ipv6List := []string{podIPv6_1, podIPv6_2}
pktFile1 := "/tmp/" + getRandomString() + ".txt"
pktFile2 := "/tmp/" + getRandomString() + ".txt"
// send multicast traffic across the two namespaces
startMcastTrafficOnPod(oc, ns1, mcastPodList1[0], ipv6List, pktFile1, mcastipv6, port)
startMcastTrafficOnPod(oc, ns2, mcastPodList2[0], ipv6List, pktFile2, mcastipv6, port)
// wait long enough for the omping run to complete.
time.Sleep(30 * time.Second)
chkRes1 := chkMcatRcvOnPod(oc, ns1, mcastPodList1[0], podIPv6_1, ipv6List, mcastipv6, pktFile1)
chkRes2 := chkMcatRcvOnPod(oc, ns2, mcastPodList2[0], podIPv6_2, ipv6List, mcastipv6, pktFile2)
o.Expect(chkRes1).Should(o.BeFalse())
o.Expect(chkRes2).Should(o.BeFalse())
}
}
})
| |||||
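The stack-dependent CIDR selection in step 2 above recurs across these UDN cases. Read on its own it reduces to a small helper; the sketch below is illustrative only (no such helper exists in the suite) and uses the same literals as the test:

package main

import "fmt"

// pickCUDNCIDRs mirrors the CIDR selection in step 2 of test 78382:
// single-stack clusters get one joint cidr value, dual-stack clusters get
// separate IPv4/IPv6 ranges. Hypothetical helper, for illustration only.
func pickCUDNCIDRs(ipStackType string) (cidr, ipv4cidr, ipv6cidr string) {
	switch ipStackType {
	case "ipv4single":
		cidr = "10.150.0.0/16"
	case "ipv6single":
		cidr = "2010:100:200::0/60"
	default: // dualstack
		ipv4cidr = "10.150.0.0/16"
		ipv6cidr = "2010:100:200::0/60"
	}
	return
}

func main() {
	fmt.Println(pickCUDNCIDRs("dualstack"))
}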
file
|
openshift/openshift-tests-private
|
ee45c370-508e-4f46-a06f-c784bce8456d
|
multicast_util
|
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
package networking
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
// send omping traffic on all multicast pods
func chkMcastTraffic(oc *exutil.CLI, namespace string, podList []string, ipList []string, mcastip string, port string) bool {
pktFile := make([]string, len(podList))
// run omping on each multicast pod in parallel
for i, podName := range podList {
pktFile[i] = "/tmp/" + getRandomString() + ".txt"
startMcastTrafficOnPod(oc, namespace, podName, ipList, pktFile[i], mcastip, port)
}
// wait for omping packets to be sent and received.
time.Sleep(30 * time.Second)
// check omping send/receive results
for i, podName := range podList {
if !chkMcatRcvOnPod(oc, namespace, podName, ipList[i], ipList, mcastip, pktFile[i]) {
return false
}
}
return true
}
// send multicast traffic via omping
func startMcastTrafficOnPod(oc *exutil.CLI, ns string, pod string, ipList []string, pktfile string, mcastip string, port string) {
ipStr := strings.Join(ipList, " ")
if port == "" {
port = "4321"
}
go func() {
ompingCmd := "omping " + "-q " + "-p " + port + " -c 20 -T 20 -m " + mcastip + " " + ipStr + " > " + fmt.Sprintf("%s", pktfile) + " &"
_, err := e2eoutput.RunHostCmd(ns, pod, ompingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}()
}
func chkMcatRcvOnPod(oc *exutil.CLI, ns string, pod string, podip string, iplist []string, mcastip string, pktfile string) bool {
catCmd := "cat " + fmt.Sprintf("%s", pktfile)
outPut, err := e2eoutput.RunHostCmd(ns, pod, catCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(outPut).NotTo(o.BeEmpty())
for _, neighborip := range iplist {
if neighborip != podip {
reg1 := regexp.MustCompile(neighborip + `.*joined \(S,G\) = \(\*,\s*` + mcastip + `\), pinging`)
reg2 := regexp.MustCompile(neighborip + `.*multicast, xmt/rcv/%loss = \d+/(\d+)/\d+%`)
match1 := reg1.MatchString(outPut)
match2 := reg2.FindStringSubmatch(outPut)
o.Expect(match2).NotTo(o.BeNil())
pktNum, _ := strconv.Atoi(match2[1])
e2e.Logf("Received packets on pod %v from ip %v is %v", pod, neighborip, pktNum)
if pktNum == 0 || !match1 {
return false
}
}
}
return true
}
// get ipv4 addresses of udn pods
func getPodIPv4UDNList(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv6single"))
for _, podName := range podList {
podIP1, podIP2 := getPodIPUDN(oc, namespace, podName, "ovn-udn1")
if ipStackType == "dualstack" {
ipList = append(ipList, podIP2)
} else {
ipList = append(ipList, podIP1)
}
}
e2e.Logf("The ipv4list for pods is %v", ipList)
return ipList
}
// get ipv6 addresses of udn pods
func getPodIPv6UDNList(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv4single"))
for _, podName := range podList {
podIP1, _ := getPodIPUDN(oc, namespace, podName, "ovn-udn1")
ipList = append(ipList, podIP1)
}
e2e.Logf("The ipv6list for pods is %v", ipList)
return ipList
}
// get ipv4 addresses of default pods
func getPodIPv4List(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv6single"))
for _, podName := range podList {
podIP := getPodIPv4(oc, namespace, podName)
ipList = append(ipList, podIP)
}
e2e.Logf("The ipv4list for pods is %v", ipList)
return ipList
}
// get ipv6 addresses of default pods
func getPodIPv6List(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv4single"))
for _, podName := range podList {
podIP := getPodIPv6(oc, namespace, podName, ipStackType)
ipList = append(ipList, podIP)
}
e2e.Logf("The ipv6list for pods is %v", ipList)
return ipList
}
// check netstat during sending multicast traffic
func chkMcastAddress(oc *exutil.CLI, ns string, pod string, intf string, mcastip string) {
netstatCmd := "netstat -ng"
outPut, err := e2eoutput.RunHostCmd(ns, pod, netstatCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("netstat result is %v: /n", outPut)
reg := regexp.MustCompile(intf + `\s+\d+\s+` + mcastip)
matchRes := reg.MatchString(outPut)
o.Expect(matchRes).Should(o.BeTrue())
}
// disable multicast on specific namespace
func disableMulticast(oc *exutil.CLI, ns string) {
_, err := runOcWithRetry(oc.AsAdmin().WithoutNamespace(), "annotate", "namespace", ns, "k8s.ovn.org/multicast-enabled-")
o.Expect(err).NotTo(o.HaveOccurred())
}
// getPodIPUDNv4 returns IPv4 address of specific interface
func getPodIPUDNv4(oc *exutil.CLI, namespace string, podName string, netName string) string {
ipStack := checkIPStackType(oc)
ip_1, ip_2 := getPodIPUDN(oc, namespace, podName, netName)
if ipStack == "ipv4single" {
return ip_1
} else if ipStack == "dualstack" {
return ip_2
} else {
return ""
}
}
// getPodIPUDNv6 returns IPv6 address of specific interface
func getPodIPUDNv6(oc *exutil.CLI, namespace string, podName string, netName string) string {
ipStack := checkIPStackType(oc)
ip_1, _ := getPodIPUDN(oc, namespace, podName, netName)
if ipStack == "ipv6single" || ipStack == "dualstack" {
return ip_1
} else {
return ""
}
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
450321f6-0108-49ba-82c5-780c793454ca
|
chkMcastTraffic
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func chkMcastTraffic(oc *exutil.CLI, namespace string, podList []string, ipList []string, mcastip string, port string) bool {
pktFile := make([]string, len(podList))
// run omping on each multicast pod in parallel
for i, podName := range podList {
pktFile[i] = "/tmp/" + getRandomString() + ".txt"
startMcastTrafficOnPod(oc, namespace, podName, ipList, pktFile[i], mcastip, port)
}
// wait for omping packets to be sent and received.
time.Sleep(30 * time.Second)
// check omping send/receive results
for i, podName := range podList {
if !chkMcatRcvOnPod(oc, namespace, podName, ipList[i], ipList, mcastip, pktFile[i]) {
return false
}
}
return true
}
|
networking
| ||||
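chkMcastTraffic follows a fan-out-then-verify shape: start one omping sender per pod, wait a fixed window sized to the senders' "-c 20 -T 20" run, then check every receiver's counters. A stripped-down, runnable sketch of that shape (pod names are placeholders; the real helper shells into pods instead of printing):

package main

import (
	"fmt"
	"time"
)

func main() {
	pods := []string{"mcastpod-1", "mcastpod-2", "mcastpod-3"}
	for _, p := range pods {
		go func(pod string) {
			fmt.Println("omping started on", pod) // stand-in for startMcastTrafficOnPod
		}(p)
	}
	time.Sleep(2 * time.Second) // stand-in for the fixed 30s settle window
	for _, p := range pods {
		fmt.Println("checking receive counters on", p) // stand-in for chkMcatRcvOnPod
	}
}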
function
|
openshift/openshift-tests-private
|
55eb11ea-e821-485d-b8fc-c46a6e585cdb
|
startMcastTrafficOnPod
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func startMcastTrafficOnPod(oc *exutil.CLI, ns string, pod string, ipList []string, pktfile string, mcastip string, port string) {
ipStr := strings.Join(ipList, " ")
if port == "" {
port = "4321"
}
go func() {
ompingCmd := "omping " + "-q " + "-p " + port + " -c 20 -T 20 -m " + mcastip + " " + ipStr + " > " + fmt.Sprintf("%s", pktfile) + " &"
_, err := e2eoutput.RunHostCmd(ns, pod, ompingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}()
}
|
networking
| ||||
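For reference, this is the command line the helper assembles, shown with sample pod addresses (the flags are the ones used above: -q quiets per-probe output, -c/-T bound the probe count and runtime, -m sets the group, and the trailing "&" plus the goroutine keep the 20-second run from blocking the test):

package main

import (
	"fmt"
	"strings"
)

func main() {
	ipList := []string{"10.128.2.10", "10.131.0.15"} // sample addresses
	cmd := "omping -q -p 4321 -c 20 -T 20 -m 232.43.211.234 " +
		strings.Join(ipList, " ") + " > /tmp/x.txt &"
	fmt.Println(cmd)
}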
function
|
openshift/openshift-tests-private
|
3c1af95c-bf80-4f23-9aac-a1a18394a121
|
chkMcatRcvOnPod
|
['"fmt"', '"regexp"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func chkMcatRcvOnPod(oc *exutil.CLI, ns string, pod string, podip string, iplist []string, mcastip string, pktfile string) bool {
catCmd := "cat " + fmt.Sprintf("%s", pktfile)
outPut, err := e2eoutput.RunHostCmd(ns, pod, catCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(outPut).NotTo(o.BeEmpty())
for _, neighborip := range iplist {
if neighborip != podip {
reg1 := regexp.MustCompile(neighborip + `.*joined \(S,G\) = \(\*,\s*` + mcastip + `\), pinging`)
reg2 := regexp.MustCompile(neighborip + `.*multicast, xmt/rcv/%loss = \d+/(\d+)/\d+%`)
match1 := reg1.MatchString(outPut)
match2 := reg2.FindStringSubmatch(outPut)
o.Expect(match2).NotTo(o.BeNil())
pktNum, _ := strconv.Atoi(match2[1])
e2e.Logf("Received packets on pod %v from ip %v is %v", pod, neighborip, pktNum)
if pktNum == 0 || !match1 {
return false
}
}
}
return true
}
|
networking
| ||||
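The second regex above pulls the "rcv" counter out of omping's xmt/rcv/%loss summary. A runnable sketch of just that extraction against an illustrative report line (real omping output may differ in spacing):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	out := "10.131.0.15 : multicast, xmt/rcv/%loss = 20/19/5%"
	re := regexp.MustCompile(`10\.131\.0\.15.*multicast, xmt/rcv/%loss = \d+/(\d+)/\d+%`)
	m := re.FindStringSubmatch(out)
	if m == nil {
		fmt.Println("no summary line found")
		return
	}
	rcv, _ := strconv.Atoi(m[1]) // m[1] is the captured rcv count
	fmt.Println("received:", rcv) // zero received packets means the group is isolated
}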
function
|
openshift/openshift-tests-private
|
b58f4247-614d-4bef-b5fa-e0d1e153d60f
|
getPodIPv4UDNList
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func getPodIPv4UDNList(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv6single"))
for _, podName := range podList {
podIP1, podIP2 := getPodIPUDN(oc, namespace, podName, "ovn-udn1")
if ipStackType == "dualstack" {
ipList = append(ipList, podIP2)
} else {
ipList = append(ipList, podIP1)
}
}
e2e.Logf("The ipv4list for pods is %v", ipList)
return ipList
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
3ccae66c-8565-4fcc-ae85-352c77038d3c
|
getPodIPv6UDNList
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func getPodIPv6UDNList(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv4single"))
for _, podName := range podList {
podIP1, _ := getPodIPUDN(oc, namespace, podName, "ovn-udn1")
ipList = append(ipList, podIP1)
}
e2e.Logf("The ipv6list for pods is %v", ipList)
return ipList
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
4fc6ae88-f85d-4f16-a9dc-b086060a3614
|
getPodIPv4List
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func getPodIPv4List(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv6single"))
for _, podName := range podList {
podIP := getPodIPv4(oc, namespace, podName)
ipList = append(ipList, podIP)
}
e2e.Logf("The ipv4list for pods is %v", ipList)
return ipList
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
0bd3ab31-945b-42b4-a3a7-1fec767f52e5
|
getPodIPv6List
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func getPodIPv6List(oc *exutil.CLI, namespace string, podList []string) []string {
var ipList []string
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).ShouldNot(o.Equal("ipv4single"))
for _, podName := range podList {
podIP := getPodIPv6(oc, namespace, podName, ipStackType)
ipList = append(ipList, podIP)
}
e2e.Logf("The ipv6list for pods is %v", ipList)
return ipList
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
3aa41418-bbfb-48dd-8757-a56b8b1081d9
|
chkMcastAddress
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func chkMcastAddress(oc *exutil.CLI, ns string, pod string, intf string, mcastip string) {
netstatCmd := "netstat -ng"
outPut, err := e2eoutput.RunHostCmd(ns, pod, netstatCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("netstat result is %v: /n", outPut)
reg := regexp.MustCompile(intf + `\s+\d+\s+` + mcastip)
matchRes := reg.MatchString(outPut)
o.Expect(matchRes).Should(o.BeTrue())
}
|
networking
| ||||
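The pattern here expects "netstat -ng" to list the interface, a reference count, and the joined group. A runnable sketch against an illustrative excerpt (the column layout is an assumption; real output varies by distribution):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	out := `IPv6/IPv4 Group Memberships
Interface       RefCnt Group
--------------- ------ ---------------------
net1            1      232.43.211.234`
	re := regexp.MustCompile(`net1\s+\d+\s+232\.43\.211\.234`)
	fmt.Println("joined:", re.MatchString(out)) // true when the pod joined the group
}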
function
|
openshift/openshift-tests-private
|
2fec6e5e-87b4-4043-bf91-d092e0e0a6df
|
disableMulticast
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func disableMulticast(oc *exutil.CLI, ns string) {
_, err := runOcWithRetry(oc.AsAdmin().WithoutNamespace(), "annotate", "namespace", ns, "k8s.ovn.org/multicast-enabled-")
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| |||||
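disableMulticast removes the k8s.ovn.org/multicast-enabled namespace annotation; the enable-side counterpart used throughout these tests is defined elsewhere in the suite. A sketch of the shape it presumably has (the annotation key comes from the removal above; the "true" value and --overwrite flag are assumptions):

// Hypothetical mirror of disableMulticast; not the suite's actual helper.
func enableMulticastSketch(oc *exutil.CLI, ns string) {
	_, err := runOcWithRetry(oc.AsAdmin().WithoutNamespace(), "annotate",
		"namespace", ns, "k8s.ovn.org/multicast-enabled=true", "--overwrite")
	o.Expect(err).NotTo(o.HaveOccurred())
}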
function
|
openshift/openshift-tests-private
|
c96dffd4-0a36-4727-beba-59a4870805f3
|
getPodIPUDNv4
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func getPodIPUDNv4(oc *exutil.CLI, namespace string, podName string, netName string) string {
ipStack := checkIPStackType(oc)
ip_1, ip_2 := getPodIPUDN(oc, namespace, podName, netName)
if ipStack == "ipv4single" {
return ip_1
} else if ipStack == "dualstack" {
return ip_2
} else {
return ""
}
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
203cb1d6-9991-4c44-88e8-dff60906bb50
|
getPodIPUDNv6
|
github.com/openshift/openshift-tests-private/test/extended/networking/multicast_util.go
|
func getPodIPUDNv6(oc *exutil.CLI, namespace string, podName string, netName string) string {
ipStack := checkIPStackType(oc)
ip_1, _ := getPodIPUDN(oc, namespace, podName, netName)
if ipStack == "ipv6single" || ipStack == "dualstack" {
return ip_1
} else {
return ""
}
}
|
networking
| |||||
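An alternative to the stack-type branching in these two wrappers is to classify each returned address directly; the suite keys off checkIPStackType instead, so this is only a sketch of the other approach:

package main

import (
	"fmt"
	"net"
)

// isIPv4 reports whether s parses as an IPv4 address.
func isIPv4(s string) bool {
	ip := net.ParseIP(s)
	return ip != nil && ip.To4() != nil
}

func main() {
	for _, addr := range []string{"10.150.0.5", "2010:100:200::5"} {
		fmt.Printf("%s ipv4=%v\n", addr, isIPv4(addr))
	}
}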
test
|
openshift/openshift-tests-private
|
cc9fc454-066a-41a1-83a4-28c527394ef2
|
multus
|
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN multus", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-multus", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
// OCP-46387 failed in 4.14 due to https://issues.redhat.com/browse/OCPBUGS-11082 and https://issues.redhat.com/browse/NP-752
// Enable this case until Dev fix the issue
/*
// author: [email protected]
g.It("Author:weliang-Medium-46387-[BZ 1896533] network operator degraded due to additionalNetwork in non-existent namespace. [Disruptive]", func() {
var (
patchSResource = "networks.operator.openshift.io/cluster"
patchInfo = fmt.Sprintf("{\"spec\":{\"additionalNetworks\": [{\"name\": \"secondary\",\"namespace\":\"ocp-46387\",\"simpleMacvlanConfig\": {\"ipamConfig\": {\"staticIPAMConfig\": {\"addresses\": [{\"address\": \"10.1.1.0/24\"}] },\"type\": \"static\"}},\"type\": \"SimpleMacvlan\"}]}}")
)
g.By("create new namespace")
namespace := fmt.Sprintf("ocp-46387")
err := oc.Run("new-project").Args(namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("project", namespace, "--ignore-not-found").Execute()
g.By("Configure network-attach-definition through network operator")
patchResourceAsAdmin(oc, patchSResource, patchInfo)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", `[{"op": "remove", "path": "/spec/additionalNetworks"}]`, "--type=json").Execute()
//Testing will exit if the network operator is in an abnormal state during the 60-second operator check.
g.By("Check NetworkOperatorStatus")
checkNetworkOperatorState(oc, 10, 60)
g.By("Delete the namespace")
nsErr := oc.AsAdmin().Run("delete").Args("project", namespace, "--ignore-not-found").Execute()
o.Expect(nsErr).NotTo(o.HaveOccurred())
//Testing will exit if the network operator is in an abnormal state during the 60-second operator check.
g.By("Check NetworkOperatorStatus after deleting namespace")
checkNetworkOperatorState(oc, 10, 60)
})
*/
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-High-57589-Whereabouts CNI timesout while iterating exclude range", func() {
//https://issues.redhat.com/browse/OCPBUGS-2948 : Whereabouts CNI timesout while iterating exclude range
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
netAttachDefFile1 = filepath.Join(buildPruningBaseDir, "multus/ipv6-excludes-largeranges-NAD.yaml")
multusPodTemplate = filepath.Join(buildPruningBaseDir, "multinetworkpolicy/MultiNetworkPolicy-pod-template.yaml")
)
ns1 := oc.Namespace()
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", netAttachDefFile1, "-n", ns1).Execute()
netAttachDefErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile1, "-n", ns1).Execute()
o.Expect(netAttachDefErr).NotTo(o.HaveOccurred())
netAttachDefOutput, netAttachDefOutputErr := oc.Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(netAttachDefOutputErr).NotTo(o.HaveOccurred())
o.Expect(netAttachDefOutput).To(o.ContainSubstring("nad-w-excludes"))
g.By("Create a multus pod to use above network-attach-defintion")
ns1MultusPod1 := testPodMultinetwork{
name: "ns1-multuspod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
nadname: "nad-w-excludes",
labelname: "blue-multuspod",
template: multusPodTemplate,
}
ns1MultusPod1.createTestPodMultinetwork(oc)
waitPodReady(oc, ns1MultusPod1.namespace, ns1MultusPod1.name)
g.By("check the created multus pod to get the right ipv6 CIDR")
multusPodIPv6 := getPodMultiNetworkIPv6(oc, ns1, ns1MultusPod1.name)
e2e.Logf("The v6 address of pod's second interface is: %v", multusPodIPv6)
o.Expect(strings.HasPrefix(multusPodIPv6, "fd43:11f1:3daa:bbaa::")).Should(o.BeTrue())
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-High-59875-Configure ignored namespaces into multus-admission-controller", func() {
//https://issues.redhat.com/browse/OCPBUGS-6499:Configure ignored namespaces into multus-admission-controller
ns1 := "openshift-multus"
expectedOutput := "-ignore-namespaces"
g.By("Check multus-admission-controller is configured with ignore-namespaces")
multusOutput, multusErr := oc.AsAdmin().Run("get").Args("deployment.apps/multus-admission-controller", "-n", ns1, "-o=jsonpath={.spec.template.spec.containers[0].command[2]}").Output()
exutil.AssertWaitPollNoErr(multusErr, "The deployment.apps/multus-admission-controller is not created")
o.Expect(multusOutput).To(o.ContainSubstring(expectedOutput))
g.By("Check all multus-additional-cni-plugins pods are Running well")
o.Expect(waitForPodWithLabelReady(oc, ns1, "app=multus-additional-cni-plugins")).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-59440-Verify whereabouts-reconciler after creating additionalNetworks. [Serial]", func() {
var (
patchSResource = "networks.operator.openshift.io/cluster"
patchInfo = `{"spec":{ "additionalNetworks": [{"name": "whereabouts-shim", "namespace": "default","rawCNIConfig":"{\"cniVersion\":\"0.3.0\",\"type\":\"bridge\",\"name\":\"cnitest0\",\"ipam\": {\"type\":\"whereabouts\",\"subnet\":\"192.0.2.0/24\"}}","type":"Raw"}]}}`
ns = "openshift-multus"
)
g.By("Check there are no whereabouts-reconciler pods and ds in the openshift-multus namespace before creating additionalNetworks ")
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", "app=whereabouts-reconciler", "-ojsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(podStatus).To(o.BeEmpty())
_, dsErrBefore := oc.AsAdmin().Run("get").Args("daemonset.apps/whereabouts-reconciler", "-n", ns).Output()
o.Expect(dsErrBefore).To(o.HaveOccurred())
g.By("Add additionalNetworks through network operator")
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", `[{"op": "remove", "path": "/spec/additionalNetworks"}]`, "--type=json").Execute()
g.By("Check NetworkOperatorStatus to ensure the cluster is health after modification")
checkNetworkOperatorState(oc, 10, 60)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfo)
g.By("Check whereabouts-reconciler pods and ds are created in the openshift-multus namespace after creating additionalNetworks ")
o.Expect(waitForPodWithLabelReady(oc, ns, "app=whereabouts-reconciler")).NotTo(o.HaveOccurred())
dsOutput, dsErrAfter := oc.AsAdmin().Run("get").Args("daemonset.apps/whereabouts-reconciler", "-n", ns).Output()
o.Expect(dsErrAfter).NotTo(o.HaveOccurred())
o.Expect(dsOutput).To(o.ContainSubstring("whereabouts-reconciler"))
g.By("Check there are no whereabouts-reconciler pods and ds in the openshift-multus namespace after deleting additionalNetworks ")
oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", `[{"op": "remove", "path": "/spec/additionalNetworks"}]`, "--type=json").Execute()
o.Eventually(func() bool {
result := true
_, err := oc.AsAdmin().Run("get").Args("pod", "-n", ns, "-l", "app=whereabouts-reconciler").Output()
if err != nil {
e2e.Logf("Wait for whereabouts-reconciler pods to be deleted")
result = false
}
return result
}, "60s", "5s").Should(o.BeTrue(), fmt.Sprintf("whereabouts-reconciler pods are not deleted"))
o.Eventually(func() bool {
result := true
_, err := oc.AsAdmin().Run("get").Args("daemonset.apps/whereabouts-reconciler", "-n", ns).Output()
if err != nil {
e2e.Logf("Wait for daemonset.apps/whereabouts-reconciler to be deleted")
result = false
}
return result
}, "60s", "5s").Should(o.BeTrue(), fmt.Sprintf("daemonset.apps/whereabouts-reconciler is not deleted"))
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-64958-Unable to set default-route when istio sidecar is injected. [Serial]", func() {
//https://issues.redhat.com/browse/OCPBUGS-7844
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
netAttachDefFile = filepath.Join(buildPruningBaseDir, "multus/istiosidecar-NAD.yaml")
testPod = filepath.Join(buildPruningBaseDir, "multus/istiosidecar-pod.yaml")
)
exutil.By("Create a new namespace")
ns1 := "test-64958"
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns1)
oc.CreateSpecifiedNamespaceAsAdmin(ns1)
exutil.By("Create a custom resource network-attach-defintion in the namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", netAttachDefFile, "-n", ns1).Execute()
netAttachDefErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns1).Execute()
o.Expect(netAttachDefErr).NotTo(o.HaveOccurred())
netAttachDefOutput, netAttachDefOutputErr := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(netAttachDefOutputErr).NotTo(o.HaveOccurred())
o.Expect(netAttachDefOutput).To(o.ContainSubstring("test-nad"))
exutil.By("Create a pod consuming above network-attach-defintion in ns1")
createResourceFromFile(oc, ns1, testPod)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=testpod")).NotTo(o.HaveOccurred(), "The test pod in ns/%s is not ready", ns1)
exutil.By("Check the default-route is created when istio sidecar is injected")
routeLog, routeErr := execCommandInSpecificPod(oc, ns1, "testpod", "ip route")
o.Expect(routeErr).NotTo(o.HaveOccurred())
o.Expect(routeLog).To(o.ContainSubstring("default via 172.19.55.99 dev net1"))
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:weliang-Medium-66876-Support Dual Stack IP assignment for whereabouts CNI/IPAM", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
nadName = "dualstack"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes")
}
exutil.By("Get the name of namespace")
ns1 := oc.Namespace()
exutil.By("Create a custom resource network-attach-defintion in the test namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := dualstackNAD{
nadname: nadName,
namespace: ns1,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "192.168.10.0/24",
ipv6range: "fd00:dead:beef:10::/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
nad1ns1.createDualstackNAD(oc)
exutil.By("Check if the network-attach-defintion is created")
if checkNAD(oc, ns1, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "dualstack-pod-1",
namespace: ns1,
podlabel: "dualstack-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=dualstack-pod1")).NotTo(o.HaveOccurred())
exutil.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "dualstack-pod-2",
namespace: ns1,
podlabel: "dualstack-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=dualstack-pod2")).NotTo(o.HaveOccurred())
exutil.By("Get two pods' name")
podList, podListErr := exutil.GetAllPods(oc, ns1)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podList)).Should(o.Equal(2))
exutil.By("Get IPs of the pod1ns1's secondary interface in first namespace.")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, podList[0])
e2e.Logf("The v4 address of pod1ns1is: %v", pod1ns1IPv4)
e2e.Logf("The v6 address of pod1ns1is: %v", pod1ns1IPv6)
exutil.By("Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, podList[1])
e2e.Logf("The v4 address of pod2ns1is: %v", pod2ns1IPv4)
e2e.Logf("The v6 address of pod2ns1is: %v", pod2ns1IPv6)
g.By("Both ipv4 and ipv6 curl should pass between two pods")
curlPod2PodMultiNetworkPass(oc, ns1, podList[0], pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[1], pod1ns1IPv4, pod1ns1IPv6)
})
g.It("NonHyperShiftHOST-Author:weliang-Medium-69947-The macvlan pod will send Unsolicited Neighbor Advertisements after it is created", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
nadName = "whereabouts-dualstack"
sniffMultusPodTemplate = filepath.Join(buildPruningBaseDir, "multus/sniff-multus-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Get the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Create a custom resource network-attach-defintion")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
nadns := dualstackNAD{
nadname: nadName,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "192.168.10.0/24",
ipv6range: "fd00:dead:beef:10::/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
nadns.createDualstackNAD(oc)
exutil.By("Check if the network-attach-defintion is created")
if checkNAD(oc, ns, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Create a sniff pod to capture the traffic from pod's secondary network")
pod1 := testPodMultinetwork{
name: "sniff-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
nadname: nadName,
labelname: "sniff-pod",
template: sniffMultusPodTemplate,
}
pod1.createTestPodMultinetwork(oc)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name="+pod1.labelname), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", pod1.labelname))
exutil.By("The sniff pod start to capture the Unsolicited Neighbor Advertisements from pod's secondary network")
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod1.labelname, "bash", "-c",
`timeout --preserve-status 30 tcpdump -e -i net1 icmp6 and icmp6[0] = 136 -nvvv`).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a pod consuming above network-attach-defintion")
pod2 := testMultihomingPod{
name: "dualstack-pod",
namespace: ns,
podlabel: "dualstack-pod",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name="+pod2.podlabel), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", pod2.podlabel))
exutil.By("The sniff pod will get Unsolicited Neighbor Advertisements, not neighbor solicitation")
cmdErr := cmdTcpdump.Wait()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(cmdOutput.String(), "Flags [solicited]")).NotTo(o.BeTrue(), cmdOutput.String())
})
g.It("Author:weliang-Medium-72202-[Multus] NAD without configuring network_name. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
nad1Name = "ip-overlapping-1"
nad2Name = "ip-overlapping-2"
pod1Name = "ip-overlapping-pod1"
pod2Name = "ip-overlapping-pod2"
ipv4range1 = "192.168.20.0/29"
ipv4range2 = "192.168.20.0/24"
interfaceName = "net1"
whereaboutsoverlappingIPNADTemplate = filepath.Join(buildPruningBaseDir, "multus/whereabouts-overlappingIP-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Get the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Configuring first NetworkAttachmentDefinition")
defer removeResource(oc, true, true, "net-attach-def", nad1Name, "-n", ns)
nad1 := whereaboutsoverlappingIPNAD{
nadname: nad1Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range1,
enableoverlapping: true,
networkname: "",
template: whereaboutsoverlappingIPNADTemplate,
}
nad1.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad1Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad1Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad1Name)
}
exutil.By("Configuring pods to get additional network defined in first NAD")
nad1pod := testMultihomingPod{
name: pod1Name,
namespace: ns,
podlabel: pod1Name,
nadname: nad1Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad1pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Configuring second NetworkAttachmentDefinition with setting true for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2 := whereaboutsoverlappingIPNAD{
nadname: nad2Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range2,
enableoverlapping: true,
networkname: "",
template: whereaboutsoverlappingIPNADTemplate,
}
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the second NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad2Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad2Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad2Name)
}
exutil.By("Configuring pods for additional network defined in second NAD")
nad2pod := testMultihomingPod{
name: pod2Name,
namespace: ns,
podlabel: pod2Name,
nadname: nad2Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
ippool1 := "192.168.20.0-29"
ippool2 := "192.168.20.0-24"
ipaddress1 := "192.168.20.1"
ipaddress2 := "192.168.20.2"
exutil.By("Verifing the correct network_names from ippools")
ippoolsOutput, ippoolsOutputErr := oc.AsAdmin().Run("get").Args("ippools", "-n", "openshift-multus").Output()
o.Expect(ippoolsOutputErr).NotTo(o.HaveOccurred())
o.Expect(ippoolsOutput).To(o.And(o.ContainSubstring(ippool1), o.ContainSubstring(ippool2)))
exutil.By("Verifing there are no ip overlapping IP addresses from overlappingrangeipreservations")
overlappingrangeOutput, overlappingrangeOutputErr := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr).NotTo(o.HaveOccurred())
o.Expect(overlappingrangeOutput).To(o.And(o.ContainSubstring(ipaddress1), o.ContainSubstring(ipaddress2)))
exutil.By("Getting IP from pod1's secondary interface")
pod1List, getPod1Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod1Err).NotTo(o.HaveOccurred())
o.Expect(len(pod1List)).NotTo(o.BeEquivalentTo(0))
pod1Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod1List[0], interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
o.Expect(strings.HasPrefix(pod1Net1IPv4, ipaddress1)).Should(o.BeTrue())
exutil.By("Getting IP from pod2's secondary interface")
pod2List, getPod2Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err).NotTo(o.HaveOccurred())
o.Expect(len(pod2List)).NotTo(o.BeEquivalentTo(0))
pod2Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod2List[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
o.Expect(strings.HasPrefix(pod2Net1IPv4, ipaddress2)).Should(o.BeTrue())
exutil.By("Deleting the second NetworkAttachmentDefinition and responding pods")
removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
removeResource(oc, true, true, "pod", pod2List[0], "-n", ns)
exutil.By("Deleting the secondary network_name from ippools")
removeResource(oc, true, true, "ippools", ippool2, "-n", "openshift-multus")
exutil.By("Reconfiguring second NetworkAttachmentDefinition with setting false for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2.enableoverlapping = false
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Reconfiguring pods for additional network defined in second NAD")
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Verifing these is only one IP in overlappingrangeipreservations")
overlappingrangeOutput1, overlappingrangeOutputErr1 := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr1).NotTo(o.HaveOccurred())
o.Expect(overlappingrangeOutput1).To(o.ContainSubstring(ipaddress1))
o.Expect(overlappingrangeOutput1).NotTo(o.ContainSubstring(ipaddress2))
exutil.By("Getting IP from pod2's secondary interface")
podList2, getPod2Err2 := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err2).NotTo(o.HaveOccurred())
o.Expect(len(podList2)).NotTo(o.BeEquivalentTo(0))
pod3Net1IPv4, _ := getPodMultiNetworks(oc, ns, podList2[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod3Net1IPv4)
o.Expect(strings.HasPrefix(pod3Net1IPv4, ipaddress1)).Should(o.BeTrue())
})
g.It("Author:weliang-Medium-72203-[Multus] NAD using same network_name. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
nad1Name = "ip-overlapping-1"
nad2Name = "ip-overlapping-2"
pod1Name = "ip-overlapping-pod1"
pod2Name = "ip-overlapping-pod2"
ipv4range1 = "192.168.20.0/29"
ipv4range2 = "192.168.20.0/24"
interfaceName = "net1"
networkName = "blue-net"
whereaboutsoverlappingIPNADTemplate = filepath.Join(buildPruningBaseDir, "multus/whereabouts-overlappingIP-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Get the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Configuring first NetworkAttachmentDefinition")
defer removeResource(oc, true, true, "net-attach-def", nad1Name, "-n", ns)
nad1 := whereaboutsoverlappingIPNAD{
nadname: nad1Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range1,
enableoverlapping: true,
networkname: networkName,
template: whereaboutsoverlappingIPNADTemplate,
}
nad1.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad1Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad1Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad1Name)
}
exutil.By("Configuring pods to get additional network defined in first NAD")
nad1pod := testMultihomingPod{
name: pod1Name,
namespace: ns,
podlabel: pod1Name,
nadname: nad1Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad1pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Configuring second NetworkAttachmentDefinition with setting true for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2 := whereaboutsoverlappingIPNAD{
nadname: nad2Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range2,
enableoverlapping: true,
networkname: networkName,
template: whereaboutsoverlappingIPNADTemplate,
}
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the second NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad2Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad2Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad2Name)
}
exutil.By("Configuring pods for additional network defined in second NAD")
nad2pod := testMultihomingPod{
name: pod2Name,
namespace: ns,
podlabel: pod2Name,
nadname: nad2Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
ippool1 := "192.168.20.0-29"
ippool2 := "192.168.20.0-24"
ipaddress1 := "192.168.20.1"
ipaddress2 := "192.168.20.2"
exutil.By("Verifing the correct network_names from ippools")
ippoolsOutput, ippoolsOutputErr := oc.AsAdmin().Run("get").Args("ippools", "-n", "openshift-multus").Output()
o.Expect(ippoolsOutputErr).NotTo(o.HaveOccurred())
o.Expect(ippoolsOutput).To(o.And(o.ContainSubstring(ippool1), o.ContainSubstring(ippool2)))
exutil.By("Verifing there are no ip overlapping IP addresses from overlappingrangeipreservations")
overlappingrangeOutput, overlappingrangeOutputErr := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr).NotTo(o.HaveOccurred())
o.Expect(overlappingrangeOutput).To(o.And(o.ContainSubstring(ipaddress1), o.ContainSubstring(ipaddress2)))
exutil.By("Getting IP from pod1's secondary interface")
pod1List, getPod1Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod1Err).NotTo(o.HaveOccurred())
o.Expect(len(pod1List)).NotTo(o.BeEquivalentTo(0))
pod1Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod1List[0], interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
o.Expect(strings.HasPrefix(pod1Net1IPv4, ipaddress1)).Should(o.BeTrue())
exutil.By("Getting IP from pod2's secondary interface")
pod2List, getPod2Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err).NotTo(o.HaveOccurred())
o.Expect(len(pod2List)).NotTo(o.BeEquivalentTo(0))
pod2Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod2List[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
o.Expect(strings.HasPrefix(pod2Net1IPv4, ipaddress2)).Should(o.BeTrue())
exutil.By("Deleting the second NetworkAttachmentDefinition and corresponding pods")
removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
removeResource(oc, true, true, "pod", pod2List[0], "-n", ns)
exutil.By("Deleting the secondary network_name from ippools")
removeResource(oc, true, true, "ippools", ippool2, "-n", "openshift-multus")
exutil.By("Reconfiguring second NetworkAttachmentDefinition with setting false for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2.enableoverlapping = false
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Reconfiguring pods for additional network defined in second NAD")
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Verifing these is only one IP in overlappingrangeipreservations")
overlappingrangeOutput1, overlappingrangeOutputErr1 := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(overlappingrangeOutput1, ipaddress1)).To(o.BeTrue())
o.Expect(strings.Contains(overlappingrangeOutput1, ipaddress2)).To(o.BeFalse())
exutil.By("Getting IP from pod2's secondary interface")
podList2, getPod2Err2 := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err2).NotTo(o.HaveOccurred())
o.Expect(len(podList2)).NotTo(o.BeEquivalentTo(0))
pod3Net1IPv4, _ := getPodMultiNetworks(oc, ns, podList2[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod3Net1IPv4)
o.Expect(strings.HasPrefix(pod3Net1IPv4, ipaddress1)).Should(o.BeTrue())
})
g.It("Author:weliang-NonPreRelease-Longduration-Medium-74933-whereabouts ips are not reconciled when the node is rebooted forcely. [Disruptive]", func() {
//https://issues.redhat.com/browse/OCPBUGS-35923: whereabouts ips are not reconciled when the node is rebooted forcely
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
multusPodTemplate = filepath.Join(buildPruningBaseDir, "multus/multus-Statefulset-pod-template.yaml")
nad1Name = "ip-overlapping-1"
pod1Name = "ip-overlapping-pod1"
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Getting the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Deleting the network_names/ippools/overlapping created from this testing")
ippool1 := "192.168.20.0-24"
ippool2 := "fd00-dead-beef-10---64"
overlapping1 := "192.168.20.1"
overlapping2 := "fd00-dead-beef-10--1"
defer removeResource(oc, true, true, "overlappingrangeipreservations.whereabouts.cni.cncf.io", overlapping1, "-n", "openshift-multus")
defer removeResource(oc, true, true, "overlappingrangeipreservations.whereabouts.cni.cncf.io", overlapping2, "-n", "openshift-multus")
defer removeResource(oc, true, true, "ippools.whereabouts.cni.cncf.io", ippool1, "-n", "openshift-multus")
defer removeResource(oc, true, true, "ippools.whereabouts.cni.cncf.io", ippool2, "-n", "openshift-multus")
exutil.By("Creating a network-attach-defintion")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad1Name, "-n", ns).Execute()
nadns := dualstackNAD{
nadname: nad1Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "192.168.20.0/24",
ipv6range: "fd00:dead:beef:10::/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
nadns.createDualstackNAD(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad1Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad1Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad1Name)
}
exutil.By("Configuring a pod to get additional network")
replicasnum := strconv.Itoa(1)
nad1pod := testMultusPod{
name: pod1Name,
namespace: ns,
podlabel: pod1Name,
nadname: nad1Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
replicas: replicasnum,
template: multusPodTemplate,
}
defer removeResource(oc, true, true, "pod", nad1pod.name, "-n", ns)
nad1pod.createTestMultusPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
ipaddress1 := "192.168.20.1"
ipaddress2 := "fd00:dead:beef:10::1"
interfaceName := "net1"
exutil.By("Getting IP from pod's secondary interface")
pod1List, getPod1Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod1Err).NotTo(o.HaveOccurred())
o.Expect(len(pod1List)).NotTo(o.BeEquivalentTo(0))
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, ns, pod1List[0], interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, ipaddress1)).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, ipaddress2)).Should(o.BeTrue())
exutil.By("Rebooting the node where the statefulset pod is deployed")
clusterOperators := []string{"dns", "ingress", "storage"}
for _, operator := range clusterOperators {
defer waitForClusterOperatorState(oc, operator, 100, 3, "True.*False.*False")
}
defer waitForNetworkOperatorState(oc, 100, 3, "True.*False.*False")
defer checkNodeStatus(oc, nodeList.Items[0].Name, "Ready")
forceRebootNode(oc, nodeList.Items[0].Name)
exutil.By("Waiting for the StatefulSet pod to be deployed again")
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Getting IP from redployed pod's secondary interface, and both ipv4 and ipv6 are same as the ones pod get before.")
pod2List, getPod2Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod2Err).NotTo(o.HaveOccurred())
o.Expect(len(pod2List)).NotTo(o.BeEquivalentTo(0))
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, ns, pod2List[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, ipaddress1)).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, ipaddress2)).Should(o.BeTrue())
})
// author: [email protected]
g.It("Author:weliang-Medium-76652-Support for Dummy CNI", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
netAttachDefFile = filepath.Join(buildPruningBaseDir, "multus/support-dummy-CNI-NAD.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("The cluster has no ready node for the testing")
}
exutil.By("Getting the name of namespace")
ns := oc.Namespace()
nadNames := []string{"dummy-net", "mynet-a", "mynet-b"}
exutil.By("Create three network-attach-defintions in the test namespace")
defer removeResource(oc, true, true, "net-attach-def", nadNames[0], "-n", ns)
defer removeResource(oc, true, true, "net-attach-def", nadNames[1], "-n", ns)
defer removeResource(oc, true, true, "net-attach-def", nadNames[2], "-n", ns)
netAttachDefErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns).Execute()
o.Expect(netAttachDefErr).NotTo(o.HaveOccurred())
exutil.By("Checking if three network-attach-defintions are created")
for _, nadName := range nadNames {
if checkNAD(oc, ns, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
}
exutil.By("Creating 1st pod consuming NAD/mynet-b")
pod1 := testMultihomingPod{
name: "sampleclient",
namespace: ns,
podlabel: "sampleclient",
nadname: nadNames[2],
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name=sampleclient")).NotTo(o.HaveOccurred())
twoNadNames := nadNames[0] + "," + nadNames[1]
exutil.By("Creating 2nd pod consuming NAD/dummy-net + mynet-a")
pod2 := testMultihomingPod{
name: "sampleserver",
namespace: ns,
podlabel: "sampleserver",
nadname: twoNadNames,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name=sampleserver")).NotTo(o.HaveOccurred())
exutil.By("Getting pods names")
clientPod, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=sampleclient")
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(len(clientPod)).NotTo(o.BeEquivalentTo(0))
exutil.By("5. Checking the service of dummy interface is accessible")
o.Eventually(func() error {
_, err := e2eoutput.RunHostCmd(ns, clientPod[0], "curl 10.10.10.2:8080 --connect-timeout 5")
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred(), "The service of dummy interface is NOT accessible")
})
// author: [email protected]
g.It("Author:weliang-Medium-79604-Failed to create the sandbox-plugin on multus daemonset rollout [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-48160
exutil.By("Getting the count of multus-pods")
allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, "openshift-multus", "app=multus")
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(len(allPods)).ShouldNot(o.Equal(0))
defer func() {
errCVO := oc.AsAdmin().Run("scale").Args("-n", "openshift-cluster-version", "deployments/cluster-version-operator", "--replicas=1").Execute()
o.Expect(errCVO).NotTo(o.HaveOccurred())
errCNO := oc.AsAdmin().Run("scale").Args("-n", "openshift-network-operator", "deploy/network-operator", "--replicas=1").Execute()
o.Expect(errCNO).NotTo(o.HaveOccurred())
}()
exutil.By("Disabling CVO and CNO")
errCVO := oc.AsAdmin().Run("scale").Args("-n", "openshift-cluster-version", "deployments/cluster-version-operator", "--replicas=0").Execute()
o.Expect(errCVO).NotTo(o.HaveOccurred())
errCNO := oc.AsAdmin().Run("scale").Args("-n", "openshift-network-operator", "deploy/network-operator", "--replicas=0").Execute()
o.Expect(errCNO).NotTo(o.HaveOccurred())
exutil.By("Disabling daemonset by adding an invalid NodeSelector")
_, errMultus := oc.AsAdmin().WithoutNamespace().Run("patch").
Args("daemonset.apps/multus", "-n", "openshift-multus",
"-p", `{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/os":"linuxandwindow"}}}}}`,
"--type=merge").Output()
o.Expect(errMultus).NotTo(o.HaveOccurred())
exutil.By("Verifying all multus pods are deleted")
err := waitForPodsCount(oc, "openshift-multus", "app=multus", 0, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Enabling daemonset by restoring the default NodeSelector")
_, errMultus = oc.AsAdmin().WithoutNamespace().Run("patch").
Args("daemonset.apps/multus", "-n", "openshift-multus",
"-p", `{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/os":"linux"}}}}}`,
"--type=merge").Output()
o.Expect(errMultus).NotTo(o.HaveOccurred())
exutil.By("Verifying all multus pods are recreated")
err = waitForPodsCount(oc, "openshift-multus", "app=multus", len(allPods), 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Enabling CVO and CNO")
errCVO = oc.AsAdmin().Run("scale").Args("-n", "openshift-cluster-version", "deployments/cluster-version-operator", "--replicas=1").Execute()
o.Expect(errCVO).NotTo(o.HaveOccurred())
errCNO = oc.AsAdmin().Run("scale").Args("-n", "openshift-network-operator", "deploy/network-operator", "--replicas=1").Execute()
o.Expect(errCNO).NotTo(o.HaveOccurred())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
193ed7dd-fbf9-40a5-8a2f-a123901ffbd0
|
NonHyperShiftHOST-Author:weliang-High-57589-Whereabouts CNI timesout while iterating exclude range
|
['"context"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("NonHyperShiftHOST-Author:weliang-High-57589-Whereabouts CNI timesout while iterating exclude range", func() {
//https://issues.redhat.com/browse/OCPBUGS-2948 : Whereabouts CNI timesout while iterating exclude range
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
netAttachDefFile1 = filepath.Join(buildPruningBaseDir, "multus/ipv6-excludes-largeranges-NAD.yaml")
multusPodTemplate = filepath.Join(buildPruningBaseDir, "multinetworkpolicy/MultiNetworkPolicy-pod-template.yaml")
)
ns1 := oc.Namespace()
g.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a custom resource network-attach-defintion in tested namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", netAttachDefFile1, "-n", ns1).Execute()
netAttachDefErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile1, "-n", ns1).Execute()
o.Expect(netAttachDefErr).NotTo(o.HaveOccurred())
netAttachDefOutput, netAttachDefOutputErr := oc.Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(netAttachDefOutputErr).NotTo(o.HaveOccurred())
o.Expect(netAttachDefOutput).To(o.ContainSubstring("nad-w-excludes"))
g.By("Create a multus pod to use above network-attach-defintion")
ns1MultusPod1 := testPodMultinetwork{
name: "ns1-multuspod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
nadname: "nad-w-excludes",
labelname: "blue-multuspod",
template: multusPodTemplate,
}
ns1MultusPod1.createTestPodMultinetwork(oc)
waitPodReady(oc, ns1MultusPod1.namespace, ns1MultusPod1.name)
g.By("check the created multus pod to get the right ipv6 CIDR")
multusPodIPv6 := getPodMultiNetworkIPv6(oc, ns1, ns1MultusPod1.name)
e2e.Logf("The v6 address of pod's second interface is: %v", multusPodIPv6)
o.Expect(strings.HasPrefix(multusPodIPv6, "fd43:11f1:3daa:bbaa::")).Should(o.BeTrue())
})
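// A minimal sketch of the kind of whereabouts IPAM config this test exercises.
// The real fixture (ipv6-excludes-largeranges-NAD.yaml) is not reproduced here;
// the plugin, range, and exclude CIDRs below are illustrative assumptions. The
// point is that "exclude" can carve very large sub-ranges out of an IPv6
// range, which is what made allocation time out before OCPBUGS-2948 was fixed.
const nadWithLargeExcludesSketch = `{
"cniVersion": "0.3.1",
"type": "macvlan",
"mode": "bridge",
"ipam": {
"type": "whereabouts",
"range": "fd43:11f1:3daa:bbaa::/64",
"exclude": [
"fd43:11f1:3daa:bbaa::/65",
"fd43:11f1:3daa:bbaa:8000::/66"
]
}
}`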
| |||||
test case
|
openshift/openshift-tests-private
|
e1637912-be09-44c9-9624-0719f87927df
|
NonHyperShiftHOST-Author:weliang-High-59875-Configure ignored namespaces into multus-admission-controller
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("NonHyperShiftHOST-Author:weliang-High-59875-Configure ignored namespaces into multus-admission-controller", func() {
//https://issues.redhat.com/browse/OCPBUGS-6499:Configure ignored namespaces into multus-admission-controller
ns1 := "openshift-multus"
expectedOutpu := "-ignore-namespaces"
g.By("Check multus-admission-controller is configured with ignore-namespaces")
multusOutput, multusErr := oc.AsAdmin().Run("get").Args("deployment.apps/multus-admission-controller", "-n", ns1, "-o=jsonpath={.spec.template.spec.containers[0].command[2]}").Output()
exutil.AssertWaitPollNoErr(multusErr, "The deployment.apps/multus-admission-controller is not created")
o.Expect(multusOutput).To(o.ContainSubstring(expectedOutput))
g.By("Check all multus-additional-cni-plugins pods are Running well")
o.Expect(waitForPodWithLabelReady(oc, ns1, "app=multus-additional-cni-plugins")).NotTo(o.HaveOccurred())
})
| ||||||
test case
|
openshift/openshift-tests-private
|
bbc8ebdb-986f-4ccf-95c7-675e9a6962e9
|
NonHyperShiftHOST-Author:weliang-Medium-59440-Verify whereabouts-reconciler after creating additionalNetworks. [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-59440-Verify whereabouts-reconciler after creating additionalNetworks. [Serial]", func() {
var (
patchSResource = "networks.operator.openshift.io/cluster"
patchInfo = fmt.Sprintf(`{"spec":{ "additionalNetworks": [{"name": "whereabouts-shim", "namespace": "default","rawCNIConfig":"{\"cniVersion\":\"0.3.0\",\"type\":\"bridge\",\"name\":\"cnitest0\",\"ipam\": {\"type\":\"whereabouts\",\"subnet\":\"192.0.2.0/24\"}}","type":"Raw"}]}}`)
ns = "openshift-multus"
)
g.By("Check there are no whereabouts-reconciler pods and ds in the openshift-multus namespace before creating additionalNetworks ")
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", "app=whereabouts-reconciler", "-ojsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(podStatus).To(o.BeEmpty())
_, dsErrBefore := oc.AsAdmin().Run("get").Args("daemonset.apps/whereabouts-reconciler", "-n", ns).Output()
o.Expect(dsErrBefore).To(o.HaveOccurred())
g.By("Add additionalNetworks through network operator")
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", `[{"op": "remove", "path": "/spec/additionalNetworks"}]`, "--type=json").Execute()
g.By("Check NetworkOperatorStatus to ensure the cluster is health after modification")
checkNetworkOperatorState(oc, 10, 60)
}()
patchResourceAsAdmin(oc, patchSResource, patchInfo)
g.By("Check whereabouts-reconciler pods and ds are created in the openshift-multus namespace after creating additionalNetworks ")
o.Expect(waitForPodWithLabelReady(oc, ns, "app=whereabouts-reconciler")).NotTo(o.HaveOccurred())
dsOutput, dsErrAfter := oc.AsAdmin().Run("get").Args("daemonset.apps/whereabouts-reconciler", "-n", ns).Output()
o.Expect(dsErrAfter).NotTo(o.HaveOccurred())
o.Expect(dsOutput).To(o.ContainSubstring("whereabouts-reconciler"))
g.By("Check there are no whereabouts-reconciler pods and ds in the openshift-multus namespace after deleting additionalNetworks ")
oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", `[{"op": "remove", "path": "/spec/additionalNetworks"}]`, "--type=json").Execute()
o.Eventually(func() bool {
result := true
_, err := oc.AsAdmin().Run("get").Args("pod", "-n", ns, "-l", "app=whereabouts-reconciler").Output()
if err != nil {
e2e.Logf("Wait for whereabouts-reconciler pods to be deleted")
result = false
}
return result
}, "60s", "5s").Should(o.BeTrue(), fmt.Sprintf("whereabouts-reconciler pods are not deleted"))
o.Eventually(func() bool {
result := true
_, err := oc.AsAdmin().Run("get").Args("daemonset.apps/whereabouts-reconciler", "-n", ns).Output()
if err != nil {
e2e.Logf("Wait for daemonset.apps/whereabouts-reconciler to be deleted")
result = false
}
return result
}, "60s", "5s").Should(o.BeTrue(), fmt.Sprintf("daemonset.apps/whereabouts-reconciler is not deleted"))
})
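// Sketch (not the suite's actual code): the patchInfo above hand-escapes the
// nested rawCNIConfig JSON. Building the same patch with encoding/json keeps
// the two layers of quoting from drifting apart. The shapes mirror the patch
// used in this test; the helper name is hypothetical and assumes
// "encoding/json" is imported.
func buildAdditionalNetworksPatchSketch() (string, error) {
// Inner layer: the raw CNI config that ends up as an escaped string value.
raw, err := json.Marshal(map[string]any{
"cniVersion": "0.3.0",
"type": "bridge",
"name": "cnitest0",
"ipam": map[string]string{"type": "whereabouts", "subnet": "192.0.2.0/24"},
})
if err != nil {
return "", err
}
// Outer layer: the networks.operator.openshift.io spec patch.
patch, err := json.Marshal(map[string]any{
"spec": map[string]any{"additionalNetworks": []map[string]any{{
"name": "whereabouts-shim", "namespace": "default",
"rawCNIConfig": string(raw), "type": "Raw",
}}},
})
return string(patch), err
}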
| |||||
test case
|
openshift/openshift-tests-private
|
3613bd1f-fbea-4d08-b213-a3e9f01c89f3
|
NonHyperShiftHOST-Author:weliang-Medium-64958-Unable to set default-route when istio sidecar is injected. [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-64958-Unable to set default-route when istio sidecar is injected. [Serial]", func() {
//https://issues.redhat.com/browse/OCPBUGS-7844
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
netAttachDefFile = filepath.Join(buildPruningBaseDir, "multus/istiosidecar-NAD.yaml")
testPod = filepath.Join(buildPruningBaseDir, "multus/istiosidecar-pod.yaml")
)
exutil.By("Create a new namespace")
ns1 := "test-64958"
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns1)
oc.CreateSpecifiedNamespaceAsAdmin(ns1)
exutil.By("Create a custom resource network-attach-defintion in the namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", netAttachDefFile, "-n", ns1).Execute()
netAttachDefErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns1).Execute()
o.Expect(netAttachDefErr).NotTo(o.HaveOccurred())
netAttachDefOutput, netAttachDefOutputErr := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(netAttachDefOutputErr).NotTo(o.HaveOccurred())
o.Expect(netAttachDefOutput).To(o.ContainSubstring("test-nad"))
exutil.By("Create a pod consuming above network-attach-defintion in ns1")
createResourceFromFile(oc, ns1, testPod)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=testpod")).NotTo(o.HaveOccurred(), "The test pod in ns/%s is not ready", ns1)
exutil.By("Check the default-route is created when istio sidecar is injected")
routeLog, routeErr := execCommandInSpecificPod(oc, ns1, "testpod", "ip route")
o.Expect(routeErr).NotTo(o.HaveOccurred())
o.Expect(routeLog).To(o.ContainSubstring("default via 172.19.55.99 dev net1"))
})
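// Hypothetical helper (not part of the suite) showing the check this test
// performs: the sidecar-injected pod must still hold a default route through
// its secondary interface, i.e. a line like
// "default via 172.19.55.99 dev net1" in `ip route` output.
// Assumes "strings" is imported.
func hasDefaultRouteViaSketch(routeOutput, gateway, device string) bool {
for _, line := range strings.Split(routeOutput, "\n") {
if strings.HasPrefix(line, "default via "+gateway) && strings.Contains(line, "dev "+device) {
return true
}
}
return false
}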
| |||||
test case
|
openshift/openshift-tests-private
|
ebf0b622-d725-42f5-a0af-25121c74f09a
|
NonHyperShiftHOST-Author:weliang-Medium-66876-Support Dual Stack IP assignment for whereabouts CNI/IPAM
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-66876-Support Dual Stack IP assignment for whereabouts CNI/IPAM", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
nadName = "dualstack"
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes")
}
exutil.By("Get the name of namespace")
ns1 := oc.Namespace()
exutil.By("Create a custom resource network-attach-defintion in the test namespace")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns1).Execute()
nad1ns1 := dualstackNAD{
nadname: nadName,
namespace: ns1,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "192.168.10.0/24",
ipv6range: "fd00:dead:beef:10::/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
nad1ns1.createDualstackNAD(oc)
exutil.By("Check if the network-attach-defintion is created")
if checkNAD(oc, ns1, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Create 1st pod consuming above network-attach-defintion in ns1")
pod1 := testMultihomingPod{
name: "dualstack-pod-1",
namespace: ns1,
podlabel: "dualstack-pod1",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=dualstack-pod1")).NotTo(o.HaveOccurred())
exutil.By("Create 2nd pod consuming above network-attach-defintion in ns1")
pod2 := testMultihomingPod{
name: "dualstack-pod-2",
namespace: ns1,
podlabel: "dualstack-pod2",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=dualstack-pod2")).NotTo(o.HaveOccurred())
exutil.By("Get two pods' name")
podList, podListErr := exutil.GetAllPods(oc, ns1)
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podList)).Should(o.Equal(2))
exutil.By("Get IPs of the pod1ns1's secondary interface in first namespace.")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, podList[0])
e2e.Logf("The v4 address of pod1ns1is: %v", pod1ns1IPv4)
e2e.Logf("The v6 address of pod1ns1is: %v", pod1ns1IPv6)
exutil.By("Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, podList[1])
e2e.Logf("The v4 address of pod2ns1is: %v", pod2ns1IPv4)
e2e.Logf("The v6 address of pod2ns1is: %v", pod2ns1IPv6)
g.By("Both ipv4 and ipv6 curl should pass between two pods")
curlPod2PodMultiNetworkPass(oc, ns1, podList[0], pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[1], pod1ns1IPv4, pod1ns1IPv6)
})
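// Sketch of the dual-stack reachability pattern behind
// curlPod2PodMultiNetworkPass (assumed shape, not the real helper): curl the
// peer on both address families, letting net.JoinHostPort bracket the IPv6
// literal. Assumes "net" and the e2eoutput package are imported.
func curlBothFamiliesSketch(ns, srcPod, dstIPv4, dstIPv6 string) error {
for _, ip := range []string{dstIPv4, dstIPv6} {
if _, err := e2eoutput.RunHostCmd(ns, srcPod, "curl --connect-timeout 5 -s "+net.JoinHostPort(ip, "8080")); err != nil {
return err
}
}
return nil
}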
| |||||
test case
|
openshift/openshift-tests-private
|
28e6446f-70db-456b-b880-03fe91c30189
|
NonHyperShiftHOST-Author:weliang-Medium-69947-The macvlan pod will send Unsolicited Neighbor Advertisements after it is created
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("NonHyperShiftHOST-Author:weliang-Medium-69947-The macvlan pod will send Unsolicited Neighbor Advertisements after it is created", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
nadName = "whereabouts-dualstack"
sniffMultusPodTemplate = filepath.Join(buildPruningBaseDir, "multus/sniff-multus-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Get the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Create a custom resource network-attach-defintion")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nadName, "-n", ns).Execute()
nadns := dualstackNAD{
nadname: nadName,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "192.168.10.0/24",
ipv6range: "fd00:dead:beef:10::/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
nadns.createDualstackNAD(oc)
exutil.By("Check if the network-attach-defintion is created")
if checkNAD(oc, ns, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Create a sniff pod to capture the traffic from pod's secondary network")
pod1 := testPodMultinetwork{
name: "sniff-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
nadname: nadName,
labelname: "sniff-pod",
template: sniffMultusPodTemplate,
}
pod1.createTestPodMultinetwork(oc)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name="+pod1.labelname), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", pod1.labelname))
exutil.By("The sniff pod start to capture the Unsolicited Neighbor Advertisements from pod's secondary network")
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod1.labelname, "bash", "-c",
`timeout --preserve-status 30 tcpdump -e -i net1 icmp6 and icmp6[0] = 136 -nvvv`).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a pod consuming above network-attach-defintion")
pod2 := testMultihomingPod{
name: "dualstack-pod",
namespace: ns,
podlabel: "dualstack-pod",
nadname: nadName,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, ns, "name="+pod2.podlabel), fmt.Sprintf("Waiting for pod with label name=%s become ready timeout", pod2.podlabel))
exutil.By("The sniff pod will get Unsolicited Neighbor Advertisements, not neighbor solicitation")
cmdErr := cmdTcpdump.Wait()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(cmdOutput.String(), "Flags [solicited]")).NotTo(o.BeTrue(), cmdOutput.String())
})
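// The capture filter above, unpacked: icmp6[0] is the ICMPv6 type byte, and
// type 136 is Neighbor Advertisement, so the sniffer only sees NAs. A pod
// announcing a fresh MAC/IP binding sends *unsolicited* NAs, whose Solicited
// flag is clear; that is why the assertion rejects any "Flags [solicited]"
// line in the tcpdump output. Same command the sniff pod runs:
const unsolicitedNACaptureSketch = `timeout --preserve-status 30 tcpdump -e -i net1 icmp6 and icmp6[0] = 136 -nvvv`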
| |||||
test case
|
openshift/openshift-tests-private
|
fcbbeb56-6f2b-4388-b41f-056dadd300aa
|
Author:weliang-Medium-72202-[Multus] NAD without configuring network_name. [Disruptive]
|
['"context"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("Author:weliang-Medium-72202-[Multus] NAD without configuring network_name. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
nad1Name = "ip-overlapping-1"
nad2Name = "ip-overlapping-2"
pod1Name = "ip-overlapping-pod1"
pod2Name = "ip-overlapping-pod2"
ipv4range1 = "192.168.20.0/29"
ipv4range2 = "192.168.20.0/24"
interfaceName = "net1"
whereaboutsoverlappingIPNADTemplate = filepath.Join(buildPruningBaseDir, "multus/whereabouts-overlappingIP-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Get the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Configuring first NetworkAttachmentDefinition")
defer removeResource(oc, true, true, "net-attach-def", nad1Name, "-n", ns)
nad1 := whereaboutsoverlappingIPNAD{
nadname: nad1Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range1,
enableoverlapping: true,
networkname: "",
template: whereaboutsoverlappingIPNADTemplate,
}
nad1.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad1Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad1Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad1Name)
}
exutil.By("Configuring pods to get additional network defined in first NAD")
nad1pod := testMultihomingPod{
name: pod1Name,
namespace: ns,
podlabel: pod1Name,
nadname: nad1Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad1pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Configuring second NetworkAttachmentDefinition with setting true for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2 := whereaboutsoverlappingIPNAD{
nadname: nad2Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range2,
enableoverlapping: true,
networkname: "",
template: whereaboutsoverlappingIPNADTemplate,
}
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the second NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad2Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad2Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad2Name)
}
exutil.By("Configuring pods for additional network defined in second NAD")
nad2pod := testMultihomingPod{
name: pod2Name,
namespace: ns,
podlabel: pod2Name,
nadname: nad2Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
ippool1 := "192.168.20.0-29"
ippool2 := "192.168.20.0-24"
ipaddress1 := "192.168.20.1"
ipaddress2 := "192.168.20.2"
exutil.By("Verifing the correct network_names from ippools")
ippoolsOutput, ippoolsOutputErr := oc.AsAdmin().Run("get").Args("ippools", "-n", "openshift-multus").Output()
o.Expect(ippoolsOutputErr).NotTo(o.HaveOccurred())
o.Expect(ippoolsOutput).To(o.And(o.ContainSubstring(ippool1), o.ContainSubstring(ippool2)))
exutil.By("Verifing there are no ip overlapping IP addresses from overlappingrangeipreservations")
overlappingrangeOutput, overlappingrangeOutputErr := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr).NotTo(o.HaveOccurred())
o.Expect(overlappingrangeOutput).To(o.And(o.ContainSubstring(ipaddress1), o.ContainSubstring(ipaddress2)))
exutil.By("Getting IP from pod1's secondary interface")
pod1List, getPod1Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod1Err).NotTo(o.HaveOccurred())
o.Expect(len(pod1List)).NotTo(o.BeEquivalentTo(0))
pod1Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod1List[0], interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
o.Expect(strings.HasPrefix(pod1Net1IPv4, ipaddress1)).Should(o.BeTrue())
exutil.By("Getting IP from pod2's secondary interface")
pod2List, getPod2Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err).NotTo(o.HaveOccurred())
o.Expect(len(pod2List)).NotTo(o.BeEquivalentTo(0))
pod2Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod2List[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
o.Expect(strings.HasPrefix(pod2Net1IPv4, ipaddress2)).Should(o.BeTrue())
exutil.By("Deleting the second NetworkAttachmentDefinition and responding pods")
removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
removeResource(oc, true, true, "pod", pod2List[0], "-n", ns)
exutil.By("Deleting the secondary network_name from ippools")
removeResource(oc, true, true, "ippools", ippool2, "-n", "openshift-multus")
exutil.By("Reconfiguring second NetworkAttachmentDefinition with setting false for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2.enableoverlapping = false
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Reconfiguring pods for additional network defined in second NAD")
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Verifing these is only one IP in overlappingrangeipreservations")
overlappingrangeOutput1, overlappingrangeOutputErr1 := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr1).NotTo(o.HaveOccurred())
o.Expect(overlappingrangeOutput1).To(o.ContainSubstring(ipaddress1))
o.Expect(overlappingrangeOutput1).NotTo(o.ContainSubstring(ipaddress2))
exutil.By("Getting IP from pod2's secondary interface")
podList2, getPod2Err2 := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err2).NotTo(o.HaveOccurred())
o.Expect(len(podList2)).NotTo(o.BeEquivalentTo(0))
pod3Net1IPv4, _ := getPodMultiNetworks(oc, ns, podList2[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod3Net1IPv4)
o.Expect(strings.HasPrefix(pod3Net1IPv4, ipaddress1)).Should(o.BeTrue())
})
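// Naming convention inferred from the assertions above (read off this test,
// not from whereabouts documentation): the IPPool CR name is simply the range
// CIDR with "/" flattened to "-". Assumes "strings" is imported.
func ipPoolNameSketch(cidr string) string {
// "192.168.20.0/24" -> "192.168.20.0-24"
return strings.ReplaceAll(cidr, "/", "-")
}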
| |||||
test case
|
openshift/openshift-tests-private
|
dced555a-079f-4670-ad6f-6ae3aefcd658
|
Author:weliang-Medium-72203-[Multus] NAD using same network_name. [Disruptive]
|
['"context"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("Author:weliang-Medium-72203-[Multus] NAD using same network_name. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
nad1Name = "ip-overlapping-1"
nad2Name = "ip-overlapping-2"
pod1Name = "ip-overlapping-pod1"
pod2Name = "ip-overlapping-pod2"
ipv4range1 = "192.168.20.0/29"
ipv4range2 = "192.168.20.0/24"
interfaceName = "net1"
networkName = "blue-net"
whereaboutsoverlappingIPNADTemplate = filepath.Join(buildPruningBaseDir, "multus/whereabouts-overlappingIP-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Get the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Configuring first NetworkAttachmentDefinition")
defer removeResource(oc, true, true, "net-attach-def", nad1Name, "-n", ns)
nad1 := whereaboutsoverlappingIPNAD{
nadname: nad1Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range1,
enableoverlapping: true,
networkname: networkName,
template: whereaboutsoverlappingIPNADTemplate,
}
nad1.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad1Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad1Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad1Name)
}
exutil.By("Configuring pods to get additional network defined in first NAD")
nad1pod := testMultihomingPod{
name: pod1Name,
namespace: ns,
podlabel: pod1Name,
nadname: nad1Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad1pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Configuring second NetworkAttachmentDefinition with setting true for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2 := whereaboutsoverlappingIPNAD{
nadname: nad2Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: ipv4range2,
enableoverlapping: true,
networkname: networkName,
template: whereaboutsoverlappingIPNADTemplate,
}
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Verifying the second NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad2Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad2Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad2Name)
}
exutil.By("Configuring pods for additional network defined in second NAD")
nad2pod := testMultihomingPod{
name: pod2Name,
namespace: ns,
podlabel: pod2Name,
nadname: nad2Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
ippool1 := "192.168.20.0-29"
ippool2 := "192.168.20.0-24"
ipaddress1 := "192.168.20.1"
ipaddress2 := "192.168.20.2"
exutil.By("Verifing the correct network_names from ippools")
ippoolsOutput, ippoolsOutputErr := oc.AsAdmin().Run("get").Args("ippools", "-n", "openshift-multus").Output()
o.Expect(ippoolsOutputErr).NotTo(o.HaveOccurred())
o.Expect(ippoolsOutput).To(o.And(o.ContainSubstring(ippool1), o.ContainSubstring(ippool2)))
exutil.By("Verifing there are no ip overlapping IP addresses from overlappingrangeipreservations")
overlappingrangeOutput, overlappingrangeOutputErr := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr).NotTo(o.HaveOccurred())
o.Expect(overlappingrangeOutput).To(o.And(o.ContainSubstring(ipaddress1), o.ContainSubstring(ipaddress2)))
exutil.By("Getting IP from pod1's secondary interface")
pod1List, getPod1Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod1Err).NotTo(o.HaveOccurred())
o.Expect(len(pod1List)).NotTo(o.BeEquivalentTo(0))
pod1Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod1List[0], interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
o.Expect(strings.HasPrefix(pod1Net1IPv4, ipaddress1)).Should(o.BeTrue())
exutil.By("Getting IP from pod2's secondary interface")
pod2List, getPod2Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err).NotTo(o.HaveOccurred())
o.Expect(len(pod2List)).NotTo(o.BeEquivalentTo(0))
pod2Net1IPv4, _ := getPodMultiNetworks(oc, ns, pod2List[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
o.Expect(strings.HasPrefix(pod2Net1IPv4, ipaddress2)).Should(o.BeTrue())
exutil.By("Deleting the second NetworkAttachmentDefinition and corresponding pods")
removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
removeResource(oc, true, true, "pod", pod2List[0], "-n", ns)
exutil.By("Deleting the secondary network_name from ippools")
removeResource(oc, true, true, "ippools", ippool2, "-n", "openshift-multus")
exutil.By("Reconfiguring second NetworkAttachmentDefinition with setting false for enable_overlapping_ranges")
defer removeResource(oc, true, true, "net-attach-def", nad2Name, "-n", ns)
nad2.enableoverlapping = false
nad2.createWhereaboutsoverlappingIPNAD(oc)
exutil.By("Reconfiguring pods for additional network defined in second NAD")
nad2pod.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad2pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Verifing these is only one IP in overlappingrangeipreservations")
overlappingrangeOutput1, overlappingrangeOutputErr1 := oc.AsAdmin().Run("get").Args("overlappingrangeipreservations", "-A", "-n", "openshift-multus").Output()
o.Expect(overlappingrangeOutputErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(overlappingrangeOutput1, ipaddress1)).To(o.BeTrue())
o.Expect(strings.Contains(overlappingrangeOutput1, ipaddress2)).To(o.BeFalse())
exutil.By("Getting IP from pod2's secondary interface")
podList2, getPod2Err2 := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad2pod.podlabel)
o.Expect(getPod2Err2).NotTo(o.HaveOccurred())
o.Expect(len(podList2)).NotTo(o.BeEquivalentTo(0))
pod3Net1IPv4, _ := getPodMultiNetworks(oc, ns, podList2[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod3Net1IPv4)
o.Expect(strings.HasPrefix(pod3Net1IPv4, ipaddress1)).Should(o.BeTrue())
})
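// Hypothetical check mirroring the assertions above: with a shared
// network_name the second NAD reuses the first NAD's exclusion set, so an
// already-reserved IP should appear exactly once in the reservations listing.
// A rough substring count is enough here because the reserved names are
// unique in this test's output. Assumes "strings" is imported.
func reservedExactlyOnceSketch(reservationListing, ip string) bool {
return strings.Count(reservationListing, ip) == 1
}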
| |||||
test case
|
openshift/openshift-tests-private
|
e1d5fc13-892e-4866-9216-7d96e3055bd1
|
Author:weliang-NonPreRelease-Longduration-Medium-74933-whereabouts ips are not reconciled when the node is rebooted forcely. [Disruptive]
|
['"context"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("Author:weliang-NonPreRelease-Longduration-Medium-74933-whereabouts ips are not reconciled when the node is rebooted forcely. [Disruptive]", func() {
//https://issues.redhat.com/browse/OCPBUGS-35923: whereabouts ips are not reconciled when the node is rebooted forcely
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
multusPodTemplate = filepath.Join(buildPruningBaseDir, "multus/multus-Statefulset-pod-template.yaml")
nad1Name = "ip-overlapping-1"
pod1Name = "ip-overlapping-pod1"
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one worker node")
}
exutil.By("Getting the name of namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("Deleting the network_names/ippools/overlapping created from this testing")
ippool1 := "192.168.20.0-24"
ippool2 := "fd00-dead-beef-10---64"
overlapping1 := "192.168.20.1"
overlapping2 := "fd00-dead-beef-10--1"
defer removeResource(oc, true, true, "overlappingrangeipreservations.whereabouts.cni.cncf.io", overlapping1, "-n", "openshift-multus")
defer removeResource(oc, true, true, "overlappingrangeipreservations.whereabouts.cni.cncf.io", overlapping2, "-n", "openshift-multus")
defer removeResource(oc, true, true, "ippools.whereabouts.cni.cncf.io", ippool1, "-n", "openshift-multus")
defer removeResource(oc, true, true, "ippools.whereabouts.cni.cncf.io", ippool2, "-n", "openshift-multus")
exutil.By("Creating a network-attach-defintion")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad1Name, "-n", ns).Execute()
nadns := dualstackNAD{
nadname: nad1Name,
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "192.168.20.0/24",
ipv6range: "fd00:dead:beef:10::/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
nadns.createDualstackNAD(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns, nad1Name) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nad1Name)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nad1Name)
}
exutil.By("Configuring a pod to get additional network")
replicasnum := strconv.Itoa(1)
nad1pod := testMultusPod{
name: pod1Name,
namespace: ns,
podlabel: pod1Name,
nadname: nad1Name,
nodename: nodeList.Items[0].Name,
podenvname: "",
replicas: replicasnum,
template: multusPodTemplate,
}
defer removeResource(oc, true, true, "pod", nad1pod.name, "-n", ns)
nad1pod.createTestMultusPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
ipaddress1 := "192.168.20.1"
ipaddress2 := "fd00:dead:beef:10::1"
interfaceName := "net1"
exutil.By("Getting IP from pod's secondary interface")
pod1List, getPod1Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod1Err).NotTo(o.HaveOccurred())
o.Expect(len(pod1List)).NotTo(o.BeEquivalentTo(0))
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, ns, pod1List[0], interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, ipaddress1)).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, ipaddress2)).Should(o.BeTrue())
exutil.By("Rebooting the node where the statefulset pod is deployed")
clusterOperators := []string{"dns", "ingress", "storage"}
for _, operator := range clusterOperators {
defer waitForClusterOperatorState(oc, operator, 100, 3, "True.*False.*False")
}
defer waitForNetworkOperatorState(oc, 100, 3, "True.*False.*False")
defer checkNodeStatus(oc, nodeList.Items[0].Name, "Ready")
forceRebootNode(oc, nodeList.Items[0].Name)
exutil.By("Waiting for the StatefulSet pod to be deployed again")
o.Expect(waitForPodWithLabelReady(oc, ns, "name="+nad1pod.podlabel)).NotTo(o.HaveOccurred())
exutil.By("Getting IP from redployed pod's secondary interface, and both ipv4 and ipv6 are same as the ones pod get before.")
pod2List, getPod2Err := exutil.GetAllPodsWithLabel(oc, ns, "name="+nad1pod.podlabel)
o.Expect(getPod2Err).NotTo(o.HaveOccurred())
o.Expect(len(pod2List)).NotTo(o.BeEquivalentTo(0))
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, ns, pod2List[0], interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, ipaddress1)).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, ipaddress2)).Should(o.BeTrue())
})
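// CR-name normalization inferred from the literals above (a convention read
// off this test, not from whereabouts docs): ":" and "/" are both flattened
// to "-", e.g. "fd00:dead:beef:10::/64" -> "fd00-dead-beef-10---64" and
// "fd00:dead:beef:10::1" -> "fd00-dead-beef-10--1". Assumes "strings" is
// imported.
func whereaboutsCRNameSketch(ipOrCIDR string) string {
return strings.NewReplacer(":", "-", "/", "-").Replace(ipOrCIDR)
}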
| |||||
test case
|
openshift/openshift-tests-private
|
96dcbdb5-ae42-4a72-a5f0-66c07251f82c
|
Author:weliang-Medium-76652-Support for Dummy CNI
|
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("Author:weliang-Medium-76652-Support for Dummy CNI", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
netAttachDefFile = filepath.Join(buildPruningBaseDir, "multus/support-dummy-CNI-NAD.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("The cluster has no ready node for the testing")
}
exutil.By("Getting the name of namespace")
ns := oc.Namespace()
nadNames := []string{"dummy-net", "mynet-a", "mynet-b"}
exutil.By("Create three network-attach-defintions in the test namespace")
defer removeResource(oc, true, true, "net-attach-def", nadNames[0], "-n", ns)
defer removeResource(oc, true, true, "net-attach-def", nadNames[1], "-n", ns)
defer removeResource(oc, true, true, "net-attach-def", nadNames[2], "-n", ns)
netAttachDefErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns).Execute()
o.Expect(netAttachDefErr).NotTo(o.HaveOccurred())
exutil.By("Checking if three network-attach-defintions are created")
for _, nadName := range nadNames {
if checkNAD(oc, ns, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
}
exutil.By("Creating 1st pod consuming NAD/mynet-b")
pod1 := testMultihomingPod{
name: "sampleclient",
namespace: ns,
podlabel: "sampleclient",
nadname: nadNames[2],
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name=sampleclient")).NotTo(o.HaveOccurred())
twoNadNames := nadNames[0] + "," + nadNames[1]
exutil.By("Creating 2nd pod consuming NAD/dummy-net + mynet-a")
pod2 := testMultihomingPod{
name: "sampleserver",
namespace: ns,
podlabel: "sampleserver",
nadname: twoNadNames,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
o.Expect(waitForPodWithLabelReady(oc, ns, "name=sampleserver")).NotTo(o.HaveOccurred())
exutil.By("Getting pods names")
clientPod, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=sampleclient")
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(len(clientPod)).NotTo(o.BeEquivalentTo(0))
exutil.By("5. Checking the service of dummy interface is accessible")
o.Eventually(func() error {
_, err := e2eoutput.RunHostCmd(ns, clientPod[0], "curl 10.10.10.2:8080 --connect-timeout 5")
return err
}, "60s", "10s").ShouldNot(o.HaveOccurred(), "The service of dummy interface is NOT accessible")
})
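// Illustrative shape only: the real fixture (support-dummy-CNI-NAD.yaml) is
// not reproduced here. A dummy-type NAD like the sketch below would give the
// server pod the 10.10.10.2 address its service listens on; the IPAM type and
// prefix length are assumptions.
const dummyNADSketch = `{
"cniVersion": "0.3.1",
"type": "dummy",
"ipam": {
"type": "static",
"addresses": [{"address": "10.10.10.2/24"}]
}
}`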
| |||||
test case
|
openshift/openshift-tests-private
|
6bab3652-f4d4-422d-915e-21e7f188b681
|
Author:weliang-Medium-79604-Failed to create the sandbox-plugin on multus daemonset rollout [Disruptive]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multus.go
|
g.It("Author:weliang-Medium-79604-Failed to create the sandbox-plugin on multus daemonset rollout [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-48160
exutil.By("Getting the count of multus-pods")
allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, "openshift-multus", "app=multus")
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(len(allPods)).ShouldNot(o.Equal(0))
defer func() {
errCVO := oc.AsAdmin().Run("scale").Args("-n", "openshift-cluster-version", "deployments/cluster-version-operator", "--replicas=1").Execute()
o.Expect(errCVO).NotTo(o.HaveOccurred())
errCNO := oc.AsAdmin().Run("scale").Args("-n", "openshift-network-operator", "deploy/network-operator", "--replicas=1").Execute()
o.Expect(errCNO).NotTo(o.HaveOccurred())
}()
exutil.By("Disabling CVO and CNO")
errCVO := oc.AsAdmin().Run("scale").Args("-n", "openshift-cluster-version", "deployments/cluster-version-operator", "--replicas=0").Execute()
o.Expect(errCVO).NotTo(o.HaveOccurred())
errCNO := oc.AsAdmin().Run("scale").Args("-n", "openshift-network-operator", "deploy/network-operator", "--replicas=0").Execute()
o.Expect(errCNO).NotTo(o.HaveOccurred())
exutil.By("Disabling daemonset by adding an invalid NodeSelector")
_, errMultus := oc.AsAdmin().WithoutNamespace().Run("patch").
Args("daemonset.apps/multus", "-n", "openshift-multus",
"-p", `{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/os":"linuxandwindow"}}}}}`,
"--type=merge").Output()
o.Expect(errMultus).NotTo(o.HaveOccurred())
exutil.By("Verifying all multus pods are deleted")
err := waitForPodsCount(oc, "openshift-multus", "app=multus", 0, 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Enabling daemonset by restoring the default NodeSelector")
_, errMultus = oc.AsAdmin().WithoutNamespace().Run("patch").
Args("daemonset.apps/multus", "-n", "openshift-multus",
"-p", `{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/os":"linux"}}}}}`,
"--type=merge").Output()
o.Expect(errMultus).NotTo(o.HaveOccurred())
exutil.By("Verifying all multus pods are recreated")
err = waitForPodsCount(oc, "openshift-multus", "app=multus", len(allPods), 5*time.Second, 20*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Enabling CVO and CNO")
errCVO = oc.AsAdmin().Run("scale").Args("-n", "openshift-cluster-version", "deployments/cluster-version-operator", "--replicas=1").Execute()
o.Expect(errCVO).NotTo(o.HaveOccurred())
errCNO = oc.AsAdmin().Run("scale").Args("-n", "openshift-network-operator", "deploy/network-operator", "--replicas=1").Execute()
o.Expect(errCNO).NotTo(o.HaveOccurred())
})
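// A minimal sketch of what a waitForPodsCount-style helper can look like
// (hypothetical body; the suite's real implementation lives elsewhere),
// following the wait.PollUntilContextTimeout pattern used in these tests.
// Assumes "context", "time", the wait package, and exutil are imported.
func waitForPodsCountSketch(oc *exutil.CLI, ns, label string, want int, interval, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(ctx context.Context) (bool, error) {
		pods, err := exutil.GetAllPodsWithLabel(oc, ns, label)
		if err != nil {
			return false, nil // transient API error: retry until the timeout
		}
		return len(pods) == want, nil
	})
}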
| |||||
file
|
openshift/openshift-tests-private
|
a48f19de-7157-46aa-bb33-0a6c451b74b6
|
nad_utils
|
import (
"context"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
netutils "k8s.io/utils/net"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
package networking
import (
"context"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
netutils "k8s.io/utils/net"
)
type udnPodResource struct {
name string
namespace string
label string
template string
}
type udnPodResourceNode struct {
name string
namespace string
label string
nodename string
template string
}
type udnPodSecNADResource struct {
name string
namespace string
label string
annotation string
template string
}
type udnPodSecNADResourceNode struct {
name string
namespace string
label string
nadname string
nodename string
template string
}
type udnNetDefResource struct {
nadname string
namespace string
nad_network_name string
topology string
subnet string
mtu int32
net_attach_def_name string
role string
template string
}
type udnCRDResource struct {
crdname string
namespace string
IPv4cidr string
IPv4prefix int32
IPv6cidr string
IPv6prefix int32
cidr string
prefix int32
mtu int32
role string
template string
}
type cudnCRDResource struct {
crdname string
labelvalue string
labelkey string
key string
operator string
values []string
IPv4cidr string
IPv4prefix int32
IPv6cidr string
IPv6prefix int32
cidr string
prefix int32
mtu int32
role string
template string
}
type udnPodWithProbeResource struct {
name string
namespace string
label string
port int
failurethreshold int
periodseconds int
template string
}
func (pod *udnPodResource) createUdnPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (pod *udnPodResourceNode) createUdnPodNode(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "NODENAME="+pod.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (pod *udnPodWithProbeResource) createUdnPodWithProbe(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "PORT="+strconv.Itoa(int(pod.port)), "FAILURETHRESHOLD="+strconv.Itoa(int(pod.failurethreshold)), "PERIODSECONDS="+strconv.Itoa(int(pod.periodseconds)))
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (pod *udnPodSecNADResource) createUdnPodWithSecNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "ANNOTATION="+pod.annotation)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (pod *udnPodSecNADResourceNode) createUdnPodWithSecNADNode(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "NADNAME="+pod.nadname, "NODENAME="+pod.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
func (nad *udnNetDefResource) createUdnNad(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NADNAME="+nad.nadname, "NAMESPACE="+nad.namespace, "NAD_NETWORK_NAME="+nad.nad_network_name, "TOPOLOGY="+nad.topology, "SUBNET="+nad.subnet, "MTU="+strconv.Itoa(int(nad.mtu)), "NET_ATTACH_DEF_NAME="+nad.net_attach_def_name, "ROLE="+nad.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", nad.nadname))
}
func (nad *udnNetDefResource) deleteUdnNetDef(oc *exutil.CLI) {
removeResource(oc, false, true, "net-attach-def", nad.nadname, "-n", nad.namespace)
}
// getPodIPUDN returns IPv6 then IPv4 on dual stack; on single stack (v4 or v6) it returns the main IP in the 1st var and an empty string in the 2nd
func getPodIPUDN(oc *exutil.CLI, namespace string, podName string, netName string) (string, string) {
ipStack := checkIPStackType(oc)
cmdIPv4 := "ip a sho " + netName + " | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmdIPv6 := "ip -o -6 addr show dev " + netName + " | awk '$3 == \"inet6\" && $6 == \"global\" {print $4}' | cut -d'/' -f1"
if ipStack == "ipv4single" {
podIPv4, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv4)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The UDN pod %s IPv4 in namespace %s is %q", podName, namespace, podIPv4)
return podIPv4, ""
} else if ipStack == "ipv6single" {
podIPv6, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv6)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The UDN pod %s IPv6 in namespace %s is %q", podName, namespace, podIPv6)
return podIPv6, ""
} else {
podIPv4, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv4)
o.Expect(err).NotTo(o.HaveOccurred())
podIPv6, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv6)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The UDN pod's %s IPv6 and IPv4 IP in namespace %s is %q %q", podName, namespace, podIPv6, podIPv4)
return podIPv6, podIPv4
}
return "", ""
}
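// Example usage (illustrative): on a dual-stack cluster the IPv6 address is
// returned first and IPv4 second; on single stack only the first value is set:
//
//	v6OrMain, v4 := getPodIPUDN(oc, ns, "pod-a", "ovn-udn1")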
// CurlPod2PodPassUDN checks connectivity across UDN pods regardless of network addressing type on cluster
func CurlPod2PodPassUDN(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
// getPodIPUDN returns IPv6 and IPv4 in that order on dual stack, or the main IP (v4 or v6) in the 1st var and an empty string in the 2nd on single stack
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, "ovn-udn1")
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// CurlPod2PodFailUDN ensures no connectivity from a udn pod to pod regardless of network addressing type on cluster
func CurlPod2PodFailUDN(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
// getPodIPUDN returns IPv6 and IPv4 in that order on dual stack, or the main IP (v4 or v6) in the 1st var and an empty string in the 2nd on single stack
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, "ovn-udn1")
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
}
}
func CurlNode2PodFailUDN(oc *exutil.CLI, nodeName string, namespaceDst string, podNameDst string) {
//getPodIPUDN returns IPv6 and IPv4 in order on dual stack in PodIP1 and PodIP2 respectively, and the main IP in case of single stack (v4 or v6) in PodIP1, with an empty string in PodIP2
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, "ovn-udn1")
if podIP2 != "" {
podv4URL := net.JoinHostPort(podIP2, "8080")
_, err := exutil.DebugNode(oc, nodeName, "curl", podv4URL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
}
podURL := net.JoinHostPort(podIP1, "8080")
_, err := exutil.DebugNode(oc, nodeName, "curl", podURL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
}
func CurlUDNPod2PodPassMultiNetwork(oc *exutil.CLI, namespaceSrc string, namespaceDst string, podNameSrc string, netNameInterface string, podNameDst string, netNameDst string) {
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, netNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
}
}
func CurlUDNPod2PodFailMultiNetwork(oc *exutil.CLI, namespaceSrc string, namespaceDst string, podNameSrc string, netNameInterface string, podNameDst string, netNameDst string) {
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, netNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
}
}
func (udncrd *udnCRDResource) createUdnCRDSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "CIDR="+udncrd.cidr, "PREFIX="+strconv.Itoa(int(udncrd.prefix)), "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
func (udncrd *udnCRDResource) createUdnCRDDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "IPv4CIDR="+udncrd.IPv4cidr, "IPv4PREFIX="+strconv.Itoa(int(udncrd.IPv4prefix)), "IPv6CIDR="+udncrd.IPv6cidr, "IPv6PREFIX="+strconv.Itoa(int(udncrd.IPv6prefix)), "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createCUDNCRDSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"CIDR="+cudncrd.cidr, "PREFIX="+strconv.Itoa(int(cudncrd.prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createCUDNCRDDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv4PREFIX="+strconv.Itoa(int(cudncrd.IPv4prefix)), "IPv6CIDR="+cudncrd.IPv6cidr, "IPv6PREFIX="+strconv.Itoa(int(cudncrd.IPv6prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createCUDNCRDMatchExpSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"CIDR="+cudncrd.cidr, "PREFIX="+strconv.Itoa(int(cudncrd.prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createCUDNCRDMatchExpDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv4PREFIX="+strconv.Itoa(int(cudncrd.IPv4prefix)), "IPv6CIDR="+cudncrd.IPv6cidr, "IPv6PREFIX="+strconv.Itoa(int(cudncrd.IPv6prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func (udncrd *udnCRDResource) deleteUdnCRDDef(oc *exutil.CLI) {
removeResource(oc, true, true, "UserDefinedNetwork", udncrd.crdname, "-n", udncrd.namespace)
}
func waitUDNCRDApplied(oc *exutil.CLI, ns, crdName string) error {
checkErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
output, efErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("UserDefinedNetwork/"+crdName, "-n", ns, "--for", "condition=NetworkAllocationSucceeded=True").Output()
if efErr != nil {
e2e.Logf("Failed to get UDN %v, error: %s. Trying again", crdName, efErr)
return false, nil
}
if !strings.Contains(output, fmt.Sprintf("userdefinednetwork.k8s.ovn.org/%s condition met", crdName)) {
e2e.Logf("UDN CRD was not applied yet, trying again. \n %s", output)
return false, nil
}
return true, nil
})
return checkErr
}
func waitCUDNCRDApplied(oc *exutil.CLI, crdName string) error {
checkErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
output, efErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("ClusterUserDefinedNetwork/"+crdName, "--for", "condition=NetworkCreated=True").Output()
if efErr != nil {
e2e.Logf("Failed to get CUDN %v, error: %s. Trying again", crdName, efErr)
return false, nil
}
if !strings.Contains(output, fmt.Sprintf("clusteruserdefinednetwork.k8s.ovn.org/%s condition met", crdName)) {
e2e.Logf("CUDN CRD was not applied yet, trying again. \n %s", output)
return false, nil
}
return true, nil
})
return checkErr
}
func (udncrd *udnCRDResource) createLayer2DualStackUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "IPv4CIDR="+udncrd.IPv4cidr, "IPv6CIDR="+udncrd.IPv6cidr, "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
func (udncrd *udnCRDResource) createLayer2SingleStackUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "CIDR="+udncrd.cidr, "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createLayer2SingleStackCUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"CIDR="+cudncrd.cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createLayer2DualStackCUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv6CIDR="+cudncrd.IPv6cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createLayer2CUDNCRDMatchExpSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"CIDR="+cudncrd.cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func (cudncrd *cudnCRDResource) createLayer2CUDNCRDMatchExpDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv6CIDR="+cudncrd.IPv6cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
func checkPodCIDRsOverlap(oc *exutil.CLI, namespace string, ipStack string, Pods []string, netName string) bool {
var subnetsIPv4 []*net.IPNet
var subnetsIPv6 []*net.IPNet
var subnets []*net.IPNet
cmdIPv4 := "ip a sho " + netName + " | awk 'NR==3{print $2}'"
cmdIPv6 := "ip -o -6 addr show dev " + netName + " | awk '$3 == \"inet6\" && $6 == \"global\" {print $4}'"
for _, pod := range Pods {
if ipStack == "dualstack" {
podIPv4, ipv4Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv4)
o.Expect(ipv4Err).NotTo(o.HaveOccurred())
podIPv6, ipv6Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv6)
o.Expect(ipv6Err).NotTo(o.HaveOccurred())
_, subnetIPv4, err := net.ParseCIDR(strings.TrimSpace(podIPv4))
o.Expect(err).NotTo(o.HaveOccurred())
subnetsIPv4 = append(subnetsIPv4, subnetIPv4)
_, subnetIPv6, err := net.ParseCIDR(strings.TrimSpace(podIPv6))
o.Expect(err).NotTo(o.HaveOccurred())
subnetsIPv6 = append(subnetsIPv6, subnetIPv6)
} else {
if ipStack == "ipv6single" {
podIPv6, ipv6Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv6)
o.Expect(ipv6Err).NotTo(o.HaveOccurred())
_, subnet, err := net.ParseCIDR(strings.TrimSpace(podIPv6))
o.Expect(err).NotTo(o.HaveOccurred())
subnets = append(subnets, subnet)
} else {
podIPv4, ipv4Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv4)
o.Expect(ipv4Err).NotTo(o.HaveOccurred())
_, subnet, err := net.ParseCIDR(strings.TrimSpace(podIPv4))
o.Expect(err).NotTo(o.HaveOccurred())
subnets = append(subnets, subnet)
}
}
}
if ipStack == "dualstack" {
return subnetsIPv4[0].Contains(subnetsIPv4[1].IP) || subnetsIPv4[1].Contains(subnetsIPv4[0].IP) ||
subnetsIPv6[0].Contains(subnetsIPv6[1].IP) || subnetsIPv6[1].Contains(subnetsIPv6[0].IP)
} else {
return subnets[0].Contains(subnets[1].IP) || subnets[1].Contains(subnets[0].IP)
}
}
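
// Example (hypothetical): assert that two UDN pods were allocated non-overlapping
// subnets on their primary UDN interface. The pod names are assumptions standing in
// for pods created by the surrounding test setup.
func exampleAssertNoCIDROverlap(oc *exutil.CLI, ns string) {
	ipStack := checkIPStackType(oc)
	overlap := checkPodCIDRsOverlap(oc, ns, ipStack, []string{"udn-pod-1", "udn-pod-2"}, "ovn-udn1")
	o.Expect(overlap).To(o.BeFalse())
}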
func applyL3UDNtoNamespace(oc *exutil.CLI, namespace string, udnSelector int) error {
udnCRDSingleStack := exutil.FixturePath("testdata", "networking", "udn", "udn_crd_singlestack_template.yaml")
udnCRDdualStack := exutil.FixturePath("testdata", "networking", "udn", "udn_crd_dualstack2_template.yaml")
SkipIfNoFeatureGate(oc, "NetworkSegmentation")
ipStackType := checkIPStackType(oc)
var mtu int32 = 1300
var cidr, ipv4cidr, ipv6cidr []string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
prefix = 64
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv4prefix = 24
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
ipv6prefix = 64
}
}
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "l3-network-" + namespace,
namespace: namespace,
role: "Primary",
mtu: mtu,
IPv4cidr: ipv4cidr[udnSelector],
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr[udnSelector],
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "l3-network-" + namespace,
namespace: namespace,
role: "Primary",
mtu: mtu,
cidr: cidr[udnSelector],
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, namespace, udncrd.crdname)
return err
}
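
// Example (hypothetical): give two namespaces different predefined L3 UDNs
// (udnSelector 0 and 1) so their pods land on distinct networks; note selectors 1
// and 2 deliberately share a CIDR, which is useful for overlap scenarios.
func exampleApplyL3UDNs(oc *exutil.CLI, ns1, ns2 string) {
	o.Expect(applyL3UDNtoNamespace(oc, ns1, 0)).NotTo(o.HaveOccurred())
	o.Expect(applyL3UDNtoNamespace(oc, ns2, 1)).NotTo(o.HaveOccurred())
}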
func applyCUDNtoMatchLabelNS(oc *exutil.CLI, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, topology string) (cudnCRDResource, error) {
var (
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
cudnCRDSingleStack = filepath.Join(testDataDirUDN, "cudn_crd_singlestack_template.yaml")
cudnCRDdualStack = filepath.Join(testDataDirUDN, "cudn_crd_dualstack_template.yaml")
cudnCRDL2dualStack = filepath.Join(testDataDirUDN, "cudn_crd_layer2_dualstack_template.yaml")
cudnCRDL2SingleStack = filepath.Join(testDataDirUDN, "cudn_crd_layer2_singlestack_template.yaml")
)
ipStackType := checkIPStackType(oc)
cudncrd := cudnCRDResource{
crdname: crdName,
labelkey: matchLabelKey,
labelvalue: matchValue,
role: "Primary",
mtu: 1300,
template: cudnCRDSingleStack,
}
if topology == "layer3" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv4prefix = 24
cudncrd.IPv6cidr = ipv6cidr
cudncrd.IPv6prefix = 64
cudncrd.template = cudnCRDdualStack
cudncrd.createCUDNCRDDualStack(oc)
} else if ipStackType == "ipv6single" {
			cudncrd.prefix = 64
			cudncrd.cidr = cidr
			cudncrd.template = cudnCRDSingleStack
			cudncrd.createCUDNCRDSingleStack(oc)
} else if ipStackType == "ipv4single" {
cudncrd.prefix = 24
cudncrd.cidr = cidr
cudncrd.template = cudnCRDSingleStack
cudncrd.createCUDNCRDSingleStack(oc)
}
} else if topology == "layer2" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv6cidr = ipv6cidr
cudncrd.template = cudnCRDL2dualStack
cudncrd.createLayer2DualStackCUDNCRD(oc)
} else {
cudncrd.cidr = cidr
cudncrd.template = cudnCRDL2SingleStack
cudncrd.createLayer2SingleStackCUDNCRD(oc)
}
}
err := waitCUDNCRDApplied(oc, cudncrd.crdname)
if err != nil {
return cudncrd, err
}
return cudncrd, nil
}
func PingPod2PodPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP2)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
if netutils.IsIPv6String(podIP1) {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP1)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
}
func PingPod2PodFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP2)
o.Expect(err).To(o.HaveOccurred())
} else {
if netutils.IsIPv6String(podIP1) {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).To(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP1)
o.Expect(err).To(o.HaveOccurred())
}
}
}
func verifyConnPod2Pod(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string, protocol string, port int, pass bool) {
e2e.Logf("==== Check %s traffic ====", protocol)
	// kill any leftover socat processes before listening for or sending traffic
for _, nsPod := range [][]string{{namespaceSrc, podNameSrc}, {namespaceDst, podNameDst}} {
e2eoutput.RunHostCmd(nsPod[0], nsPod[1], "killall socat")
}
var clientOpt, serverOpt string
switch protocol {
case "UDP":
clientOpt = "udp-connect"
serverOpt = "udp6-listen"
case "SCTP":
clientOpt = "sctp-connect"
serverOpt = "sctp6-listen"
default:
e2e.Failf("protocol is not specified")
}
e2e.Logf("Listening on port %s on dst pod %s", strconv.Itoa(port), podNameDst)
serverCmd, serverCmdOutput, _, serverCmdErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespaceDst, podNameDst, "--", "socat", "-", serverOpt+":"+strconv.Itoa(port)+",fork").Background()
defer serverCmd.Process.Kill()
o.Expect(serverCmdErr).NotTo(o.HaveOccurred())
e2e.Logf("Check %s process enabled in the dst pod %s", protocol, podNameDst)
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(namespaceDst, podNameDst, "ps aux | grep socat")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "30s", "5s").Should(o.ContainSubstring(serverOpt), "No expected process running on dst pod")
e2e.Logf("Sending %s packets from src pod %s to dst pod %s", protocol, podNameSrc, podNameDst)
podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst)
if pass {
if podIP2 != "" {
clientCmd := fmt.Sprintf("echo hello | socat - %s:%s", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
_, clientCmdErr := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
o.Expect(clientCmdErr).NotTo(o.HaveOccurred())
clientCmd = fmt.Sprintf("echo hello | socat - %s:%s", clientOpt, net.JoinHostPort(podIP2, strconv.Itoa(port)))
_, clientCmdErr = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
o.Expect(clientCmdErr).NotTo(o.HaveOccurred())
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Count(serverCmdOutput.String(), "hello") == 2).To(o.BeTrue())
} else {
clientCmd := fmt.Sprintf("timeout 10 sh -c 'echo hello | socat - %s:%s'", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
_, clientCmdErr := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
o.Expect(clientCmdErr).NotTo(o.HaveOccurred())
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Contains(serverCmdOutput.String(), "hello")).To(o.BeTrue())
}
} else {
if podIP2 != "" {
clientCmd := fmt.Sprintf("timeout 10 sh -c 'echo hello | socat - %s:%s'", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
			clientCmd = fmt.Sprintf("timeout 10 sh -c 'echo hello | socat - %s:%s'", clientOpt, net.JoinHostPort(podIP2, strconv.Itoa(port)))
e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Contains(serverCmdOutput.String(), "hello")).To(o.BeFalse())
} else {
clientCmd := fmt.Sprintf("timeout 10 sh -c 'echo hello | socat - %s:%s'", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Contains(serverCmdOutput.String(), "hello")).To(o.BeFalse())
}
}
}
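
// Example (hypothetical): check that UDP traffic passes between two pods while SCTP
// is blocked, e.g. by a network policy created earlier in the case. Port 9090 is an
// arbitrary example value; the pod images are assumed to ship socat.
func exampleVerifyProtocols(oc *exutil.CLI, ns string) {
	verifyConnPod2Pod(oc, ns, "client-pod", ns, "server-pod", "UDP", 9090, true)
	verifyConnPod2Pod(oc, ns, "client-pod", ns, "server-pod", "SCTP", 9090, false)
}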
func createGeneralUDNCRD(oc *exutil.CLI, namespace, crdName, ipv4cidr, ipv6cidr, cidr, layer string) {
	// This is a helper for common UDN CRD creation without special parameter requirements; it can be used for common cases and reduces duplicated code at the case level.
var (
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_dualstack2_template.yaml")
udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml")
udnCRDLayer2dualStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_dualstack_template.yaml")
udnCRDLayer2SingleStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_singlestack_template.yaml")
)
ipStackType := checkIPStackType(oc)
var udncrd udnCRDResource
if layer == "layer3" {
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv4prefix: 24,
IPv6cidr: ipv6cidr,
IPv6prefix: 64,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else if ipStackType == "ipv6single" {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
cidr: cidr,
prefix: 64,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
} else {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
cidr: cidr,
prefix: 24,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, namespace, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
} else if layer == "layer2" {
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDLayer2dualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDLayer2SingleStack,
}
			udncrd.createLayer2SingleStackUDNCRD(oc)
		}
		// wait for the layer2 UDN to be applied regardless of stack type
		err := waitUDNCRDApplied(oc, namespace, udncrd.crdname)
		o.Expect(err).NotTo(o.HaveOccurred())
} else {
e2e.Logf("Not surpport UDN type for now.")
}
}
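
// Example (hypothetical): create a primary layer3 UDN for a namespace using the CIDR
// conventions seen elsewhere in this file. It assumes an IPv4 or dual-stack cluster;
// on IPv6 single stack the cidr argument would carry the IPv6 range instead.
func exampleCreateGeneralUDN(oc *exutil.CLI, ns string) {
	createGeneralUDNCRD(oc, ns, "udn-network-"+ns, "10.150.0.0/16", "2010:100:200::0/60", "10.150.0.0/16", "layer3")
}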
func createCUDNCRD(oc *exutil.CLI, key, crdName, ipv4cidr, ipv6cidr, cidr, layer string, values []string) (cudnCRDResource, error) {
	// This is a helper for common CUDN CRD creation without special parameter requirements; it can be used for common cases and reduces duplicated code at the case level.
var (
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
cudnCRDL3dualStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_dualstack_template.yaml")
cudnCRDL3SingleStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_singlestack_template.yaml")
cudnCRDLayer2dualStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_layer2_dualstack_template.yaml")
cudnCRDLayer2SingleStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_layer2_singlestack_template.yaml")
)
ipStackType := checkIPStackType(oc)
cudncrd := cudnCRDResource{
crdname: crdName,
key: key,
operator: "In",
values: values,
role: "Primary",
mtu: 1300,
template: cudnCRDL3dualStack,
}
if layer == "layer3" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv4prefix = 24
cudncrd.IPv6cidr = ipv6cidr
cudncrd.IPv6prefix = 64
cudncrd.template = cudnCRDL3dualStack
cudncrd.createCUDNCRDMatchExpDualStack(oc)
} else if ipStackType == "ipv6single" {
cudncrd.prefix = 64
cudncrd.cidr = cidr
cudncrd.template = cudnCRDL3SingleStack
cudncrd.createCUDNCRDMatchExpSingleStack(oc)
} else {
cudncrd.prefix = 24
cudncrd.cidr = cidr
cudncrd.template = cudnCRDL3SingleStack
cudncrd.createCUDNCRDMatchExpSingleStack(oc)
}
} else if layer == "layer2" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv6cidr = ipv6cidr
cudncrd.template = cudnCRDLayer2dualStack
cudncrd.createLayer2CUDNCRDMatchExpDualStack(oc)
} else {
cudncrd.cidr = cidr
cudncrd.template = cudnCRDLayer2SingleStack
cudncrd.createLayer2CUDNCRDMatchExpSingleStack(oc)
}
} else {
e2e.Logf("Not supported UDN type for now.")
}
err := waitCUDNCRDApplied(oc, cudncrd.crdname)
if err != nil {
return cudncrd, err
}
return cudncrd, nil
}
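
// Example (hypothetical): create a layer2 CUDN selecting namespaces whose "team"
// label is blue or green via a matchExpressions selector, then remove it with the
// same removeResource pattern deleteUdnCRDDef uses (no -n flag, as CUDN is
// cluster-scoped). Names and CIDRs are illustrative assumptions.
func exampleCreateCUDN(oc *exutil.CLI) {
	cudncrd, err := createCUDNCRD(oc, "team", "cudn-network-example", "10.160.0.0/16", "2012:100:200::0/60", "10.160.0.0/16", "layer2", []string{"blue", "green"})
	o.Expect(err).NotTo(o.HaveOccurred())
	removeResource(oc, true, true, "ClusterUserDefinedNetwork", cudncrd.crdname)
}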
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
16f3f33a-5a73-4cb8-bf46-e1da7964d3f9
|
createUdnPod
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnPodResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (pod *udnPodResource) createUdnPod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
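
// Example (hypothetical): instantiate a pod from a hello-pod template. The pod name,
// label, and template path are illustrative assumptions matching the parameters the
// template expects.
func exampleCreateUdnPod(oc *exutil.CLI, ns, template string) {
	pod := udnPodResource{
		name:      "hello-pod-example",
		namespace: ns,
		label:     "hello-pod",
		template:  template,
	}
	pod.createUdnPod(oc)
}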
|
networking
| |||
function
|
openshift/openshift-tests-private
|
31da1215-09ad-4df8-81fb-213f7c72f876
|
createUdnPodNode
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnPodResourceNode']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (pod *udnPodResourceNode) createUdnPodNode(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "NODENAME="+pod.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
bc8a1583-76fb-433a-8c7c-894129e19c10
|
createUdnPodWithProbe
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnPodWithProbeResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (pod *udnPodWithProbeResource) createUdnPodWithProbe(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "PORT="+strconv.Itoa(int(pod.port)), "FAILURETHRESHOLD="+strconv.Itoa(int(pod.failurethreshold)), "PERIODSECONDS="+strconv.Itoa(int(pod.periodseconds)))
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
adf8a86c-be9b-4dc6-aee3-cd245e4b9dae
|
createUdnPodWithSecNAD
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnPodSecNADResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (pod *udnPodSecNADResource) createUdnPodWithSecNAD(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "ANNOTATION="+pod.annotation)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
f84c1100-c65c-43d5-92e2-10fcd13e607e
|
createUdnPodWithSecNADNode
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnPodSecNADResourceNode']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (pod *udnPodSecNADResourceNode) createUdnPodWithSecNADNode(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "LABEL="+pod.label, "NADNAME="+pod.nadname, "NODENAME="+pod.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
35d57c18-0e03-45d8-b5cb-a193ba3c65d4
|
createUdnNad
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnNetDefResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (nad *udnNetDefResource) createUdnNad(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nad.template, "-p", "NADNAME="+nad.nadname, "NAMESPACE="+nad.namespace, "NAD_NETWORK_NAME="+nad.nad_network_name, "TOPOLOGY="+nad.topology, "SUBNET="+nad.subnet, "MTU="+strconv.Itoa(int(nad.mtu)), "NET_ATTACH_DEF_NAME="+nad.net_attach_def_name, "ROLE="+nad.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", nad.nadname))
}
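
// Example (hypothetical): define a primary layer3 NAD and delete it when done. The
// subnet string and template path are illustrative assumptions; the field names
// mirror the template parameters consumed by createUdnNad above.
func exampleCreateUdnNad(oc *exutil.CLI, ns, template string) {
	nad := udnNetDefResource{
		nadname:             "udn-nad-example",
		namespace:           ns,
		nad_network_name:    "udn-nad-example",
		topology:            "layer3",
		subnet:              "10.152.0.0/16/24",
		mtu:                 1300,
		net_attach_def_name: ns + "/udn-nad-example",
		role:                "primary",
		template:            template,
	}
	defer nad.deleteUdnNetDef(oc)
	nad.createUdnNad(oc)
}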
|
networking
| |||
function
|
openshift/openshift-tests-private
|
5b425fad-bd67-4c56-abcf-5c7743d24159
|
deleteUdnNetDef
|
['"net"', 'netutils "k8s.io/utils/net"']
|
['udnNetDefResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (nad *udnNetDefResource) deleteUdnNetDef(oc *exutil.CLI) {
removeResource(oc, false, true, "net-attach-def", nad.nadname, "-n", nad.namespace)
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
dbf41d4e-a063-4356-ac79-236d1f353e9e
|
getPodIPUDN
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func getPodIPUDN(oc *exutil.CLI, namespace string, podName string, netName string) (string, string) {
ipStack := checkIPStackType(oc)
cmdIPv4 := "ip a sho " + netName + " | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmdIPv6 := "ip -o -6 addr show dev " + netName + " | awk '$3 == \"inet6\" && $6 == \"global\" {print $4}' | cut -d'/' -f1"
if ipStack == "ipv4single" {
podIPv4, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv4)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The UDN pod %s IPv4 in namespace %s is %q", podName, namespace, podIPv4)
return podIPv4, ""
} else if ipStack == "ipv6single" {
podIPv6, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv6)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The UDN pod %s IPv6 in namespace %s is %q", podName, namespace, podIPv6)
return podIPv6, ""
} else {
podIPv4, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv4)
o.Expect(err).NotTo(o.HaveOccurred())
podIPv6, err := execCommandInSpecificPod(oc, namespace, podName, cmdIPv6)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The UDN pod's %s IPv6 and IPv4 IP in namespace %s is %q %q", podName, namespace, podIPv6, podIPv4)
return podIPv6, podIPv4
}
return "", ""
}
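
// Example (hypothetical): fetch the primary-UDN addresses of a pod and curl each
// address family that is present; "ovn-udn1" is the primary UDN interface name used
// by the helpers below, and port 8080 assumes the hello-pod HTTP server.
func exampleCurlUDNAddresses(oc *exutil.CLI, ns, srcPod, dstPod string) {
	ip1, ip2 := getPodIPUDN(oc, ns, dstPod, "ovn-udn1")
	_, err := e2eoutput.RunHostCmd(ns, srcPod, "curl --connect-timeout 5 -s "+net.JoinHostPort(ip1, "8080"))
	o.Expect(err).NotTo(o.HaveOccurred())
	if ip2 != "" {
		_, err = e2eoutput.RunHostCmd(ns, srcPod, "curl --connect-timeout 5 -s "+net.JoinHostPort(ip2, "8080"))
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}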
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
f30dc033-a254-4d01-a66a-d9e27c632ab5
|
CurlPod2PodPassUDN
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func CurlPod2PodPassUDN(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
	// getPodIPUDN returns IPv6 and IPv4 addresses in that order on dual stack; on single stack (v4 or v6) it returns the main IP in the 1st var and an empty string in the 2nd var
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, "ovn-udn1")
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
}
}
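
// Example (hypothetical): pods behind the same primary UDN reach each other, while a
// pod in a namespace on a different primary UDN must be unreachable. Pod names are
// illustrative assumptions.
func exampleUDNIsolation(oc *exutil.CLI, nsA, nsB string) {
	CurlPod2PodPassUDN(oc, nsA, "hello-pod-1", nsA, "hello-pod-2")
	CurlPod2PodFailUDN(oc, nsA, "hello-pod-1", nsB, "hello-pod-3")
}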
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
eec30d2a-a96c-494a-8786-3f33ead66e75
|
CurlPod2PodFailUDN
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func CurlPod2PodFailUDN(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
	// getPodIPUDN returns IPv6 and IPv4 addresses in that order on dual stack; on single stack (v4 or v6) it returns the main IP in the 1st var and an empty string in the 2nd var
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, "ovn-udn1")
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
f1d1dc14-97a5-4136-92fe-84c6bd2bb5d9
|
CurlNode2PodFailUDN
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func CurlNode2PodFailUDN(oc *exutil.CLI, nodeName string, namespaceDst string, podNameDst string) {
	//getPodIPUDN returns IPv6 and IPv4 in order on dual stack in PodIP1 and PodIP2 respectively, and the main IP in case of single stack (v4 or v6) in PodIP1 with an empty string in PodIP2
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, "ovn-udn1")
if podIP2 != "" {
podv4URL := net.JoinHostPort(podIP2, "8080")
_, err := exutil.DebugNode(oc, nodeName, "curl", podv4URL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
}
podURL := net.JoinHostPort(podIP1, "8080")
_, err := exutil.DebugNode(oc, nodeName, "curl", podURL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
57e1b2b8-2ea9-469a-9c0b-b6d6ed50a866
|
CurlUDNPod2PodPassMultiNetwork
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func CurlUDNPod2PodPassMultiNetwork(oc *exutil.CLI, namespaceSrc string, namespaceDst string, podNameSrc string, netNameInterface string, podNameDst string, netNameDst string) {
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, netNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
480ec2e1-80ff-4b21-b6b5-d158fc4f90e4
|
CurlUDNPod2PodFailMultiNetwork
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func CurlUDNPod2PodFailMultiNetwork(oc *exutil.CLI, namespaceSrc string, namespaceDst string, podNameSrc string, netNameInterface string, podNameDst string, netNameDst string) {
podIP1, podIP2 := getPodIPUDN(oc, namespaceDst, podNameDst, netNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl --interface "+netNameInterface+" --connect-timeout 5 -s "+net.JoinHostPort(podIP1, "8080"))
o.Expect(err).To(o.HaveOccurred())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
10d51e02-09a6-4a65-9589-ce6711f9062b
|
createUdnCRDSingleStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (udncrd *udnCRDResource) createUdnCRDSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "CIDR="+udncrd.cidr, "PREFIX="+strconv.Itoa(int(udncrd.prefix)), "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
b2ec3a33-1fd6-45be-93cc-cff941c9f9b6
|
createUdnCRDDualStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (udncrd *udnCRDResource) createUdnCRDDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "IPv4CIDR="+udncrd.IPv4cidr, "IPv4PREFIX="+strconv.Itoa(int(udncrd.IPv4prefix)), "IPv6CIDR="+udncrd.IPv6cidr, "IPv6PREFIX="+strconv.Itoa(int(udncrd.IPv6prefix)), "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
3f6a9ee0-8672-43a2-8c51-ae40c4c1e65e
|
createCUDNCRDSingleStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createCUDNCRDSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"CIDR="+cudncrd.cidr, "PREFIX="+strconv.Itoa(int(cudncrd.prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
77c2ad1d-0894-49f7-a25f-cb822b884424
|
createCUDNCRDDualStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createCUDNCRDDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv4PREFIX="+strconv.Itoa(int(cudncrd.IPv4prefix)), "IPv6CIDR="+cudncrd.IPv6cidr, "IPv6PREFIX="+strconv.Itoa(int(cudncrd.IPv6prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
9dd47aae-38e8-4eb7-92ef-f28b5a28241c
|
createCUDNCRDMatchExpSingleStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createCUDNCRDMatchExpSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"CIDR="+cudncrd.cidr, "PREFIX="+strconv.Itoa(int(cudncrd.prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
2ba3f32c-5ffa-4943-a536-6c5d7cbfae44
|
createCUDNCRDMatchExpDualStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createCUDNCRDMatchExpDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv4PREFIX="+strconv.Itoa(int(cudncrd.IPv4prefix)), "IPv6CIDR="+cudncrd.IPv6cidr, "IPv6PREFIX="+strconv.Itoa(int(cudncrd.IPv6prefix)), "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
778268ae-8841-461b-aac9-56b4243dc9fa
|
deleteUdnCRDDef
|
['udnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (udncrd *udnCRDResource) deleteUdnCRDDef(oc *exutil.CLI) {
removeResource(oc, true, true, "UserDefinedNetwork", udncrd.crdname, "-n", udncrd.namespace)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
624ecc1b-dd33-47d4-aa92-233e5d49d157
|
waitUDNCRDApplied
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func waitUDNCRDApplied(oc *exutil.CLI, ns, crdName string) error {
checkErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
output, efErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("UserDefinedNetwork/"+crdName, "-n", ns, "--for", "condition=NetworkAllocationSucceeded=True").Output()
if efErr != nil {
e2e.Logf("Failed to get UDN %v, error: %s. Trying again", crdName, efErr)
return false, nil
}
if !strings.Contains(output, fmt.Sprintf("userdefinednetwork.k8s.ovn.org/%s condition met", crdName)) {
e2e.Logf("UDN CRD was not applied yet, trying again. \n %s", output)
return false, nil
}
return true, nil
})
return checkErr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d677555c-7803-445a-b5a6-fa76ec5f779d
|
waitCUDNCRDApplied
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func waitCUDNCRDApplied(oc *exutil.CLI, crdName string) error {
checkErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
output, efErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("ClusterUserDefinedNetwork/"+crdName, "--for", "condition=NetworkCreated=True").Output()
if efErr != nil {
e2e.Logf("Failed to get CUDN %v, error: %s. Trying again", crdName, efErr)
return false, nil
}
if !strings.Contains(output, fmt.Sprintf("clusteruserdefinednetwork.k8s.ovn.org/%s condition met", crdName)) {
e2e.Logf("CUDN CRD was not applied yet, trying again. \n %s", output)
return false, nil
}
return true, nil
})
return checkErr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
dc08f45b-5662-4c95-9a00-8f5e9baa5570
|
createLayer2DualStackUDNCRD
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (udncrd *udnCRDResource) createLayer2DualStackUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "IPv4CIDR="+udncrd.IPv4cidr, "IPv6CIDR="+udncrd.IPv6cidr, "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
5129e566-570f-456a-a87c-a43fd547f6e9
|
createLayer2SingleStackUDNCRD
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['udnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (udncrd *udnCRDResource) createLayer2SingleStackUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", udncrd.template, "-p", "CRDNAME="+udncrd.crdname, "NAMESPACE="+udncrd.namespace, "CIDR="+udncrd.cidr, "MTU="+strconv.Itoa(int(udncrd.mtu)), "ROLE="+udncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create udn CRD %s due to %v", udncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
fd4823b2-2c1c-498d-858f-6831a0b5cb2e
|
createLayer2SingleStackCUDNCRD
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createLayer2SingleStackCUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"CIDR="+cudncrd.cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
86650b19-1f25-4100-a227-2b9b7f1a2d3e
|
createLayer2DualStackCUDNCRD
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createLayer2DualStackCUDNCRD(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "LABELKEY="+cudncrd.labelkey, "LABELVALUE="+cudncrd.labelvalue,
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv6CIDR="+cudncrd.IPv6cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
8592ce1b-8370-46ad-8ed4-fc83a1995071
|
createLayer2CUDNCRDMatchExpSingleStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createLayer2CUDNCRDMatchExpSingleStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"CIDR="+cudncrd.cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
ddd38a83-714e-45a6-b7eb-d85cf3b77540
|
createLayer2CUDNCRDMatchExpDualStack
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func (cudncrd *cudnCRDResource) createLayer2CUDNCRDMatchExpDualStack(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", cudncrd.template, "-p", "CRDNAME="+cudncrd.crdname, "KEY="+cudncrd.key, "OPERATOR="+cudncrd.operator, "VALUE1="+cudncrd.values[0], "VALUE2="+cudncrd.values[1],
"IPv4CIDR="+cudncrd.IPv4cidr, "IPv6CIDR="+cudncrd.IPv6cidr, "MTU="+strconv.Itoa(int(cudncrd.mtu)), "ROLE="+cudncrd.role)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create cudn CRD %s due to %v", cudncrd.crdname, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
90781440-8531-44d1-a606-132715466a58
|
checkPodCIDRsOverlap
|
['"net"', '"strings"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func checkPodCIDRsOverlap(oc *exutil.CLI, namespace string, ipStack string, Pods []string, netName string) bool {
var subnetsIPv4 []*net.IPNet
var subnetsIPv6 []*net.IPNet
var subnets []*net.IPNet
cmdIPv4 := "ip a sho " + netName + " | awk 'NR==3{print $2}'"
cmdIPv6 := "ip -o -6 addr show dev " + netName + " | awk '$3 == \"inet6\" && $6 == \"global\" {print $4}'"
for _, pod := range Pods {
if ipStack == "dualstack" {
podIPv4, ipv4Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv4)
o.Expect(ipv4Err).NotTo(o.HaveOccurred())
podIPv6, ipv6Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv6)
o.Expect(ipv6Err).NotTo(o.HaveOccurred())
_, subnetIPv4, err := net.ParseCIDR(strings.TrimSpace(podIPv4))
o.Expect(err).NotTo(o.HaveOccurred())
subnetsIPv4 = append(subnetsIPv4, subnetIPv4)
_, subnetIPv6, err := net.ParseCIDR(strings.TrimSpace(podIPv6))
o.Expect(err).NotTo(o.HaveOccurred())
subnetsIPv6 = append(subnetsIPv6, subnetIPv6)
} else {
if ipStack == "ipv6single" {
podIPv6, ipv6Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv6)
o.Expect(ipv6Err).NotTo(o.HaveOccurred())
_, subnet, err := net.ParseCIDR(strings.TrimSpace(podIPv6))
o.Expect(err).NotTo(o.HaveOccurred())
subnets = append(subnets, subnet)
} else {
podIPv4, ipv4Err := execCommandInSpecificPod(oc, namespace, pod, cmdIPv4)
o.Expect(ipv4Err).NotTo(o.HaveOccurred())
_, subnet, err := net.ParseCIDR(strings.TrimSpace(podIPv4))
o.Expect(err).NotTo(o.HaveOccurred())
subnets = append(subnets, subnet)
}
}
}
if ipStack == "dualstack" {
return subnetsIPv4[0].Contains(subnetsIPv4[1].IP) || subnetsIPv4[1].Contains(subnetsIPv4[0].IP) ||
subnetsIPv6[0].Contains(subnetsIPv6[1].IP) || subnetsIPv6[1].Contains(subnetsIPv6[0].IP)
} else {
return subnets[0].Contains(subnets[1].IP) || subnets[1].Contains(subnets[0].IP)
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
53b6770c-fa8a-4c5f-bbd6-dbd725c41157
|
applyL3UDNtoNamespace
|
['udnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func applyL3UDNtoNamespace(oc *exutil.CLI, namespace string, udnSelector int) error {
udnCRDSingleStack := exutil.FixturePath("testdata", "networking", "udn", "udn_crd_singlestack_template.yaml")
udnCRDdualStack := exutil.FixturePath("testdata", "networking", "udn", "udn_crd_dualstack2_template.yaml")
SkipIfNoFeatureGate(oc, "NetworkSegmentation")
ipStackType := checkIPStackType(oc)
var mtu int32 = 1300
var cidr, ipv4cidr, ipv6cidr []string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
prefix = 64
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16", "10.151.0.0/16"}
ipv4prefix = 24
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2011:100:200::0/60"}
ipv6prefix = 64
}
}
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "l3-network-" + namespace,
namespace: namespace,
role: "Primary",
mtu: mtu,
IPv4cidr: ipv4cidr[udnSelector],
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr[udnSelector],
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "l3-network-" + namespace,
namespace: namespace,
role: "Primary",
mtu: mtu,
cidr: cidr[udnSelector],
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, namespace, udncrd.crdname)
return err
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
076ea4fb-c28d-4e96-a0db-f496eca986d9
|
applyCUDNtoMatchLabelNS
|
['"path/filepath"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func applyCUDNtoMatchLabelNS(oc *exutil.CLI, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, topology string) (cudnCRDResource, error) {
var (
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
cudnCRDSingleStack = filepath.Join(testDataDirUDN, "cudn_crd_singlestack_template.yaml")
cudnCRDdualStack = filepath.Join(testDataDirUDN, "cudn_crd_dualstack_template.yaml")
cudnCRDL2dualStack = filepath.Join(testDataDirUDN, "cudn_crd_layer2_dualstack_template.yaml")
cudnCRDL2SingleStack = filepath.Join(testDataDirUDN, "cudn_crd_layer2_singlestack_template.yaml")
)
ipStackType := checkIPStackType(oc)
cudncrd := cudnCRDResource{
crdname: crdName,
labelkey: matchLabelKey,
labelvalue: matchValue,
role: "Primary",
mtu: 1300,
template: cudnCRDSingleStack,
}
if topology == "layer3" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv4prefix = 24
cudncrd.IPv6cidr = ipv6cidr
cudncrd.IPv6prefix = 64
cudncrd.template = cudnCRDdualStack
cudncrd.createCUDNCRDDualStack(oc)
} else if ipStackType == "ipv6single" {
			cudncrd.prefix = 64
			cudncrd.cidr = cidr
			cudncrd.template = cudnCRDSingleStack
			cudncrd.createCUDNCRDSingleStack(oc)
} else if ipStackType == "ipv4single" {
cudncrd.prefix = 24
cudncrd.cidr = cidr
cudncrd.template = cudnCRDSingleStack
cudncrd.createCUDNCRDSingleStack(oc)
}
} else if topology == "layer2" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv6cidr = ipv6cidr
cudncrd.template = cudnCRDL2dualStack
cudncrd.createLayer2DualStackCUDNCRD(oc)
} else {
cudncrd.cidr = cidr
cudncrd.template = cudnCRDL2SingleStack
cudncrd.createLayer2SingleStackCUDNCRD(oc)
}
}
err := waitCUDNCRDApplied(oc, cudncrd.crdname)
if err != nil {
return cudncrd, err
}
return cudncrd, nil
}
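// Illustrative usage only (an added sketch, not part of the original suite):
// selecting all namespaces labeled team=qe into one layer2 CUDN. The CRD name
// and CIDR values are hypothetical; on a single-stack cluster the cidr
// argument must match the cluster's address family.
//
// cudn, err := applyCUDNtoMatchLabelNS(oc, "team", "qe", "cudn-l2-example",
// "10.160.0.0/16", "2012:100:200::0/60", "10.160.0.0/16", "layer2")
// o.Expect(err).NotTo(o.HaveOccurred())
// defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusteruserdefinednetwork", cudn.crdname).Execute()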
|
networking
| |||
function
|
openshift/openshift-tests-private
|
6c20eb89-fac8-4785-8312-f93aa1e1e25f
|
PingPod2PodPass
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func PingPod2PodPass(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP2)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
if netutils.IsIPv6String(podIP1) {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP1)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
524b8bdf-c147-4f5c-a572-9398d0e841c2
|
PingPod2PodFail
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func PingPod2PodFail(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string) {
podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst)
if podIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP2)
o.Expect(err).To(o.HaveOccurred())
} else {
if netutils.IsIPv6String(podIP1) {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping6 -c4 "+podIP1)
o.Expect(err).To(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "ping -c4 "+podIP1)
o.Expect(err).To(o.HaveOccurred())
}
}
}
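// Illustrative usage only (an added sketch, not part of the original suite):
// PingPod2PodPass and PingPod2PodFail are typically paired to assert both
// sides of an isolation boundary. Namespace and pod names are hypothetical.
//
// PingPod2PodPass(oc, ns1, "hello-pod1", ns1, "hello-pod2") // same network: reachable
// PingPod2PodFail(oc, ns1, "hello-pod1", ns2, "hello-pod3") // isolated network: unreachable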
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
d0a545ab-0581-4578-ae46-8365e61f2562
|
verifyConnPod2Pod
|
['"fmt"', '"net"', '"strconv"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func verifyConnPod2Pod(oc *exutil.CLI, namespaceSrc string, podNameSrc string, namespaceDst string, podNameDst string, protocol string, port int, pass bool) {
e2e.Logf("==== Check %s traffic ====", protocol)
// kill socat process before sending/listen traffic
for _, nsPod := range [][]string{{namespaceSrc, podNameSrc}, {namespaceDst, podNameDst}} {
e2eoutput.RunHostCmd(nsPod[0], nsPod[1], "killall socat")
}
var clientOpt, serverOpt string
switch protocol {
case "UDP":
clientOpt = "udp-connect"
serverOpt = "udp6-listen"
case "SCTP":
clientOpt = "sctp-connect"
serverOpt = "sctp6-listen"
default:
e2e.Failf("protocol is not specified")
}
e2e.Logf("Listening on port %s on dst pod %s", strconv.Itoa(port), podNameDst)
serverCmd, serverCmdOutput, _, serverCmdErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespaceDst, podNameDst, "--", "socat", "-", serverOpt+":"+strconv.Itoa(port)+",fork").Background()
defer serverCmd.Process.Kill()
o.Expect(serverCmdErr).NotTo(o.HaveOccurred())
e2e.Logf("Check %s process enabled in the dst pod %s", protocol, podNameDst)
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(namespaceDst, podNameDst, "ps aux | grep socat")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "30s", "5s").Should(o.ContainSubstring(serverOpt), "No expected process running on dst pod")
e2e.Logf("Sending %s packets from src pod %s to dst pod %s", protocol, podNameSrc, podNameDst)
podIP1, podIP2 := getPodIP(oc, namespaceDst, podNameDst)
if pass {
if podIP2 != "" {
clientCmd := fmt.Sprintf("echo hello | socat - %s:%s", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
_, clientCmdErr := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
o.Expect(clientCmdErr).NotTo(o.HaveOccurred())
clientCmd = fmt.Sprintf("echo hello | socat - %s:%s", clientOpt, net.JoinHostPort(podIP2, strconv.Itoa(port)))
_, clientCmdErr = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
o.Expect(clientCmdErr).NotTo(o.HaveOccurred())
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Count(serverCmdOutput.String(), "hello") == 2).To(o.BeTrue())
} else {
clientCmd := fmt.Sprintf("timeout 10 sh -c 'echo hello | socat - %s:%s'", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
_, clientCmdErr := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
o.Expect(clientCmdErr).NotTo(o.HaveOccurred())
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Contains(serverCmdOutput.String(), "hello")).To(o.BeTrue())
}
} else {
if podIP2 != "" {
clientCmd := fmt.Sprintf("timeout 10 sh -c 'echo hello | socat - %s:%s'", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
clientCmd = fmt.Sprintf("timeout 10 sh -c 'echo hello | socat %s:%s'", clientOpt, net.JoinHostPort(podIP2, strconv.Itoa(port)))
e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Contains(serverCmdOutput.String(), "hello")).To(o.BeFalse())
} else {
clientCmd := fmt.Sprintf("timeout 10 sh -c 'echo hello | socat - %s:%s'", clientOpt, net.JoinHostPort(podIP1, strconv.Itoa(port)))
e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, clientCmd)
e2e.Logf("output on server side: %s", serverCmdOutput.String())
o.Expect(strings.Contains(serverCmdOutput.String(), "hello")).To(o.BeFalse())
}
}
}
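// Illustrative usage only (an added sketch, not part of the original suite):
// checking that UDP on a hypothetical port 8181 passes between two pods while
// SCTP on the same port is blocked. The final boolean selects the expected
// outcome (true = traffic should pass, false = it should be dropped).
//
// verifyConnPod2Pod(oc, ns, "client-pod", ns, "server-pod", "UDP", 8181, true)
// verifyConnPod2Pod(oc, ns, "client-pod", ns, "server-pod", "SCTP", 8181, false)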
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
6e91d5e9-a31a-4fe3-bdf0-280e49862028
|
createGeneralUDNCRD
|
['"path/filepath"']
|
['udnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func createGeneralUDNCRD(oc *exutil.CLI, namespace, crdName, ipv4cidr, ipv6cidr, cidr, layer string) {
// This is a helper for common UDN CRD creation without special parameter requirements; it covers common cases and reduces duplicated code at the case level.
var (
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_dualstack2_template.yaml")
udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml")
udnCRDLayer2dualStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_dualstack_template.yaml")
udnCRDLayer2SingleStack = filepath.Join(testDataDirUDN, "udn_crd_layer2_singlestack_template.yaml")
)
ipStackType := checkIPStackType(oc)
var udncrd udnCRDResource
if layer == "layer3" {
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv4prefix: 24,
IPv6cidr: ipv6cidr,
IPv6prefix: 64,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else if ipStackType == "ipv6single" {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
cidr: cidr,
prefix: 64,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
} else {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
cidr: cidr,
prefix: 24,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, namespace, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
} else if layer == "layer2" {
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDLayer2dualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: crdName,
namespace: namespace,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDLayer2SingleStack,
}
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, namespace, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
e2e.Logf("Not surpport UDN type for now.")
}
}
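// Illustrative usage only (an added sketch, not part of the original suite):
// creating a layer3 primary UDN regardless of the detected stack type. The
// CIDR values are hypothetical; the helper uses ipv4cidr/ipv6cidr on a
// dualstack cluster and the single cidr argument otherwise, so cidr must
// match the cluster's address family on single-stack clusters.
//
// createGeneralUDNCRD(oc, oc.Namespace(), "udn-l3-example",
// "10.152.0.0/16", "2012:100:200::0/60", "10.152.0.0/16", "layer3")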
|
networking
| |||
function
|
openshift/openshift-tests-private
|
f869e081-0a93-48c2-a79e-cab5d90c0308
|
createCUDNCRD
|
['"path/filepath"']
|
['cudnCRDResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nad_utils.go
|
func createCUDNCRD(oc *exutil.CLI, key, crdName, ipv4cidr, ipv6cidr, cidr, layer string, values []string) (cudnCRDResource, error) {
// This is a helper for common CUDN CRD creation without special parameter requirements; it covers common cases and reduces duplicated code at the case level.
var (
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
cudnCRDL3dualStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_dualstack_template.yaml")
cudnCRDL3SingleStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_singlestack_template.yaml")
cudnCRDLayer2dualStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_layer2_dualstack_template.yaml")
cudnCRDLayer2SingleStack = filepath.Join(testDataDirUDN, "cudn_crd_matchexp_layer2_singlestack_template.yaml")
)
ipStackType := checkIPStackType(oc)
cudncrd := cudnCRDResource{
crdname: crdName,
key: key,
operator: "In",
values: values,
role: "Primary",
mtu: 1300,
template: cudnCRDL3dualStack,
}
if layer == "layer3" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv4prefix = 24
cudncrd.IPv6cidr = ipv6cidr
cudncrd.IPv6prefix = 64
cudncrd.template = cudnCRDL3dualStack
cudncrd.createCUDNCRDMatchExpDualStack(oc)
} else if ipStackType == "ipv6single" {
cudncrd.prefix = 64
cudncrd.cidr = cidr
cudncrd.template = cudnCRDL3SingleStack
cudncrd.createCUDNCRDMatchExpSingleStack(oc)
} else {
cudncrd.prefix = 24
cudncrd.cidr = cidr
cudncrd.template = cudnCRDL3SingleStack
cudncrd.createCUDNCRDMatchExpSingleStack(oc)
}
} else if layer == "layer2" {
if ipStackType == "dualstack" {
cudncrd.IPv4cidr = ipv4cidr
cudncrd.IPv6cidr = ipv6cidr
cudncrd.template = cudnCRDLayer2dualStack
cudncrd.createLayer2CUDNCRDMatchExpDualStack(oc)
} else {
cudncrd.cidr = cidr
cudncrd.template = cudnCRDLayer2SingleStack
cudncrd.createLayer2CUDNCRDMatchExpSingleStack(oc)
}
} else {
e2e.Logf("Not supported UDN type for now.")
}
err := waitCUDNCRDApplied(oc, cudncrd.crdname)
if err != nil {
return cudncrd, err
}
return cudncrd, nil
}
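// Illustrative usage only (an added sketch, not part of the original suite):
// creating a layer2 CUDN whose namespace selector matches label "team" In
// {"qe-a", "qe-b"}. All names and CIDRs are hypothetical; on a single-stack
// cluster the cidr argument must match the cluster's address family.
//
// cudn, err := createCUDNCRD(oc, "team", "cudn-l2-matchexp",
// "10.161.0.0/16", "2013:100:200::0/60", "10.161.0.0/16", "layer2",
// []string{"qe-a", "qe-b"})
// o.Expect(err).NotTo(o.HaveOccurred())
// defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusteruserdefinednetwork", cudn.crdname).Execute()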
|
networking
| |||
test
|
openshift/openshift-tests-private
|
7d922c5b-bb63-4265-b507-46605fef5d4d
|
network_tools
|
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
netutils "k8s.io/utils/net"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/network_tools.go
|
package networking
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
netutils "k8s.io/utils/net"
)
var _ = g.Describe("[sig-networking] SDN network-tools ovnkube-trace", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-tools", exutil.KubeConfigPath())
expPod2PodResult = []string{"ovn-trace source pod to destination pod indicates success",
"ovn-trace destination pod to source pod indicates success",
"ovs-appctl ofproto/trace source pod to destination pod indicates success",
"ovs-appctl ofproto/trace destination pod to source pod indicates success",
"ovn-detrace source pod to destination pod indicates success",
"ovn-detrace destination pod to source pod indicates success"}
expPod2PodRemoteResult = []string{"ovn-trace (remote) source pod to destination pod indicates success",
"ovn-trace (remote) destination pod to source pod indicates success"}
expPod2SvcResult = []string{"ovn-trace source pod to service clusterIP indicates success"}
expPod2IPResult = []string{"ovn-trace from pod to IP indicates success",
"ovs-appctl ofproto/trace pod to IP indicates success",
"ovn-detrace pod to external IP indicates success"}
image = "openshift/network-tools:latest"
)
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
// author: [email protected]
g.It("Author:qiowang-Medium-67625-Medium-67648-Check ovnkube-trace - pod2pod traffic and pod2hostnetworkpod traffic", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml")
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
workerNode1 := nodeList.Items[0].Name
workerNode2 := nodeList.Items[1].Name
tmpPath := "/tmp/ocp-67625-67648"
defer os.RemoveAll(tmpPath)
exutil.By("1. Create hello-pod1, pod located on the first node")
ns := oc.Namespace()
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns,
nodename: workerNode1,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("2. Create hello-pod2 and hostnetwork hostnetwork-hello-pod2, pod located on the first node")
//Required for hostnetwork pod
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pod2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns,
nodename: workerNode1,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
hostnetworkPod2 := pingPodResourceNode{
name: "hostnetwork-hello-pod2",
namespace: ns,
nodename: workerNode1,
template: hostNetworkPodTemplate,
}
hostnetworkPod2.createPingPodNode(oc)
waitPodReady(oc, hostnetworkPod2.namespace, hostnetworkPod2.name)
exutil.By("3. Create hello-pod3 and hostnetwork hostnetwork-hello-pod3, pod located on the second node")
pod3 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns,
nodename: workerNode2,
template: pingPodNodeTemplate,
}
pod3.createPingPodNode(oc)
waitPodReady(oc, pod3.namespace, pod3.name)
hostnetworkPod3 := pingPodResourceNode{
name: "hostnetwork-hello-pod3",
namespace: ns,
nodename: workerNode2,
template: hostNetworkPodTemplate,
}
hostnetworkPod3.createPingPodNode(oc)
waitPodReady(oc, hostnetworkPod3.namespace, hostnetworkPod3.name)
exutil.By("4. Simulate traffic between pod and pod when they land on the same node")
podIP1 := getPodIPv4(oc, ns, pod1.name)
addrFamily := "ip4"
if netutils.IsIPv6String(podIP1) {
addrFamily = "ip6"
}
cmd := "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + pod2.name + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
exutil.By("5. Simulate traffic between pod and pod when they land on different nodes")
cmd = "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + pod3.name + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr = collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
for _, expResult := range expPod2PodRemoteResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
exutil.By("6. Simulate traffic between pod and hostnetwork pod when they land on the same node")
cmd = "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + hostnetworkPod2.name + " -udp -addr-family " + addrFamily
traceOutput, cmdErr = collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
exutil.By("7. Simulate traffic between pod and hostnetwork pod when they land on different nodes")
cmd = "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + hostnetworkPod3.name + " -udp -addr-family " + addrFamily
traceOutput, cmdErr = collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
o.Expect(strings.Contains(string(traceOutput), expPod2PodRemoteResult[1])).Should(o.BeTrue())
})
g.It("Author:qiowang-Medium-67649-Check ovnkube-trace - pod2service traffic", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
tmpPath := "/tmp/ocp-67649"
defer os.RemoveAll(tmpPath)
exutil.By("1. Create hello-pod")
ns := oc.Namespace()
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("2. Simulate traffic between pod and service")
podIP1 := getPodIPv4(oc, ns, pod.name)
addrFamily := "ip4"
if netutils.IsIPv6String(podIP1) {
addrFamily = "ip6"
}
cmd := "ovnkube-trace -src-namespace " + ns + " -src " + pod.name + " -dst-namespace openshift-dns -service dns-default -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
for _, expResult := range expPod2SvcResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
})
g.It("Author:qiowang-NonPreRelease-Medium-55180-Check ovnkube-trace - pod2external traffic [Disruptive]", func() {
var (
testScope = []string{"without egressip", "with egressip"}
egressNodeLabel = "k8s.ovn.org/egress-assignable"
externalIPv4 = "8.8.8.8"
externalIPv6 = "2001:4860:4860::8888"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
// check if the cluster is supported for test steps related to egressip
// focus on RDU dualstack/ipv6single cluster for ipv6 traffic, and other supported platforms for ipv4 traffic
testList := []string{testScope[0]}
addrFamily := "ip4"
externalIP := externalIPv4
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
addrFamily = "ip6"
externalIP = externalIPv6
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
e2e.Logf("Test steps related egressip will only run on rdu1 or rdu2 dualstack/ipv6single cluster, skip for other envrionment!!")
} else {
testList = append(testList, testScope[1])
}
} else {
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "aws") || strings.Contains(platform, "gcp") || strings.Contains(platform, "openstack") || strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "azure") || strings.Contains(platform, "nutanix")
if !acceptedPlatform {
e2e.Logf("Test steps related egressip should be run on AWS/GCP/Azure/Openstack/Vsphere/BareMetal/Nutanix cluster, will skip for other platforms!!")
} else {
testList = append(testList, testScope[1])
}
}
tmpPath := "/tmp/ocp-55180"
defer os.RemoveAll(tmpPath)
var nsList, podList []string
for _, testItem := range testList {
exutil.By("Verify pod2external traffic when the pod associate " + testItem)
exutil.By("Create namespace")
oc.SetupProject()
ns := oc.Namespace()
nsList = append(nsList, ns)
if testItem == "with egressip" {
exutil.By("Label namespace with name=test")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
exutil.By("Label EgressIP node")
egressNode := nodeList.Items[0].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("Create egressip object")
var freeIPs []string
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
freeIPs = findFreeIPv6s(oc, egressNode, 2)
} else {
freeIPs = findFreeIPs(oc, egressNode, 2)
}
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip := egressIPResource1{
name: "egressip-55180",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip.deleteEgressIPObject1(oc)
egressip.createEgressIPObject1(oc)
egressIPMaps := getAssignedEIPInEIPObject(oc, egressip.name)
o.Expect(len(egressIPMaps)).Should(o.Equal(1))
}
exutil.By("Create test pod in the namespace")
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
podList = append(podList, pod.name)
exutil.By("Simulate traffic between pod and external IP, pod associate " + testItem)
cmd := "ovnkube-trace -src-namespace " + ns + " -src " + pod.name + " -dst-ip " + externalIP + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2IPResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
}
exutil.By("Switch gateway mode")
origMode := getOVNGatewayMode(oc)
var desiredMode string
if origMode == "local" {
desiredMode = "shared"
} else {
desiredMode = "local"
}
e2e.Logf("Cluster is currently on gateway mode %s", origMode)
e2e.Logf("Desired mode is %s", desiredMode)
defer switchOVNGatewayMode(oc, origMode)
switchOVNGatewayMode(oc, desiredMode)
for i, testItem := range testList {
exutil.By("Simulate traffic between pod and external IP, pod associate " + testItem)
cmd := "ovnkube-trace -src-namespace " + nsList[i] + " -src " + podList[i] + " -dst-ip " + externalIP + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2IPResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
}
})
})
var _ = g.Describe("[sig-networking] SDN network-tools scripts", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-tools", exutil.KubeConfigPath())
image = "openshift/network-tools:latest"
)
g.It("Author:qiowang-NonHyperShiftHOST-Medium-55890-Verify functionality of network-tools script - ovn-get", func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
scriptName := "ovn-get"
exutil.By("1. Get ovn-k/nbdb/sbdb leaders with " + scriptName + " script")
e2e.Logf("Get ovnk leader pod")
ovnkLeader := getOVNKMasterPod(oc)
mustgatherDir := "/tmp/must-gather-55890-1"
defer os.RemoveAll(mustgatherDir)
parameters := []string{"network-tools", scriptName, "leaders"}
output, cmdErr := collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ovn-k master leader "+ovnkLeader)).Should(o.BeTrue())
o.Expect(strings.Contains(output, "nbdb leader not applicable in ovn-ic mode")).Should(o.BeTrue())
o.Expect(strings.Contains(output, "sbdb leader not applicable in ovn-ic mode")).Should(o.BeTrue())
exutil.By("2. Download dbs with " + scriptName + " script")
e2e.Logf("Get all ovnkube-node pods")
ovnNodePods := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
mustgatherDir = "/tmp/must-gather-55890-2"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "dbs"}
_, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
files, getFilesErr := exec.Command("bash", "-c", "ls -l "+mustgatherDir+"/quay-io-openshift-release-dev-ocp*").Output()
o.Expect(getFilesErr).NotTo(o.HaveOccurred())
for _, podName := range ovnNodePods {
o.Expect(strings.Contains(string(files), podName+"_nbdb")).Should(o.BeTrue())
o.Expect(strings.Contains(string(files), podName+"_sbdb")).Should(o.BeTrue())
}
exutil.By("3. Get ovn cluster mode with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55890-3"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "mode"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "cluster is running in multi-zone (ovn-interconnect / ovn-ic)")).Should(o.BeTrue())
})
g.It("Author:qiowang-Medium-55889-Verify functionality of network-tools script - ovn-db-run-command", func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
scriptName := "ovn-db-run-command"
exutil.By("1. Run ovn-nbctl command with " + scriptName + " script")
mustgatherDir := "/tmp/must-gather-55889-1"
defer os.RemoveAll(mustgatherDir)
parameters := []string{"network-tools", scriptName, "ovn-nbctl", "lr-list"}
output, cmdErr := collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ovn_cluster_router")).Should(o.BeTrue())
exutil.By("2. Run ovn-sbctl command with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55889-2"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "ovn-sbctl", "show"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "Port_Binding")).Should(o.BeTrue())
exutil.By("3. Run ovndb command in specified pod with " + scriptName + " script")
ovnNodePods := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
nodeName, getNodeErr := exutil.GetPodNodeName(oc, "openshift-ovn-kubernetes", ovnNodePods[0])
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
mustgatherDir = "/tmp/must-gather-55889-3"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "-p", ovnNodePods[0], "ovn-nbctl", "lr-list"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "GR_"+nodeName)).Should(o.BeTrue())
})
g.It("Author:qiowang-Medium-55887-Verify functionality of network-tools script - pod-run-netns-command", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
scriptName = "pod-run-netns-command"
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
exutil.By("0. Create hello-pod")
ns := oc.Namespace()
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
podIP := getPodIPv4(oc, ns, pod.name)
exutil.By("1. Run multiple commands with " + scriptName + " script")
mustgatherDir := "/tmp/must-gather-55887-1"
defer os.RemoveAll(mustgatherDir)
parameters := []string{"network-tools", scriptName, "--multiple-commands", pod.namespace, pod.name, "ip a show eth0; ip a show lo"}
output, cmdErr := collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, podIP)).Should(o.BeTrue())
o.Expect(strings.Contains(output, "127.0.0.1")).Should(o.BeTrue())
exutil.By("2. Run command that needs to preserve the literal meaning of with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55887-2"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "--no-substitution", pod.namespace, pod.name, `'i=0; i=$(( $i + 1 )); echo result$i'`}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "result1")).Should(o.BeTrue())
exutil.By("3. Run command and save the debug pod for 5 minutes with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55887-3"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "--preserve-pod", pod.namespace, pod.name, "timeout 5 tcpdump"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "DONE")).Should(o.BeTrue())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
07950155-e1b7-4261-90a1-25de27f6d795
|
Author:qiowang-Medium-67625-Medium-67648-Check ovnkube-trace - pod2pod traffic and pod2hostnetworkpod traffic
|
['"context"', '"os"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/network_tools.go
|
g.It("Author:qiowang-Medium-67625-Medium-67648-Check ovnkube-trace - pod2pod traffic and pod2hostnetworkpod traffic", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
hostNetworkPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-hostnetwork-specific-node-template.yaml")
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
workerNode1 := nodeList.Items[0].Name
workerNode2 := nodeList.Items[1].Name
tmpPath := "/tmp/ocp-67625-67648"
defer os.RemoveAll(tmpPath)
exutil.By("1. Create hello-pod1, pod located on the first node")
ns := oc.Namespace()
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns,
nodename: workerNode1,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("2. Create hello-pod2 and hostnetwork hostnetwork-hello-pod2, pod located on the first node")
//Required for hostnetwork pod
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-group", "privileged", "system:serviceaccounts:"+ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pod2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns,
nodename: workerNode1,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
hostnetworkPod2 := pingPodResourceNode{
name: "hostnetwork-hello-pod2",
namespace: ns,
nodename: workerNode1,
template: hostNetworkPodTemplate,
}
hostnetworkPod2.createPingPodNode(oc)
waitPodReady(oc, hostnetworkPod2.namespace, hostnetworkPod2.name)
exutil.By("3. Create hello-pod3 and hostnetwork hostnetwork-hello-pod3, pod located on the second node")
pod3 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns,
nodename: workerNode2,
template: pingPodNodeTemplate,
}
pod3.createPingPodNode(oc)
waitPodReady(oc, pod3.namespace, pod3.name)
hostnetworkPod3 := pingPodResourceNode{
name: "hostnetwork-hello-pod3",
namespace: ns,
nodename: workerNode2,
template: hostNetworkPodTemplate,
}
hostnetworkPod3.createPingPodNode(oc)
waitPodReady(oc, hostnetworkPod3.namespace, hostnetworkPod3.name)
exutil.By("4. Simulate traffic between pod and pod when they land on the same node")
podIP1 := getPodIPv4(oc, ns, pod1.name)
addrFamily := "ip4"
if netutils.IsIPv6String(podIP1) {
addrFamily = "ip6"
}
cmd := "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + pod2.name + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
exutil.By("5. Simulate traffic between pod and pod when they land on different nodes")
cmd = "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + pod3.name + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr = collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
for _, expResult := range expPod2PodRemoteResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
exutil.By("6. Simulate traffic between pod and hostnetwork pod when they land on the same node")
cmd = "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + hostnetworkPod2.name + " -udp -addr-family " + addrFamily
traceOutput, cmdErr = collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
exutil.By("7. Simulate traffic between pod and hostnetwork pod when they land on different nodes")
cmd = "ovnkube-trace -src-namespace " + ns + " -src " + pod1.name + " -dst-namespace " + ns + " -dst " + hostnetworkPod3.name + " -udp -addr-family " + addrFamily
traceOutput, cmdErr = collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
o.Expect(strings.Contains(string(traceOutput), expPod2PodRemoteResult[1])).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
13246b60-6b5d-4834-a3d6-86902beaa91c
|
Author:qiowang-Medium-67649-Check ovnkube-trace - pod2service traffic
|
['"context"', '"os"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/network_tools.go
|
g.It("Author:qiowang-Medium-67649-Check ovnkube-trace - pod2service traffic", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
tmpPath := "/tmp/ocp-67649"
defer os.RemoveAll(tmpPath)
exutil.By("1. Create hello-pod")
ns := oc.Namespace()
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("2. Simulate traffic between pod and service")
podIP1 := getPodIPv4(oc, ns, pod.name)
addrFamily := "ip4"
if netutils.IsIPv6String(podIP1) {
addrFamily = "ip6"
}
cmd := "ovnkube-trace -src-namespace " + ns + " -src " + pod.name + " -dst-namespace openshift-dns -service dns-default -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2PodResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
for _, expResult := range expPod2SvcResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
40747fa1-846e-4631-8b54-31587a46e72a
|
Author:qiowang-NonPreRelease-Medium-55180-Check ovnkube-trace - pod2external traffic [Disruptive]
|
['"context"', '"os"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/network_tools.go
|
g.It("Author:qiowang-NonPreRelease-Medium-55180-Check ovnkube-trace - pod2external traffic [Disruptive]", func() {
var (
testScope = []string{"without egressip", "with egressip"}
egressNodeLabel = "k8s.ovn.org/egress-assignable"
externalIPv4 = "8.8.8.8"
externalIPv6 = "2001:4860:4860::8888"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
// check if the cluster is supported for test steps related to egressip
// focus on RDU dualstack/ipv6single cluster for ipv6 traffic, and other supported platforms for ipv4 traffic
testList := []string{testScope[0]}
addrFamily := "ip4"
externalIP := externalIPv4
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
addrFamily = "ip6"
externalIP = externalIPv6
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
e2e.Logf("Test steps related egressip will only run on rdu1 or rdu2 dualstack/ipv6single cluster, skip for other envrionment!!")
} else {
testList = append(testList, testScope[1])
}
} else {
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "aws") || strings.Contains(platform, "gcp") || strings.Contains(platform, "openstack") || strings.Contains(platform, "vsphere") || strings.Contains(platform, "baremetal") || strings.Contains(platform, "azure") || strings.Contains(platform, "nutanix")
if !acceptedPlatform {
e2e.Logf("Test steps related egressip should be run on AWS/GCP/Azure/Openstack/Vsphere/BareMetal/Nutanix cluster, will skip for other platforms!!")
} else {
testList = append(testList, testScope[1])
}
}
tmpPath := "/tmp/ocp-55180"
defer os.RemoveAll(tmpPath)
var nsList, podList []string
for _, testItem := range testList {
exutil.By("Verify pod2external traffic when the pod associate " + testItem)
exutil.By("Create namespace")
oc.SetupProject()
ns := oc.Namespace()
nsList = append(nsList, ns)
if testItem == "with egressip" {
exutil.By("Label namespace with name=test")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name-").Execute()
nsLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "name=test").Execute()
o.Expect(nsLabelErr).NotTo(o.HaveOccurred())
exutil.By("Label EgressIP node")
egressNode := nodeList.Items[0].Name
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel)
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, egressNode, egressNodeLabel, "true")
exutil.By("Create egressip object")
var freeIPs []string
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
freeIPs = findFreeIPv6s(oc, egressNode, 2)
} else {
freeIPs = findFreeIPs(oc, egressNode, 2)
}
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip := egressIPResource1{
name: "egressip-55180",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer egressip.deleteEgressIPObject1(oc)
egressip.createEgressIPObject1(oc)
egressIPMaps := getAssignedEIPInEIPObject(oc, egressip.name)
o.Expect(len(egressIPMaps)).Should(o.Equal(1))
}
exutil.By("Create test pod in the namespace")
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
podList = append(podList, pod.name)
exutil.By("Simulate traffic between pod and external IP, pod associate " + testItem)
cmd := "ovnkube-trace -src-namespace " + ns + " -src " + pod.name + " -dst-ip " + externalIP + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2IPResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
}
exutil.By("Switch gateway mode")
origMode := getOVNGatewayMode(oc)
var desiredMode string
if origMode == "local" {
desiredMode = "shared"
} else {
desiredMode = "local"
}
e2e.Logf("Cluster is currently on gateway mode %s", origMode)
e2e.Logf("Desired mode is %s", desiredMode)
defer switchOVNGatewayMode(oc, origMode)
switchOVNGatewayMode(oc, desiredMode)
for i, testItem := range testList {
exutil.By("Simulate traffic between pod and external IP, pod associate " + testItem)
cmd := "ovnkube-trace -src-namespace " + nsList[i] + " -src " + podList[i] + " -dst-ip " + externalIP + " -tcp -addr-family " + addrFamily
traceOutput, cmdErr := collectMustGather(oc, tmpPath, image, []string{cmd})
o.Expect(cmdErr).NotTo(o.HaveOccurred())
for _, expResult := range expPod2IPResult {
o.Expect(strings.Contains(string(traceOutput), expResult)).Should(o.BeTrue())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
224871dd-0573-4365-864c-0c47f03c052d
|
Author:qiowang-NonHyperShiftHOST-Medium-55890-Verify functionality of network-tools script - ovn-get
|
['"os"', '"os/exec"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/network_tools.go
|
g.It("Author:qiowang-NonHyperShiftHOST-Medium-55890-Verify functionality of network-tools script - ovn-get", func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
scriptName := "ovn-get"
exutil.By("1. Get ovn-k/nbdb/sbdb leaders with " + scriptName + " script")
e2e.Logf("Get ovnk leader pod")
ovnkLeader := getOVNKMasterPod(oc)
mustgatherDir := "/tmp/must-gather-55890-1"
defer os.RemoveAll(mustgatherDir)
parameters := []string{"network-tools", scriptName, "leaders"}
output, cmdErr := collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ovn-k master leader "+ovnkLeader)).Should(o.BeTrue())
o.Expect(strings.Contains(output, "nbdb leader not applicable in ovn-ic mode")).Should(o.BeTrue())
o.Expect(strings.Contains(output, "sbdb leader not applicable in ovn-ic mode")).Should(o.BeTrue())
exutil.By("2. Download dbs with " + scriptName + " script")
e2e.Logf("Get all ovnkube-node pods")
ovnNodePods := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
mustgatherDir = "/tmp/must-gather-55890-2"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "dbs"}
_, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
files, getFilesErr := exec.Command("bash", "-c", "ls -l "+mustgatherDir+"/quay-io-openshift-release-dev-ocp*").Output()
o.Expect(getFilesErr).NotTo(o.HaveOccurred())
for _, podName := range ovnNodePods {
o.Expect(strings.Contains(string(files), podName+"_nbdb")).Should(o.BeTrue())
o.Expect(strings.Contains(string(files), podName+"_sbdb")).Should(o.BeTrue())
}
exutil.By("3. Get ovn cluster mode with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55890-3"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "mode"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "cluster is running in multi-zone (ovn-interconnect / ovn-ic)")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
5b812423-9d9e-49af-a742-56c892dc302b
|
Author:qiowang-Medium-55889-Verify functionality of network-tools script - ovn-db-run-command
|
['"os"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/network_tools.go
|
g.It("Author:qiowang-Medium-55889-Verify functionality of network-tools script - ovn-db-run-command", func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
scriptName := "ovn-db-run-command"
exutil.By("1. Run ovn-nbctl command with " + scriptName + " script")
mustgatherDir := "/tmp/must-gather-55889-1"
defer os.RemoveAll(mustgatherDir)
parameters := []string{"network-tools", scriptName, "ovn-nbctl", "lr-list"}
output, cmdErr := collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "ovn_cluster_router")).Should(o.BeTrue())
exutil.By("2. Run ovn-sbctl command with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55889-2"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "ovn-sbctl", "show"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "Port_Binding")).Should(o.BeTrue())
exutil.By("3. Run ovndb command in specified pod with " + scriptName + " script")
ovnNodePods := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
nodeName, getNodeErr := exutil.GetPodNodeName(oc, "openshift-ovn-kubernetes", ovnNodePods[0])
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
mustgatherDir = "/tmp/must-gather-55889-3"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "-p", ovnNodePods[0], "ovn-nbctl", "lr-list"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "GR_"+nodeName)).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
c2f7f756-1d29-4993-8613-0a96e1b00882
|
Author:qiowang-Medium-55887-Verify functionality of network-tools script - pod-run-netns-command
|
['"context"', '"os"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/network_tools.go
|
g.It("Author:qiowang-Medium-55887-Verify functionality of network-tools script - pod-run-netns-command", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
scriptName = "pod-run-netns-command"
)
nodeList, getNodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("Not enough nodes available for the test, skip the case!!")
}
exutil.By("0. Create hello-pod")
ns := oc.Namespace()
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
podIP := getPodIPv4(oc, ns, pod.name)
exutil.By("1. Run multiple commands with " + scriptName + " script")
mustgatherDir := "/tmp/must-gather-55887-1"
defer os.RemoveAll(mustgatherDir)
parameters := []string{"network-tools", scriptName, "--multiple-commands", pod.namespace, pod.name, "ip a show eth0; ip a show lo"}
output, cmdErr := collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, podIP)).Should(o.BeTrue())
o.Expect(strings.Contains(output, "127.0.0.1")).Should(o.BeTrue())
exutil.By("2. Run command that needs to preserve the literal meaning of with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55887-2"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "--no-substitution", pod.namespace, pod.name, `'i=0; i=$(( $i + 1 )); echo result$i'`}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "result1")).Should(o.BeTrue())
exutil.By("3. Run command and save the debug pod for 5 minutes with " + scriptName + " script")
mustgatherDir = "/tmp/must-gather-55887-3"
defer os.RemoveAll(mustgatherDir)
parameters = []string{"network-tools", scriptName, "--preserve-pod", pod.namespace, pod.name, "timeout 5 tcpdump"}
output, cmdErr = collectMustGather(oc, mustgatherDir, image, parameters)
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "DONE")).Should(o.BeTrue())
})
| |||||
test
|
openshift/openshift-tests-private
|
78274da4-d397-4f8c-b2ba-00aa0dc6a0f9
|
networkpolicy
|
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
package networking
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN networkpolicy", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-networkpolicy", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
// author: [email protected]
g.It("Author:zzhao-Critical-49076-[FdpOvnOvs]-service domain can be resolved when egress type is enabled", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloSdnFile = filepath.Join(buildPruningBaseDir, "hellosdn.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress-allow-all.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/ingress-allow-all.yaml")
)
g.By("create new namespace")
oc.SetupProject()
g.By("create test pods")
createResourceFromFile(oc, oc.Namespace(), testPodFile)
createResourceFromFile(oc, oc.Namespace(), helloSdnFile)
err := waitForPodWithLabelReady(oc, oc.Namespace(), "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
err = waitForPodWithLabelReady(oc, oc.Namespace(), "name=hellosdn")
exutil.AssertWaitPollNoErr(err, "this pod with label name=hellosdn not ready")
g.By("create egress and ingress type networkpolicy")
createResourceFromFile(oc, oc.Namespace(), egressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-all-egress"))
createResourceFromFile(oc, oc.Namespace(), ingressTypeFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-all-ingress"))
g.By("check hellosdn pods can reolsve the dns after apply the networkplicy")
helloSdnName := getPodName(oc, oc.Namespace(), "name=hellosdn")
digOutput, err := e2eoutput.RunHostCmd(oc.Namespace(), helloSdnName[0], "dig kubernetes.default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(digOutput).Should(o.ContainSubstring("Got answer"))
o.Expect(digOutput).ShouldNot(o.ContainSubstring("connection timed out"))
g.By("check test-pods can reolsve the dns after apply the networkplicy")
testPodName := getPodName(oc, oc.Namespace(), "name=test-pods")
digOutput, err = e2eoutput.RunHostCmd(oc.Namespace(), testPodName[0], "dig kubernetes.default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(digOutput).Should(o.ContainSubstring("Got answer"))
o.Expect(digOutput).ShouldNot(o.ContainSubstring("connection timed out"))
})
// author: [email protected]
g.It("Author:huirwang-Critical-49186-[FdpOvnOvs] [Bug 2035336] Networkpolicy egress rule should work for statefulset pods.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloStatefulsetFile = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-egress-red.yaml")
)
g.By("1. Create first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("2. Create a statefulset pod in first namespace.")
createResourceFromFile(oc, ns1, helloStatefulsetFile)
err := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(err, "this pod with label app=hello not ready")
helloPodName := getPodName(oc, ns1, "app=hello")
g.By("3. Create networkpolicy with egress rule in first namespace.")
createResourceFromFile(oc, ns1, egressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-egress-to-red"))
g.By("4. Create second namespace.")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("5. Create test pods in second namespace.")
createResourceFromFile(oc, ns2, testPodFile)
err = waitForPodWithLabelReady(oc, oc.Namespace(), "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("6. Add label to first test pod in second namespace.")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "team=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
testPodName := getPodName(oc, ns2, "name=test-pods")
err = exutil.LabelPod(oc, ns2, testPodName[0], "type=red")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("6. Get IP of the test pods in second namespace.")
testPodIP1 := getPodIPv4(oc, ns2, testPodName[0])
testPodIP2 := getPodIPv4(oc, ns2, testPodName[1])
g.By("7. Check networkpolicy works.")
output, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
_, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
g.By("8. Delete statefulset pod for a couple of times.")
for i := 0; i < 5; i++ {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", helloPodName[0], "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(err, "this pod with label app=hello not ready")
}
g.By("9. Again checking networkpolicy works.")
output, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
_, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
})
// author: [email protected]
g.It("Author:anusaxen-High-49437-[FdpOvnOvs] [BZ 2037647] Ingress network policy shouldn't be overruled by egress network policy on another pod", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-allow-egress.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
g.By("Create first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create a hello pod in first namespace")
podns1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns1.createPingPodNode(oc)
waitPodReady(oc, podns1.namespace, podns1.name)
g.By("create default allow egress type networkpolicy in first namespace")
createResourceFromFile(oc, ns1, egressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-allow-egress"))
g.By("Create Second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create a hello-pod on 2nd namesapce on same node as first namespace")
pod1Ns2 := pingPodResourceNode{
name: "hello-pod",
namespace: ns2,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1Ns2.createPingPodNode(oc)
waitPodReady(oc, pod1Ns2.namespace, pod1Ns2.name)
g.By("create another hello-pod on 2nd namesapce but on different node")
pod2Ns2 := pingPodResourceNode{
name: "hello-pod-other-node",
namespace: ns2,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2Ns2.createPingPodNode(oc)
waitPodReady(oc, pod2Ns2.namespace, pod2Ns2.name)
helloPodNameNs2 := getPodName(oc, ns2, "name=hello-pod")
g.By("create default deny ingress type networkpolicy in 2nd namespace")
createResourceFromFile(oc, ns2, ingressTypeFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
g.By("3. Get IP of the test pods in second namespace.")
hellopodIP1Ns2 := getPodIPv4(oc, ns2, helloPodNameNs2[0])
hellopodIP2Ns2 := getPodIPv4(oc, ns2, helloPodNameNs2[1])
g.By("4. Curl both ns2 pods from ns1.")
_, err = e2eoutput.RunHostCmd(ns1, podns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(hellopodIP1Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
_, err = e2eoutput.RunHostCmd(ns1, podns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(hellopodIP2Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
})
// author: [email protected]
// modified by: [email protected]
g.It("NonHyperShiftHOST-Author:anusaxen-Medium-49686-[FdpOvnOvs] network policy with ingress rule with ipBlock", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockIngressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-dual-CIDRs-template.yaml")
ipBlockIngressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-single-CIDR-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
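// checkIPStackType reports "ipv4single", "ipv6single" or "dualstack"; the matching ipBlock template is chosen below.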
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("create 3rd hello pod in ns1")
pod3ns1 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod3ns1.createPingPodNode(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
helloPod1ns1IPv6, helloPod1ns1IPv4 := getPodIP(oc, ns1, pod1ns1.name)
helloPod1ns1IPv4WithCidr := helloPod1ns1IPv4 + "/32"
helloPod1ns1IPv6WithCidr := helloPod1ns1IPv6 + "/128"
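// Host-prefix CIDRs (/32 for IPv4, /128 for IPv6) narrow the ipBlock rule to exactly pod1's IP, so only pod1 is allowed as an ingress source.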
if ipStackType == "dualstack" {
g.By("create ipBlock Ingress Dual CIDRs Policy in ns1")
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress",
template: ipBlockIngressTemplateDual,
cidrIpv4: helloPod1ns1IPv4WithCidr,
cidrIpv6: helloPod1ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-ingress"))
} else {
// For single stack, getPodIP returns an empty second value, so the first value (stored in helloPod1ns1IPv6)
// holds the pod IP regardless of address family; append the CIDR prefix that matches the stack.
var helloPod1ns1IPWithCidr string
if ipStackType == "ipv6single" {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6WithCidr
} else {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6 + "/32"
}
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress",
template: ipBlockIngressTemplateSingle,
cidr: helloPod1ns1IPWithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-ingress"))
}
g.By("Checking connectivity from pod1 to pod3")
CurlPod2PodPass(oc, ns1, "hello-pod1", ns1, "hello-pod3")
g.By("Checking connectivity from pod2 to pod3")
CurlPod2PodFail(oc, ns1, "hello-pod2", ns1, "hello-pod3")
g.By("Create 2nd namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create 1st hello pod in ns2")
pod1ns2 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns2,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod1ns2.createPingPodNode(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
g.By("create 2nd hello pod in ns2")
pod2ns2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod2ns2.createPingPodNode(oc)
waitPodReady(oc, pod2ns2.namespace, pod2ns2.name)
g.By("Checking connectivity from pod1ns2 to pod3ns1")
CurlPod2PodFail(oc, ns2, "hello-pod1", ns1, "hello-pod3")
g.By("Checking connectivity from pod2ns2 to pod1ns1")
CurlPod2PodFail(oc, ns2, "hello-pod2", ns1, "hello-pod1")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
helloPod2ns2IPv6, helloPod2ns2IPv4 := getPodIP(oc, ns2, pod2ns2.name)
helloPod2ns2IPv4WithCidr := helloPod2ns2IPv4 + "/32"
helloPod2ns2IPv6WithCidr := helloPod2ns2IPv6 + "/128"
if ipStackType == "dualstack" {
g.By("create ipBlock Ingress Dual CIDRs Policy in ns1 again but with ipblock for pod2 ns2")
npIPBlockNS1New := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress",
template: ipBlockIngressTemplateDual,
cidrIpv4: helloPod2ns2IPv4WithCidr,
cidrIpv6: helloPod2ns2IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1New.createipBlockCIDRObjectDual(oc)
} else {
// For single stack, getPodIP returns an empty second value, so the first value (stored in helloPod2ns2IPv6)
// holds the pod IP regardless of address family; append the CIDR prefix that matches the stack.
var helloPod2ns2IPWithCidr string
if ipStackType == "ipv6single" {
helloPod2ns2IPWithCidr = helloPod2ns2IPv6WithCidr
} else {
helloPod2ns2IPWithCidr = helloPod2ns2IPv6 + "/32"
}
npIPBlockNS1New := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress",
template: ipBlockIngressTemplateSingle,
cidr: helloPod2ns2IPWithCidr,
namespace: ns1,
}
npIPBlockNS1New.createipBlockCIDRObjectSingle(oc)
}
g.By("Checking connectivity from pod2 ns2 to pod3 ns1")
CurlPod2PodPass(oc, ns2, "hello-pod2", ns1, "hello-pod3")
g.By("Checking connectivity from pod1 ns2 to pod3 ns1")
CurlPod2PodFail(oc, ns2, "hello-pod1", ns1, "hello-pod3")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1 again so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1 again so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Check connectivity works fine across all failed ones above to make sure all policy flows are cleared properly")
g.By("Checking connectivity from pod2ns1 to pod3ns1")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod3")
g.By("Checking connectivity from pod1ns2 to pod3ns1")
CurlPod2PodPass(oc, ns2, "hello-pod1", ns1, "hello-pod3")
g.By("Checking connectivity from pod2ns2 to pod1ns1 on IPv4 interface")
CurlPod2PodPass(oc, ns2, "hello-pod2", ns1, "hello-pod1")
})
// author: [email protected]
g.It("Author:zzhao-Critical-49696-[FdpOvnOvs] mixed ingress and egress policies can work well", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloSdnFile = filepath.Join(buildPruningBaseDir, "hellosdn.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress_49696.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/ingress_49696.yaml")
)
g.By("create one namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("create test pods")
createResourceFromFile(oc, ns1, testPodFile)
createResourceFromFile(oc, ns1, helloSdnFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
err = waitForPodWithLabelReady(oc, ns1, "name=hellosdn")
exutil.AssertWaitPollNoErr(err, "this pod with label name=hellosdn not ready")
hellosdnPodNameNs1 := getPodName(oc, ns1, "name=hellosdn")
g.By("create egress type networkpolicy in ns1")
createResourceFromFile(oc, ns1, egressTypeFile)
g.By("create ingress type networkpolicy in ns1")
createResourceFromFile(oc, ns1, ingressTypeFile)
g.By("create second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create test pods in second namespace")
createResourceFromFile(oc, ns2, helloSdnFile)
err = waitForPodWithLabelReady(oc, ns2, "name=hellosdn")
exutil.AssertWaitPollNoErr(err, "this pod with label name=hellosdn not ready")
g.By("Get IP of the test pods in second namespace.")
hellosdnPodNameNs2 := getPodName(oc, ns2, "name=hellosdn")
hellosdnPodIP1Ns2 := getPodIPv4(oc, ns2, hellosdnPodNameNs2[0])
g.By("curl from ns1 hellosdn pod to ns2 pod")
_, err = e2eoutput.RunHostCmd(ns1, hellosdnPodNameNs1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(hellosdnPodIP1Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
})
// author: [email protected]
g.It("Author:anusaxen-High-46246-[FdpOvnOvs] Network Policies should work with OVNKubernetes when traffic hairpins back to the same source through a service", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allowfromsameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a namespace")
oc.SetupProject()
ns := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns, pod1.name)
g.By("create 2nd hello pod in same namespace but on different node")
pod2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns, pod2.name)
g.By("Create a test service backing up both the above pods")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
svc.ipFamilyPolicy = "SingleStack"
svc.createServiceFromParams(oc)
g.By("create allow-from-same-namespace ingress networkpolicy in ns")
createResourceFromFile(oc, ns, allowfromsameNS)
g.By("curl from hello-pod1 to hello-pod2")
CurlPod2PodPass(oc, ns, "hello-pod1", ns, "hello-pod2")
g.By("curl from hello-pod2 to hello-pod1")
CurlPod2PodPass(oc, ns, "hello-pod2", ns, "hello-pod1")
for i := 0; i < 5; i++ {
g.By("curl from hello-pod1 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod1", "test-service")
g.By("curl from hello-pod2 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod2", "test-service")
}
g.By("Make sure pods are curl'able from respective nodes")
CurlNode2PodPass(oc, pod1.nodename, ns, "hello-pod1")
CurlNode2PodPass(oc, pod2.nodename, ns, "hello-pod2")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
g.By("Delete testservice from ns")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Checking pod to svc:port behavior now on with PreferDualStack Service")
svc.ipFamilyPolicy = "PreferDualStack"
svc.createServiceFromParams(oc)
for i := 0; i < 5; i++ {
g.By("curl from hello-pod1 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod1", "test-service")
g.By("curl from hello-pod2 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod2", "test-service")
}
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:huirwang-High-41879-[FdpOvnOvs] ipBlock should not ignore all other cidr's apart from the last one specified ", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockIngressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-dual-multiple-CIDRs-template.yaml")
ipBlockIngressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-single-multiple-CIDRs-template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
ipStackType := checkIPStackType(oc)
if ipStackType == "ipv4single" {
g.Skip("This case requires dualstack or Single Stack IPv6 cluster")
}
g.By("Create a namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("create test pods in ns1")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("Scale test pods to 5")
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=5", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("Get 3 test pods's podname and IPs")
testPodName := getPodName(oc, ns1, "name=test-pods")
testPod1IPv6, testPod1IPv4 := getPodIP(oc, ns1, testPodName[0])
testPod1IPv4WithCidr := testPod1IPv4 + "/32"
testPod1IPv6WithCidr := testPod1IPv6 + "/128"
testPod2IPv6, testPod2IPv4 := getPodIP(oc, ns1, testPodName[1])
testPod2IPv4WithCidr := testPod2IPv4 + "/32"
testPod2IPv6WithCidr := testPod2IPv6 + "/128"
testPod3IPv6, testPod3IPv4 := getPodIP(oc, ns1, testPodName[2])
testPod3IPv4WithCidr := testPod3IPv4 + "/32"
testPod3IPv6WithCidr := testPod3IPv6 + "/128"
if ipStackType == "dualstack" {
g.By("create ipBlock Ingress Dual CIDRs Policy in ns1")
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress-41879",
template: ipBlockIngressTemplateDual,
cidrIpv4: testPod1IPv4WithCidr,
cidrIpv6: testPod1IPv6WithCidr,
cidr2Ipv4: testPod2IPv4WithCidr,
cidr2Ipv6: testPod2IPv6WithCidr,
cidr3Ipv4: testPod3IPv4WithCidr,
cidr3Ipv6: testPod3IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createIPBlockMultipleCIDRsObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-ingress-41879"))
} else {
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress-41879",
template: ipBlockIngressTemplateSingle,
cidr: testPod1IPv6WithCidr,
cidr2: testPod2IPv6WithCidr,
cidr3: testPod3IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createIPBlockMultipleCIDRsObjectSingle(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-ingress-41879"))
}
g.By("Checking connectivity from pod1 to pod5")
CurlPod2PodPass(oc, ns1, testPodName[0], ns1, testPodName[4])
g.By("Checking connectivity from pod2 to pod5")
CurlPod2PodPass(oc, ns1, testPodName[1], ns1, testPodName[4])
g.By("Checking connectivity from pod3 to pod5")
CurlPod2PodPass(oc, ns1, testPodName[2], ns1, testPodName[4])
g.By("Checking connectivity from pod4 to pod5")
CurlPod2PodFail(oc, ns1, testPodName[3], ns1, testPodName[4])
})
// author: [email protected]
g.It("Author:asood-Medium-46807-[FdpOvnOvs] network policy with egress rule with ipBlock", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockEgressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-dual-CIDRs-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-single-CIDR-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("create 3rd hello pod in ns1")
pod3ns1 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod3ns1.createPingPodNode(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
helloPod1ns1IP1, helloPod1ns1IP2 := getPodIP(oc, ns1, pod1ns1.name)
if ipStackType == "dualstack" {
helloPod1ns1IPv6WithCidr := helloPod1ns1IP1 + "/128"
helloPod1ns1IPv4WithCidr := helloPod1ns1IP2 + "/32"
g.By("create ipBlock Egress Dual CIDRs Policy in ns1")
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-egress",
template: ipBlockEgressTemplateDual,
cidrIpv4: helloPod1ns1IPv4WithCidr,
cidrIpv6: helloPod1ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-egress"))
} else {
if ipStackType == "ipv6single" {
helloPod1ns1IPv6WithCidr := helloPod1ns1IP1 + "/128"
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPod1ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
} else {
helloPod1ns1IPv4WithCidr := helloPod1ns1IP1 + "/32"
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPod1ns1IPv4WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
}
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress"))
}
g.By("Checking connectivity from pod2 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
g.By("Checking connectivity from pod2 to pod3")
CurlPod2PodFail(oc, ns1, "hello-pod2", ns1, "hello-pod3")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-egress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-egress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Check connectivity works fine across all failed ones above to make sure all policy flows are cleared properly")
g.By("Checking connectivity from pod2 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
g.By("Checking connectivity from pod2 to pod3")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod3")
})
// author: [email protected]
g.It("Author:asood-Medium-46808-[FdpOvnOvs] network policy with egress rule with ipBlock and except", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockEgressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-except-dual-CIDRs-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-except-single-CIDR-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1 on node[0]")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1 on node[0]")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("create 3rd hello pod in ns1 on node[1]")
pod3ns1 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod3ns1.createPingPodNode(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
g.By("create 4th hello pod in ns1 on node[1]")
pod4ns1 := pingPodResourceNode{
name: "hello-pod4",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod4ns1.createPingPodNode(oc)
waitPodReady(oc, pod4ns1.namespace, pod4ns1.name)
helloPod2ns1IP1, helloPod2ns1IP2 := getPodIP(oc, ns1, pod2ns1.name)
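// The egress policy below allows node[0]'s pod subnet except pod2's own host address, so pod3 should still
// reach pod1 (same subnet, not excepted) but not pod2 (excepted) nor pod4 (on node[1], outside the CIDR).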
if ipStackType == "dualstack" {
hostSubnetCIDRIPv4, hostSubnetCIDRIPv6 := getNodeSubnetDualStack(oc, nodeList.Items[0].Name, "default")
o.Expect(hostSubnetCIDRIPv6).NotTo(o.BeEmpty())
o.Expect(hostSubnetCIDRIPv4).NotTo(o.BeEmpty())
helloPod2ns1IPv6WithCidr := helloPod2ns1IP1 + "/128"
helloPod2ns1IPv4WithCidr := helloPod2ns1IP2 + "/32"
g.By("create ipBlock Egress CIDRs with except rule Policy in ns1 on dualstack")
npIPBlockNS1 := ipBlockCIDRsExceptDual{
name: "ipblock-dual-cidrs-egress-except",
template: ipBlockEgressTemplateDual,
cidrIpv4: hostSubnetCIDRIPv4,
cidrIpv4Except: helloPod2ns1IPv4WithCidr,
cidrIpv6: hostSubnetCIDRIPv6,
cidrIpv6Except: helloPod2ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockExceptObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-egress-except"))
} else {
if ipStackType == "ipv6single" {
hostSubnetCIDRIPv6 := getNodeSubnet(oc, nodeList.Items[0].Name, "default")
o.Expect(hostSubnetCIDRIPv6).NotTo(o.BeEmpty())
helloPod2ns1IPv6WithCidr := helloPod2ns1IP1 + "/128"
g.By("create ipBlock Egress CIDRs with except rule Policy in ns1 on IPv6 singlestack")
npIPBlockNS1 := ipBlockCIDRsExceptSingle{
name: "ipblock-single-cidr-egress-except",
template: ipBlockEgressTemplateSingle,
cidr: hostSubnetCIDRIPv6,
except: helloPod2ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockExceptObjectSingle(oc, true)
} else {
hostSubnetCIDRIPv4 := getNodeSubnet(oc, nodeList.Items[0].Name, "default")
o.Expect(hostSubnetCIDRIPv4).NotTo(o.BeEmpty())
helloPod2ns1IPv4WithCidr := helloPod2ns1IP1 + "/32"
g.By("create ipBlock Egress CIDRs with except rule Policy in ns1 on IPv4 singlestack")
npIPBlockNS1 := ipBlockCIDRsExceptSingle{
name: "ipblock-single-cidr-egress-except",
template: ipBlockEgressTemplateSingle,
cidr: hostSubnetCIDRIPv4,
except: helloPod2ns1IPv4WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockExceptObjectSingle(oc, true)
}
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress-except"))
}
g.By("Checking connectivity from pod3 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod1")
g.By("Checking connectivity from pod3 to pod2")
CurlPod2PodFail(oc, ns1, "hello-pod3", ns1, "hello-pod2")
g.By("Checking connectivity from pod3 to pod4")
CurlPod2PodFail(oc, ns1, "hello-pod3", ns1, "hello-pod4")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-egress-except", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-egress-except", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Check connectivity works fine across all failed ones above to make sure all policy flows are cleared properly")
g.By("Checking connectivity from pod3 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod1")
g.By("Checking connectivity from pod3 to pod2")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod2")
g.By("Checking connectivity from pod3 to pod4")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod4")
})
// author: [email protected]
g.It("Author:asood-Medium-41082-Check ACL audit logs can be extracted", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromSameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("Enable ACL looging on the namespace ns1")
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "alert"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", ns1, aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("create default deny ingress networkpolicy in ns1")
createResourceFromFile(oc, ns1, ingressTypeFile)
g.By("create allow same namespace networkpolicy in ns1")
createResourceFromFile(oc, ns1, allowFromSameNS)
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("Checking connectivity from pod2 to pod1 to generate messages")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
output, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, "--path=ovn/acl-audit-log.log").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=allow")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:asood-Medium-41407-[FdpOvnOvs] Check networkpolicy ACL audit message is logged with correct policy name", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromSameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
var namespaces [2]string
policyList := [2]string{"default-deny-ingress", "allow-from-same-namespace"}
for i := 0; i < 2; i++ {
namespaces[i] = oc.Namespace()
exutil.By(fmt.Sprintf("Enable ACL looging on the namespace %s", namespaces[i]))
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "warning"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", namespaces[i], aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("Create default deny ingress networkpolicy in %s", namespaces[i]))
createResourceFromFile(oc, namespaces[i], ingressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(policyList[0]))
exutil.By(fmt.Sprintf("Create allow same namespace networkpolicy in %s", namespaces[i]))
createResourceFromFile(oc, namespaces[i], allowFromSameNS)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(policyList[1]))
pod := pingPodResourceNode{
name: "",
namespace: namespaces[i],
nodename: "",
template: pingPodNodeTemplate,
}
for j := 0; j < 2; j++ {
exutil.By(fmt.Sprintf("Create hello pod in %s", namespaces[i]))
pod.name = "hello-pod" + strconv.Itoa(j)
pod.nodename = nodeList.Items[j].Name
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
}
exutil.By(fmt.Sprintf("Checking connectivity from second pod to first pod to generate messages in %s", namespaces[i]))
CurlPod2PodPass(oc, namespaces[i], "hello-pod1", namespaces[i], "hello-pod0")
oc.SetupProject()
}
output, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, "--path=ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ACL logs for allow-from-same-namespace policy \n %s", output)
// the policy name appears truncated to allow-from-same-name in the ACL log message (OVN caps ACL names at 63 characters, so long namespace/policy combinations get cut)
for i := 0; i < len(namespaces); i++ {
searchString := fmt.Sprintf("name=\"NP:%s:allow-from-same-name\", verdict=allow, severity=warning", namespaces[i])
o.Expect(strings.Contains(output, searchString)).To(o.BeTrue())
removeResource(oc, true, true, "networkpolicy", policyList[1], "-n", namespaces[i])
CurlPod2PodFail(oc, namespaces[i], "hello-pod0", namespaces[i], "hello-pod1")
}
output, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[1].Name, "--path=ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ACL logs for default-deny-ingress policy \n %s", output)
for i := 0; i < len(namespaces); i++ {
searchString := fmt.Sprintf("name=\"NP:%s:Ingress\", verdict=drop, severity=alert", namespaces[i])
o.Expect(strings.Contains(output, searchString)).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:asood-NonPreRelease-Longduration-WRS-Medium-41080-V-BR.33-Check network policy ACL audit messages are logged to journald", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromSameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Configure audit message logging destination to journald")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfo := `{"spec":{"defaultNetwork":{"ovnKubernetesConfig":{"policyAuditConfig": {"destination": "libc"}}}}}`
undoPatchInfo := `{"spec":{"defaultNetwork":{"ovnKubernetesConfig":{"policyAuditConfig": {"destination": ""}}}}}`
defer func() {
_, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", undoPatchInfo, "--type=merge").Output()
o.Expect(patchErr).NotTo(o.HaveOccurred())
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
}()
_, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", patchInfo, "--type=merge").Output()
o.Expect(patchErr).NotTo(o.HaveOccurred())
//The network operator needs to recreate the ovnkube pods after the merge patch, therefore give it enough time.
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("Enable ACL looging on the namespace ns1")
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "alert"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", ns1, aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("create default deny ingress networkpolicy in ns1")
createResourceFromFile(oc, ns1, ingressTypeFile)
g.By("create allow same namespace networkpolicy in ns1")
createResourceFromFile(oc, ns1, allowFromSameNS)
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("Checking connectivity from pod2 to pod1 to generate messages")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
g.By("Checking messages are logged to journald")
cmd := fmt.Sprintf("journalctl -t ovn-controller --since '1min ago'| grep 'verdict=allow'")
output, journalctlErr := exutil.DebugNodeWithOptionsAndChroot(oc, nodeList.Items[0].Name, []string{"-q"}, "bin/sh", "-c", cmd)
e2e.Logf("Output %s", output)
o.Expect(journalctlErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=allow")).To(o.BeTrue())
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:anusaxen-Medium-55287-[FdpOvnOvs] Default network policy ACLs to a namespace should not be present with arp but arp||nd for ARPAllowPolicies", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
)
g.By("This is for BZ 2095852")
g.By("create new namespace")
oc.SetupProject()
g.By("create test pods")
createResourceFromFile(oc, oc.Namespace(), testPodFile)
err := waitForPodWithLabelReady(oc, oc.Namespace(), "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("create ingress default-deny type networkpolicy")
createResourceFromFile(oc, oc.Namespace(), ingressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny"))
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).NotTo(o.BeEmpty())
g.By("get ACLs related to ns")
//list only the ACLs related to the namespace under test, narrowed to the arpAllow type
listACLCmd := "ovn-nbctl list ACL | grep -C 5 " + "NP:" + oc.Namespace() + " | grep -C 5 type=arpAllow"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("Output %s", listOutput)
o.Expect(listOutput).To(o.ContainSubstring("&& (arp || nd)"))
o.Expect(listOutput).ShouldNot(o.ContainSubstring("&& arp"))
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:huirwang-High-62524-[FdpOvnOvs] OVN address_set referenced in acl should not miss when networkpolicy name includes dot.", func() {
// This is for customer bug https://issues.redhat.com/browse/OCPBUGS-4085
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
networkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress-ingress-62524.yaml")
)
g.By("Check cluster network type")
g.By("Get namespace")
ns := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "team-").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "team=openshift-networking").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create test pods")
createResourceFromFile(oc, ns, testPodFile)
err = waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testPod := getPodName(oc, ns, "name=test-pods")
g.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
g.By("create egress-ingress type networkpolicy")
createResourceFromFile(oc, ns, networkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("egress-ingress-62524.test"))
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).NotTo(o.BeEmpty())
g.By("Verify the address_set exists for the specific acl")
//list ACLs related to the networkpolicy name
listACLCmd := "ovn-nbctl --data=bare --no-heading --format=table find acl | grep egress-ingress-62524.test"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listOutput).NotTo(o.BeEmpty())
// Get the address set name from the acls
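// OVN ACL match expressions reference address sets as {$<hashed_name>}, so capture the token after the '$'.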
regex := `\{\$(\w+)\}`
re := regexp.MustCompile(regex)
matches := re.FindAllStringSubmatch(listOutput, -1)
if len(matches) == 0 {
e2e.Fail("No matched address_set name found")
}
var result []string
for _, match := range matches {
if len(match) == 2 { // ensure the capture group was present
result = append(result, match[1]) // Append the captured group to the result slice
}
}
if len(result) == 0 {
e2e.Fail("No matched address_set name found")
}
//Check each address_set can be found via ovn-nbctl list address_set
for _, addrSetName := range result {
listAddressSetCmd := "ovn-nbctl --no-leader-only list address_set | grep " + addrSetName
listAddrOutput, listAddrErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listAddressSetCmd)
o.Expect(listAddrErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
}
g.By("Checking pods connectivity")
CurlPod2PodPass(oc, ns, testPod[0], ns, pod1.name)
CurlPod2PodFail(oc, ns, testPod[0], ns, testPod[1])
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:asood-Critical-65901-[FdpOvnOvs] Duplicate transactions should not be executed for network policy for every pod update.", func() {
// Customer https://issues.redhat.com/browse/OCPBUGS-4659
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
networkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-ingress-red.yaml")
testPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
exutil.By("Obtain the namespace")
ns := oc.Namespace()
exutil.By("Create a pod in namespace")
pod := pingPodResource{
name: "test-pod",
namespace: ns,
template: testPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", pod.namespace, "pod", pod.name, "type=red").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("Create a network policy")
createResourceFromFile(oc, ns, networkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-ingress-to-red"))
exutil.By("Obtain the transaction count to be 1")
podIP1, _ := getPodIP(oc, ns, pod.name)
podNodeName, podNodenameErr := exutil.GetPodNodeName(oc, ns, pod.name)
o.Expect(podNodeName).NotTo(o.BeEmpty())
o.Expect(podNodenameErr).NotTo(o.HaveOccurred())
e2e.Logf("Node on which pod %s is running %s", pod.name, podNodeName)
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", podNodeName)
o.Expect(ovnKNodePod).NotTo(o.BeEmpty())
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, podNodeName)
getCmd := fmt.Sprintf("cat /var/log/ovnkube/libovsdb.log | grep 'transacting operations' | grep '%s' ", podIP1)
logContents, logErr1 := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", getCmd)
o.Expect(logErr1).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("Log content before label update \n %s", logContents))
logLinesCount := len(strings.Split(logContents, "\n")) - 1
exutil.By("Label the pods to see transaction count is unchanged")
_, reLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", pod.namespace, "--overwrite", "pod", pod.name, "type=blue").Output()
o.Expect(reLabelErr).NotTo(o.HaveOccurred())
newLogContents, logErr2 := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", getCmd)
o.Expect(logErr2).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("Log content after label update \n %s", newLogContents))
newLogLinesCount := len(strings.Split(newLogContents, "\n")) - 1
o.Expect(logLinesCount).To(o.Equal(newLogLinesCount))
})
g.It("Author:asood-High-66085-[FdpOvnOvs] Creating egress network policies for allowing to same namespace and openshift dns in namespace prevents the pod from reaching its own service", func() {
// https://issues.redhat.com/browse/OCPBUGS-4909
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
allowToNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-to-same-namespace.yaml")
allowToDNSNPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-to-openshift-dns.yaml")
podsInProject = []string{"hello-pod-1", "other-pod"}
svcURL string
)
exutil.By("Get first namespace and create another")
ns := oc.Namespace()
exutil.By("Create set of pods with different labels")
for _, podItem := range podsInProject {
pod1 := pingPodResource{
name: podItem,
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, ns, pod1.name)
}
exutil.By("Label the pods to ensure the pod does not serve the service")
_, reLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns, "--overwrite", "pod", podsInProject[1], "name=other-pod").Output()
o.Expect(reLabelErr).NotTo(o.HaveOccurred())
exutil.By("Create a service for one of the pods")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "SingleStack",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
svc.createServiceFromParams(oc)
exutil.By("Check service status")
svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename))
exutil.By("Obtain the service URL")
svcURL = fmt.Sprintf("http://%s.%s.svc:27017", svc.servicename, svc.namespace)
e2e.Logf("Service URL %s", svcURL)
exutil.By("Check the connectivity to service from the pods in the namespace")
for _, podItem := range podsInProject {
output, err := e2eoutput.RunHostCmd(ns, podItem, "curl --connect-timeout 5 -s "+svcURL)
o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create the network policies in the namespace")
exutil.By("Create the allow to same namespace policy in the namespace")
createResourceFromFile(oc, ns, allowToNSNetworkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-same-namespace"))
exutil.By("Create the allow to DNS policy in the namespace")
createResourceFromFile(oc, ns, allowToDNSNPolicyFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-openshift-dns"))
exutil.By("Create another pod to serve the service")
anotherPod := pingPodResource{
name: "hello-pod-2",
namespace: ns,
template: pingPodTemplate,
}
anotherPod.createPingPod(oc)
waitPodReady(oc, ns, anotherPod.name)
podsInProject = append(podsInProject, anotherPod.name)
exutil.By("Check the connectivity to service again from the pods in the namespace")
for _, eachPod := range podsInProject {
output, err := e2eoutput.RunHostCmd(ns, eachPod, "curl --connect-timeout 5 -s "+svcURL)
o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
}
})
g.It("Author:asood-Medium-64787-[FdpOvnOvs] Network policy with duplicate egress rules (same CIDR block) fails to be recreated [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-5835
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockEgressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-dual-multiple-CIDRs-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-single-multiple-CIDRs-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Obtain the namespace")
ns := oc.Namespace()
exutil.By("create a hello pod in namspace")
podns := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns.createPingPodNode(oc)
waitPodReady(oc, podns.namespace, podns.name)
helloPodnsIP1, helloPodnsIP2 := getPodIP(oc, ns, podns.name)
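// On dual stack, getPodIP returns the IPv6 address first and the IPv4 address second; on single stack only the first value is set.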
var policyName string
if ipStackType == "dualstack" {
helloPodnsIPv6WithCidr := helloPodnsIP1 + "/128"
helloPodnsIPv4WithCidr := helloPodnsIP2 + "/32"
exutil.By("Create ipBlock Egress Dual with multiple CIDRs Policy in namespace")
npIPBlockNS := ipBlockCIDRsDual{
name: "ipblock-dual-multiple-cidrs-egress",
template: ipBlockEgressTemplateDual,
cidrIpv4: helloPodnsIPv4WithCidr,
cidrIpv6: helloPodnsIPv6WithCidr,
cidr2Ipv4: helloPodnsIPv4WithCidr,
cidr2Ipv6: helloPodnsIPv6WithCidr,
cidr3Ipv4: helloPodnsIPv4WithCidr,
cidr3Ipv6: helloPodnsIPv6WithCidr,
namespace: ns,
}
npIPBlockNS.createIPBlockMultipleCIDRsObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(npIPBlockNS.name))
policyName = npIPBlockNS.name
} else {
var npIPBlockNS ipBlockCIDRsSingle
if ipStackType == "ipv6single" {
helloPodnsIPv6WithCidr := helloPodnsIP1 + "/128"
npIPBlockNS = ipBlockCIDRsSingle{
name: "ipblock-single-multiple-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPodnsIPv6WithCidr,
cidr2: helloPodnsIPv6WithCidr,
cidr3: helloPodnsIPv6WithCidr,
namespace: ns,
}
} else {
helloPodnsIPv4WithCidr := helloPodnsIP1 + "/32"
npIPBlockNS = ipBlockCIDRsSingle{
name: "ipblock-single-multiple-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPodnsIPv4WithCidr,
cidr2: helloPodnsIPv4WithCidr,
cidr3: helloPodnsIPv4WithCidr,
namespace: ns,
}
}
npIPBlockNS.createIPBlockMultipleCIDRsObjectSingle(oc)
output, err := oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(npIPBlockNS.name))
policyName = npIPBlockNS.name
}
exutil.By("Delete the ovnkube node pod on the node")
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnKNodePod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for new ovnkube-node pod recreated on the node")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
exutil.By("Check for error message related network policy")
e2e.Logf("ovnkube-node new podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
filterString := fmt.Sprintf(" %s/%s ", ns, policyName)
e2e.Logf("Filter String %s", filterString)
logContents, logErr := exutil.GetSpecificPodLogs(oc, "openshift-ovn-kubernetes", "ovnkube-controller", ovnKNodePod, filterString)
o.Expect(logErr).NotTo(o.HaveOccurred())
e2e.Logf("Log contents \n%s", logContents)
o.Expect(strings.Contains(logContents, "failed")).To(o.BeFalse())
})
g.It("Author:asood-Critical-64786-[FdpOvnOvs] Network policy in namespace that has long name fails to be recreated as the ACLs are considered duplicate [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-15371
var (
testNs = "test-64786networkpolicy-with-a-62chars-62chars-long-namespace62"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowToNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-to-same-namespace.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeList.Items).NotTo(o.BeEmpty())
exutil.By("Create a namespace with a long name")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("project", testNs, "--ignore-not-found").Execute()
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
nsCreateErr := oc.WithoutNamespace().Run("new-project").Args(testNs).Execute()
o.Expect(nsCreateErr).NotTo(o.HaveOccurred())
exutil.By("Create a hello pod in namspace")
podns := pingPodResource{
name: "hello-pod",
namespace: testNs,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Create a network policy in namespace")
createResourceFromFile(oc, testNs, allowToNSNetworkPolicyFile)
checkErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
output, err := oc.WithoutNamespace().Run("get").Args("networkpolicy", "-n", testNs).Output()
if err != nil {
e2e.Logf("%v,Waiting for policy to be created, try again ...,", err)
return false, nil
}
// Check network policy
if strings.Contains(output, "allow-to-same-namespace") {
e2e.Logf("Network policy created")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Network policy could not be created")
exutil.By("Delete the ovnkube node pod on the node")
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnKNodePod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for new ovnkube-node pod recreated on the node")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
exutil.By("Check for error message related network policy")
e2e.Logf("ovnkube-node new podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
filterString := fmt.Sprintf(" %s/%s ", testNs, "allow-to-same-namespace")
e2e.Logf("Filter String %s", filterString)
logContents, logErr := exutil.GetSpecificPodLogs(oc, "openshift-ovn-kubernetes", "ovnkube-controller", ovnKNodePod, filterString)
o.Expect(logErr).NotTo(o.HaveOccurred())
e2e.Logf("Log contents \n%s", logContents)
o.Expect(strings.Contains(logContents, "failed")).To(o.BeFalse())
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:asood-High-64788-[FdpOvnOvs] Same network policies across multiple namespaces fail to be recreated [Disruptive].", func() {
// This is for customer bug https://issues.redhat.com/browse/OCPBUGS-11447
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
networkPolicyFileSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-single-CIDR-template.yaml")
networkPolicyFileDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-dual-CIDRs-template.yaml")
policyName = "ipblock-64788"
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a test pods")
createResourceFromFile(oc, ns, testPodFile)
err := waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "The pod with label name=test-pods is not ready")
testPod := getPodName(oc, ns, "name=test-pods")
nodeName, err := exutil.GetPodNodeName(oc, ns, testPod[0])
o.Expect(err).NotTo(o.HaveOccurred())
helloPod1ns1IPv6, helloPod1ns1IPv4 := getPodIP(oc, ns, testPod[0])
helloPod1ns1IPv4WithCidr := helloPod1ns1IPv4 + "/32"
helloPod1ns1IPv6WithCidr := helloPod1ns1IPv6 + "/128"
exutil.By("Create ipBlock Ingress CIDRs Policy in namespace")
if ipStackType == "dualstack" {
npIPBlockNS1 := ipBlockCIDRsDual{
name: policyName,
template: networkPolicyFileDual,
cidrIpv4: helloPod1ns1IPv4WithCidr,
cidrIpv6: helloPod1ns1IPv6WithCidr,
namespace: ns,
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
} else {
// For single-stack clusters getPodIP returns an empty second value, so the address
// (IPv4 or IPv6) is always in helloPod1ns1IPv6; append the CIDR suffix that matches the stack.
var helloPod1ns1IPWithCidr string
if ipStackType == "ipv6single" {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6WithCidr
} else {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6 + "/32"
}
npIPBlockNS1 := ipBlockCIDRsSingle{
name: policyName,
template: networkPolicyFileSingle,
cidr: helloPod1ns1IPWithCidr,
namespace: ns,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
}
exutil.By("Check the policy has been created")
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(policyName))
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeName)
exutil.By("Get the ACL for the created policy")
//list ACLs related to the networkpolicy name
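// OVN caps ACL names at 63 characters, so with a 62-character namespace the full
// "NP:<ns>:<policy>:Ingress" string gets truncated -- "Ingres" below is deliberate.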
aclName := fmt.Sprintf("'NP:%s:%s:Ingres'", ns, policyName)
listACLCmd := fmt.Sprintf("ovn-nbctl find acl name='NP\\:%s\\:%s\\:Ingres'", ns, policyName)
listAclOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAclOutput).NotTo(o.BeEmpty())
e2e.Logf(listAclOutput)
var aclMap map[string]string
var listPGCmd string
//Dual stack has two ACLs for policy and uuid of both are needed to get port group
if ipStackType == "dualstack" {
listAcls := strings.Split(listAclOutput, "\n\n")
aclMap = nbContructToMap(listAcls[0])
o.Expect(len(aclMap)).NotTo(o.Equal(0))
aclMap1 := nbContructToMap(listAcls[1])
o.Expect(len(aclMap1)).NotTo(o.Equal(0))
listPGCmd = fmt.Sprintf("ovn-nbctl find port-group acls='[%s, %s]'", aclMap["_uuid"], aclMap1["_uuid"])
} else {
aclMap = nbContructToMap(listAclOutput)
o.Expect(len(aclMap)).NotTo(o.Equal(0))
listPGCmd = fmt.Sprintf("ovn-nbctl find port-group acls='[%s]'", aclMap["_uuid"])
}
aclMap["name"] = aclName
exutil.By("Get the port group for the created policy")
listPGOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listPGCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listPGOutput).NotTo(o.BeEmpty())
e2e.Logf(listPGOutput)
pgMap := nbContructToMap(listPGOutput)
o.Expect(len(pgMap)).NotTo(o.Equal(0))
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", policyName, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a duplicate ACL")
createAclCmd := fmt.Sprintf("ovn-nbctl --id=@copyacl create acl name=copyacl direction=%s action=%s -- add port_group %s acl @copyacl", aclMap["direction"], aclMap["action"], pgMap["_uuid"])
idOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", createAclCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(idOutput).NotTo(o.BeEmpty())
e2e.Logf(idOutput)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", policyName, "-n", ns).Execute()
exutil.By("Set properties of duplicate ACL")
setAclPropertiesCmd := fmt.Sprintf("ovn-nbctl set acl %s match='%s' priority=%s meter=%s", idOutput, aclMap["match"], aclMap["priority"], aclMap["meter"])
_, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", setAclPropertiesCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", policyName, "-n", ns).Execute()
exutil.By("Set name of duplicate ACL")
dupAclName := fmt.Sprintf("'NP\\:%s\\:%s\\:Ingre0'", ns, policyName)
setAclNameCmd := fmt.Sprintf("ovn-nbctl set acl %s name=%s", idOutput, dupAclName)
_, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", setAclNameCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
exutil.By("Check duplicate ACL is created successfully")
listDupACLCmd := fmt.Sprintf("ovn-nbctl find acl name='NP\\:%s\\:%s\\:Ingre0'", ns, policyName)
listDupAclOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listDupACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listDupAclOutput).NotTo(o.BeEmpty())
e2e.Logf(listDupAclOutput)
exutil.By("Delete the ovnkube node pod on the node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeName)
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnKNodePod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for new ovnkube-node pod to be recreated on the node")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
exutil.By("Check the duplicate ACL is removed")
listAclOutput, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAclOutput).NotTo(o.BeEmpty(), listAclOutput)
listDupAclOutput, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listDupACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listDupAclOutput).To(o.BeEmpty())
})
// author: [email protected]
g.It("Author:asood-Medium-68660-[FdpOvnOvs] Exposed route of the service should be accessible when allowing inbound traffic from any namespace network policy is created.", func() {
// https://issues.redhat.com/browse/OCPBUGS-14632
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromAllNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-all-namespaces.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
serviceName = "test-service-68660"
)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a hello pod in namspace")
podns := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Create a test service which is in front of the above pod")
svc := genericServiceResource{
servicename: serviceName,
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "PreferDualStack",
internalTrafficPolicy: "Local",
externalTrafficPolicy: "",
template: genericServiceTemplate,
}
svc.createServiceFromParams(oc)
exutil.By("Expose the service through a route")
err := oc.AsAdmin().WithoutNamespace().Run("expose").Args("svc", serviceName, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svcRoute, routeErr := oc.AsAdmin().Run("get").Args("route", serviceName, "-n", ns, "-o=jsonpath={.spec.host}").Output()
o.Expect(routeErr).NotTo(o.HaveOccurred())
o.Expect(svcRoute).ShouldNot(o.Equal(""))
exutil.By("Access the route before network policy creation")
var svcErr error
var routeCurlOutput []byte
o.Eventually(func() string {
routeCurlOutput, svcErr = exec.Command("bash", "-c", "curl -sI "+svcRoute).Output()
if svcErr != nil {
e2e.Logf("Wait for service to be accessible through route, %v", svcErr)
}
return string(routeCurlOutput)
}, "15s", "5s").Should(o.ContainSubstring("200 OK"), fmt.Sprintf("Service inaccessible through route %s", string(routeCurlOutput)))
exutil.By("Create a network policy in namespace")
createResourceFromFile(oc, ns, allowFromAllNSNetworkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-all-namespaces"))
exutil.By("Access the route after network policy creation")
routeCurlOutput, svcErr = exec.Command("bash", "-c", "curl -sI "+svcRoute).Output()
o.Expect(svcErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(routeCurlOutput), "200 OK")).To(o.BeTrue())
})
// author: [email protected]
g.It("NonPreRelease-PreChkUpgrade-Author:asood-Critical-69236-Network policy in namespace that has long name is created successfully post upgrade", func() {
var (
testNs = "test-the-networkpolicy-with-a-62chars-62chars-long-namespace62"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowSameNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-same-namespace.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
helloStatefulsetFile = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a hello pod in the namespace")
podns := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Create a namespace with a long name")
oc.CreateSpecifiedNamespaceAsAdmin(testNs)
exutil.By("Create a hello pod in namespace that has long name")
createResourceFromFile(oc, testNs, helloStatefulsetFile)
podErr := waitForPodWithLabelReady(oc, testNs, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodName := getPodName(oc, testNs, "app=hello")[0]
exutil.By("Create a network policy in namespace")
createResourceFromFile(oc, testNs, allowSameNSNetworkPolicyFile)
output, err := oc.AsAdmin().Run("get").Args("networkpolicy", "-n", testNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-same-namespace"))
exutil.By("Verify the network policy in namespace with long name pre upgrade is functional ")
CurlPod2PodFail(oc, ns, "hello-pod", testNs, helloPodName)
})
g.It("NonPreRelease-PstChkUpgrade-Author:asood-Critical-69236-Network policy in namespace that has long name is created successfully post upgrade", func() {
var (
testNs = "test-the-networkpolicy-with-a-62chars-62chars-long-namespace62"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", testNs).Execute()
if nsErr != nil {
g.Skip("Skip the PstChkUpgrade test as test-the-networkpolicy-with-a-62chars-62chars-long-namespace62 namespace does not exist, PreChkUpgrade test did not run")
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testNs)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a hello pod in the namespace")
podns := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Verify the network policy in namespace with long name post upgrade is functional ")
podErr := waitForPodWithLabelReady(oc, testNs, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodName := getPodName(oc, testNs, "app=hello")[0]
CurlPod2PodFail(oc, ns, "hello-pod", testNs, helloPodName)
})
g.It("Author:asood-Low-75540-Network Policy Validation", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
networkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/netpol-30920-75540.yaml")
)
exutil.By("OCPBUGS-30920 Verify the network policy is not created with invalid value")
ns := oc.Namespace()
o.Expect(createResourceFromFileWithError(oc, ns, networkPolicyFile)).To(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:meinli-High-70009-Pod IP is missing from OVN DB AddressSet when using allow-namespace-only network policy", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowSameNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-same-namespace.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires 1 nodes, but the cluster has none")
}
exutil.By("1. Get namespace")
ns := oc.Namespace()
exutil.By("2. Create a network policy in namespace")
createResourceFromFile(oc, ns, allowSameNSNetworkPolicyFile)
output, err := oc.AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-same-namespace"))
ovnNodePod := ovnkubeNodePod(oc, nodeList.Items[0].Name)
o.Expect(ovnNodePod).NotTo(o.BeEmpty())
exutil.By("3. Check the acl from the port-group from the OVNK leader ovnkube-node")
listPGCmd := fmt.Sprintf("ovn-nbctl find port-group | grep -C 2 '%s\\:allow-same-namespace'", ns)
listPGCOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listPGCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listPGCOutput).NotTo(o.BeEmpty())
e2e.Logf("Output %s", listPGCOutput)
exutil.By("4. Check the addresses in ACL's address-set is empty")
PGCMap := nbContructToMap(listPGCOutput)
acls := strings.Split(strings.Trim(PGCMap["acls"], "[]"), ", ")
o.Expect(len(acls)).To(o.Equal(2))
listAclCmd := fmt.Sprintf("ovn-nbctl list acl %s", strings.Join(acls, " "))
listAclOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAclCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAclOutput).NotTo(o.BeEmpty())
regex := `\{\$(\w+)\}`
re := regexp.MustCompile(regex)
addrSetNames := re.FindAllString(listAclOutput, -1)
if len(addrSetNames) == 0 {
e2e.Fail("No matched address_set name found")
}
addrSetName := strings.Trim(addrSetNames[0], "{$}")
o.Expect(addrSetName).NotTo(o.BeEmpty())
listAddressSetCmd := fmt.Sprintf("ovn-nbctl list address_set %s", addrSetName)
listAddrOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAddressSetCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
AddrMap := nbContructToMap(listAddrOutput)
addrs := strings.Trim(AddrMap["addresses"], "[]")
o.Expect(addrs).To(o.BeEmpty())
exutil.By("5. Create a hello pod on non existent node")
nonexistNodeName := "doesnotexist-" + getRandomString()
pod1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nonexistNodeName,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
exutil.By("6. Verify address is not added to address-set")
listAddrOutput, listErr = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAddressSetCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
AddrMap = nbContructToMap(listAddrOutput)
addrs = strings.Trim(AddrMap["addresses"], "[]")
o.Expect(addrs).To(o.BeEmpty())
exutil.By("7. Delete the pods that did not reach running state and create it with valid node name")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1.name, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pod1.nodename = nodeList.Items[0].Name
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("8. Verify address is added to address-set")
listAddrOutput, listErr = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAddressSetCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
AddrMap = nbContructToMap(listAddrOutput)
addrs = strings.Trim(AddrMap["addresses"], "[\"]")
o.Expect(addrs).NotTo(o.BeEmpty())
ipStack := checkIPStackType(oc)
if (ipStack == "ipv6single") || (ipStack == "ipv4single") {
Pod1IP, _ := getPodIP(oc, ns, pod1.name)
o.Expect(addrs == Pod1IP).To(o.BeTrue())
} else {
_, Pod1IPv4 := getPodIP(oc, ns, pod1.name)
o.Expect(addrs == Pod1IPv4).To(o.BeTrue())
}
})
})
var _ = g.Describe("[sig-networking] SDN networkpolicy StressTest", func() {
// This case only runs in the perf/stress CI, which is deployed specifically for stress testing.
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-networkpolicy-stress", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
//author: [email protected]
g.It("Author:meinli-High-69234-high memory usage on ovnkube-master leader pods on some clusters when a network policy is deleted. [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ingressNPPolicyTemplate = filepath.Join(buildPruningBaseDir, "networkpolicy/generic-networkpolicy-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
master_port int32 = 8100
)
exutil.By("0. Get namespace.\n")
ns := oc.Namespace()
exutil.By("1. Get port from ovnk-master leader pod.\n")
ovnMasterPodName := getOVNKMasterPod(oc)
ovnMasterPodNames := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
var port string
var flag int
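// Each ovnkube-control-plane pod is assigned its own local forward port starting
// at 8100; flag records the leader's 1-based row for the `adm top pod` lookup below.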
for i, ovnPod := range ovnMasterPodNames {
if ovnPod == ovnMasterPodName {
port = strconv.Itoa(int(master_port))
flag = i + 1
break
}
master_port++
}
exutil.By("2. Get initial pprof goroutine value from ovnk-master leader after enabling forwarding.\n")
cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("-n", "openshift-ovn-kubernetes", ovnMasterPodName, port+":29103", "--request-timeout=40s").Background()
o.Expect(err).NotTo(o.HaveOccurred())
defer cmd.Process.Kill()
output, err := exec.Command("bash", "-c", "ps -ef | grep 29103").Output()
e2e.Logf("output is: %s", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(ovnMasterPodName))
// wait for the port to start listening
checkErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
checkOutput, _ := exec.Command("bash", "-c", "lsof -iTCP:"+port+" -sTCP:LISTEN").Output()
// no need to check the error since some systems write valid results to stderr
if len(checkOutput) != 0 {
return true, nil
}
e2e.Logf("Port is not listening, trying again...")
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Port cannot listen")
getGoroutineOut := "curl -ks --noproxy localhost http://localhost:" + port + "/debug/pprof/goroutine\\?debug\\=1 | grep -C 1 'periodicallyRetryResources' | awk 'NR==1{print $1}'"
PreGoroutineOut, err := exec.Command("bash", "-c", getGoroutineOut).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(PreGoroutineOut).NotTo(o.BeEmpty())
e2e.Logf("PreGoroutineOut is: %s", PreGoroutineOut)
exutil.By("3. Get initial ovnk-master pod memory usage.\n")
checkMemoryCmd := fmt.Sprintf("oc -n openshift-ovn-kubernetes adm top pod | sed '1d' | awk 'NR==%d{print $1,$3}'", flag)
checkMemory1, err := exec.Command("bash", "-c", checkMemoryCmd).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Repeat creating, deleting then recreating same network policy 15 times.\n")
networkPolicyResource := networkPolicyResource{
name: "ingress-networkpolicy",
namespace: ns,
policy: "ingress",
policyType: "Ingress",
direction1: "from",
namespaceSel1: "matchLabels",
namespaceSelKey1: matchLabelKey,
namespaceSelVal1: ns,
template: ingressNPPolicyTemplate,
}
for i := 0; i < 15; i++ {
// Create network policy
networkPolicyResource.createNetworkPolicy(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
// Delete network policy
removeResource(oc, true, true, "networkpolicy", networkPolicyResource.name, "-n", ns)
}
exutil.By("5. Compare the goroutine call value between pre and post output.\n")
PostGoroutineOut, err := exec.Command("bash", "-c", getGoroutineOut).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(PostGoroutineOut).NotTo(o.BeEmpty())
e2e.Logf("PostGoroutineOut is: %s", PostGoroutineOut)
o.Expect(string(PreGoroutineOut) == string(PostGoroutineOut)).To(o.BeTrue())
exutil.By("6. Verify ovnk-master pod memory usage should be the same as previous.\n")
// wait for ovnk-master leader pod to be stable
checkErr = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
checkMemory2, err := exec.Command("bash", "-c", checkMemoryCmd).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
if string(checkMemory2) == string(checkMemory1) {
e2e.Logf("Memory usage is the same as previous.")
return true, nil
}
e2e.Logf("%v,Waiting for ovnk-master pod stable, try again ...,", err)
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Check the memory usage timeout.")
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
5bac44c2-ae3d-4a73-9c98-ccca95970b07
|
Author:zzhao-Critical-49076-[FdpOvnOvs]-service domain can be resolved when egress type is enabled
|
['"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:zzhao-Critical-49076-[FdpOvnOvs]-service domain can be resolved when egress type is enabled", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloSdnFile = filepath.Join(buildPruningBaseDir, "hellosdn.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress-allow-all.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/ingress-allow-all.yaml")
)
g.By("create new namespace")
oc.SetupProject()
g.By("create test pods")
createResourceFromFile(oc, oc.Namespace(), testPodFile)
createResourceFromFile(oc, oc.Namespace(), helloSdnFile)
err := waitForPodWithLabelReady(oc, oc.Namespace(), "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
err = waitForPodWithLabelReady(oc, oc.Namespace(), "name=hellosdn")
exutil.AssertWaitPollNoErr(err, "this pod with label name=hellosdn not ready")
g.By("create egress and ingress type networkpolicy")
createResourceFromFile(oc, oc.Namespace(), egressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-all-egress"))
createResourceFromFile(oc, oc.Namespace(), ingressTypeFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-all-ingress"))
g.By("check hellosdn pods can reolsve the dns after apply the networkplicy")
helloSdnName := getPodName(oc, oc.Namespace(), "name=hellosdn")
digOutput, err := e2eoutput.RunHostCmd(oc.Namespace(), helloSdnName[0], "dig kubernetes.default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(digOutput).Should(o.ContainSubstring("Got answer"))
o.Expect(digOutput).ShouldNot(o.ContainSubstring("connection timed out"))
g.By("check test-pods can reolsve the dns after apply the networkplicy")
testPodName := getPodName(oc, oc.Namespace(), "name=test-pods")
digOutput, err = e2eoutput.RunHostCmd(oc.Namespace(), testPodName[0], "dig kubernetes.default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(digOutput).Should(o.ContainSubstring("Got answer"))
o.Expect(digOutput).ShouldNot(o.ContainSubstring("connection timed out"))
})
| |||||
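For context, a minimal sketch of what the egress-allow-all fixture used above plausibly contains, expressed with the upstream NetworkPolicy Go types (the YAML itself is not reproduced in this dump, so the shape is an assumption): an empty pod selector matches every pod in the namespace, and a single empty rule allows all traffic in that direction.
package sketch
import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// allowAllEgress mirrors the assumed shape of egress-allow-all.yaml: all pods
// selected, one empty egress rule, so nothing outbound (including DNS) is blocked.
func allowAllEgress(ns string) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-all-egress", Namespace: ns},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},                     // empty selector: every pod
			Egress:      []networkingv1.NetworkPolicyEgressRule{{}}, // empty rule: allow all egress
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
		},
	}
}
The ingress-allow-all fixture would be the mirror image: PolicyTypeIngress with one empty ingress rule.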
test case
|
openshift/openshift-tests-private
|
b6bb1307-9c40-4064-95a5-223c3dcf89d1
|
Author:huirwang-Critical-49186-[FdpOvnOvs] [Bug 2035336] Networkpolicy egress rule should work for statefulset pods.
|
['"net"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:huirwang-Critical-49186-[FdpOvnOvs] [Bug 2035336] Networkpolicy egress rule should work for statefulset pods.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloStatefulsetFile = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-egress-red.yaml")
)
g.By("1. Create first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("2. Create a statefulset pod in first namespace.")
createResourceFromFile(oc, ns1, helloStatefulsetFile)
err := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(err, "this pod with label app=hello not ready")
helloPodName := getPodName(oc, ns1, "app=hello")
g.By("3. Create networkpolicy with egress rule in first namespace.")
createResourceFromFile(oc, ns1, egressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-egress-to-red"))
g.By("4. Create second namespace.")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("5. Create test pods in second namespace.")
createResourceFromFile(oc, ns2, testPodFile)
err = waitForPodWithLabelReady(oc, oc.Namespace(), "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("6. Add label to first test pod in second namespace.")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "team=qe").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
testPodName := getPodName(oc, ns2, "name=test-pods")
err = exutil.LabelPod(oc, ns2, testPodName[0], "type=red")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("6. Get IP of the test pods in second namespace.")
testPodIP1 := getPodIPv4(oc, ns2, testPodName[0])
testPodIP2 := getPodIPv4(oc, ns2, testPodName[1])
g.By("7. Check networkpolicy works.")
output, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
_, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
g.By("8. Delete statefulset pod for a couple of times.")
for i := 0; i < 5; i++ {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", helloPodName[0], "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(err, "this pod with label app=hello not ready")
}
g.By("9. Again checking networkpolicy works.")
output, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP1, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
_, err = e2eoutput.RunHostCmd(ns1, helloPodName[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIP2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
})
| |||||
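The allow-egress-red fixture's contents are not shown, but the labels the test applies (team=qe on the second namespace, type=red on one pod) imply an egress peer of roughly this shape; treat it as an inferred sketch, not the fixture's literal contents.
package sketch
import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Both selectors inside one peer are AND'ed, so egress is allowed only to pods
// labeled type=red that live in namespaces labeled team=qe -- which is why the
// curl to the unlabeled second test pod times out with exit status 28.
var redPeer = networkingv1.NetworkPolicyPeer{
	NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"team": "qe"}},
	PodSelector:       &metav1.LabelSelector{MatchLabels: map[string]string{"type": "red"}},
}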
test case
|
openshift/openshift-tests-private
|
c9d6d7cd-25dd-4e61-87cc-fc23d90a0372
|
Author:anusaxen-High-49437-[FdpOvnOvs] [BZ 2037647] Ingress network policy shouldn't be overruled by egress network policy on another pod
|
['"context"', '"net"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:anusaxen-High-49437-[FdpOvnOvs] [BZ 2037647] Ingress network policy shouldn't be overruled by egress network policy on another pod", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-allow-egress.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
g.By("Create first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("create a hello pod in first namespace")
podns1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns1.createPingPodNode(oc)
waitPodReady(oc, podns1.namespace, podns1.name)
g.By("create default allow egress type networkpolicy in first namespace")
createResourceFromFile(oc, ns1, egressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-allow-egress"))
g.By("Create Second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create a hello-pod on 2nd namesapce on same node as first namespace")
pod1Ns2 := pingPodResourceNode{
name: "hello-pod",
namespace: ns2,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1Ns2.createPingPodNode(oc)
waitPodReady(oc, pod1Ns2.namespace, pod1Ns2.name)
g.By("create another hello-pod on 2nd namesapce but on different node")
pod2Ns2 := pingPodResourceNode{
name: "hello-pod-other-node",
namespace: ns2,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2Ns2.createPingPodNode(oc)
waitPodReady(oc, pod2Ns2.namespace, pod2Ns2.name)
helloPodNameNs2 := getPodName(oc, ns2, "name=hello-pod")
g.By("create default deny ingress type networkpolicy in 2nd namespace")
createResourceFromFile(oc, ns2, ingressTypeFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
g.By("3. Get IP of the test pods in second namespace.")
hellopodIP1Ns2 := getPodIPv4(oc, ns2, helloPodNameNs2[0])
hellopodIP2Ns2 := getPodIPv4(oc, ns2, helloPodNameNs2[1])
g.By("4. Curl both ns2 pods from ns1.")
_, err = e2eoutput.RunHostCmd(ns1, podns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(hellopodIP1Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
_, err = e2eoutput.RunHostCmd(ns1, podns1.name, "curl --connect-timeout 5 -s "+net.JoinHostPort(hellopodIP2Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
})
| |||||
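The default-deny-ingress fixture is not reproduced in this dump, but its canonical shape is worth showing because it explains why both curls from ns1 fail: selecting every pod and declaring the Ingress policy type with no rules blocks all inbound traffic while leaving egress untouched. A sketch with the upstream types:
package sketch
import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func defaultDenyIngress(ns string) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "default-deny-ingress", Namespace: ns},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{}, // every pod in ns
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			// no Ingress rules listed => nothing is allowed in
		},
	}
}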
test case
|
openshift/openshift-tests-private
|
94c5abad-bd75-4663-8515-4c42f293802b
|
NonHyperShiftHOST-Author:anusaxen-Medium-49686-[FdpOvnOvs] network policy with ingress rule with ipBlock
|
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonHyperShiftHOST-Author:anusaxen-Medium-49686-[FdpOvnOvs] network policy with ingress rule with ipBlock", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockIngressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-dual-CIDRs-template.yaml")
ipBlockIngressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-single-CIDR-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("create 3rd hello pod in ns1")
pod3ns1 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod3ns1.createPingPodNode(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
helloPod1ns1IPv6, helloPod1ns1IPv4 := getPodIP(oc, ns1, pod1ns1.name)
helloPod1ns1IPv4WithCidr := helloPod1ns1IPv4 + "/32"
helloPod1ns1IPv6WithCidr := helloPod1ns1IPv6 + "/128"
if ipStackType == "dualstack" {
g.By("create ipBlock Ingress Dual CIDRs Policy in ns1")
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress",
template: ipBlockIngressTemplateDual,
cidrIpv4: helloPod1ns1IPv4WithCidr,
cidrIpv6: helloPod1ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-ingress"))
} else {
// For single-stack clusters getPodIP returns an empty second value, so the address
// (IPv4 or IPv6) is always in helloPod1ns1IPv6; append the CIDR suffix that matches the stack.
var helloPod1ns1IPWithCidr string
if ipStackType == "ipv6single" {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6WithCidr
} else {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6 + "/32"
}
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress",
template: ipBlockIngressTemplateSingle,
cidr: helloPod1ns1IPWithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-ingress"))
}
g.By("Checking connectivity from pod1 to pod3")
CurlPod2PodPass(oc, ns1, "hello-pod1", ns1, "hello-pod3")
g.By("Checking connectivity from pod2 to pod3")
CurlPod2PodFail(oc, ns1, "hello-pod2", ns1, "hello-pod3")
g.By("Create 2nd namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create 1st hello pod in ns2")
pod1ns2 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns2,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod1ns2.createPingPodNode(oc)
waitPodReady(oc, pod1ns2.namespace, pod1ns2.name)
g.By("create 2nd hello pod in ns2")
pod2ns2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod2ns2.createPingPodNode(oc)
waitPodReady(oc, pod2ns2.namespace, pod2ns2.name)
g.By("Checking connectivity from pod1ns2 to pod3ns1")
CurlPod2PodFail(oc, ns2, "hello-pod1", ns1, "hello-pod3")
g.By("Checking connectivity from pod2ns2 to pod1ns1")
CurlPod2PodFail(oc, ns2, "hello-pod2", ns1, "hello-pod1")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
helloPod2ns2IPv6, helloPod2ns2IPv4 := getPodIP(oc, ns2, pod2ns2.name)
helloPod2ns2IPv4WithCidr := helloPod2ns2IPv4 + "/32"
helloPod2ns2IPv6WithCidr := helloPod2ns2IPv6 + "/128"
if ipStackType == "dualstack" {
g.By("create ipBlock Ingress Dual CIDRs Policy in ns1 again but with ipblock for pod2 ns2")
npIPBlockNS1New := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress",
template: ipBlockIngressTemplateDual,
cidrIpv4: helloPod2ns2IPv4WithCidr,
cidrIpv6: helloPod2ns2IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1New.createipBlockCIDRObjectDual(oc)
} else {
// For single-stack clusters getPodIP returns an empty second value, so the address
// (IPv4 or IPv6) is always in helloPod2ns2IPv6; append the CIDR suffix that matches the stack.
var helloPod2ns2IPWithCidr string
if ipStackType == "ipv6single" {
helloPod2ns2IPWithCidr = helloPod2ns2IPv6WithCidr
} else {
helloPod2ns2IPWithCidr = helloPod2ns2IPv6 + "/32"
}
npIPBlockNS1New := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress",
template: ipBlockIngressTemplateSingle,
cidr: helloPod2ns2IPWithCidr,
namespace: ns1,
}
npIPBlockNS1New.createipBlockCIDRObjectSingle(oc)
}
g.By("Checking connectivity from pod2 ns2 to pod3 ns1")
CurlPod2PodPass(oc, ns2, "hello-pod2", ns1, "hello-pod3")
g.By("Checking connectivity from pod1 ns2 to pod3 ns1")
CurlPod2PodFail(oc, ns2, "hello-pod1", ns1, "hello-pod3")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1 again so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1 again so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-ingress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Check connectivity works fine across all failed ones above to make sure all policy flows are cleared properly")
g.By("Checking connectivity from pod2ns1 to pod3ns1")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod3")
g.By("Checking connectivity from pod1ns2 to pod3ns1")
CurlPod2PodPass(oc, ns2, "hello-pod1", ns1, "hello-pod3")
g.By("Checking connectivity from pod2ns2 to pod1ns1 on IPv4 interface")
CurlPod2PodPass(oc, ns2, "hello-pod2", ns1, "hello-pod1")
})
| |||||
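A sketch of the single-CIDR ingress rule the templates above render (field shapes from the upstream API; the CIDR value is whatever /32 or /128 the test computed): only traffic sourced from that one address, i.e. exactly hello-pod1, may reach the selected pods, matching the pass/fail pattern in the connectivity checks.
package sketch
import networkingv1 "k8s.io/api/networking/v1"
func ipBlockIngress(cidr string) networkingv1.NetworkPolicyIngressRule {
	return networkingv1.NetworkPolicyIngressRule{
		From: []networkingv1.NetworkPolicyPeer{{
			IPBlock: &networkingv1.IPBlock{CIDR: cidr}, // e.g. "<pod IP>/32" or "<pod IP>/128"
		}},
	}
}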
test case
|
openshift/openshift-tests-private
|
a86981e8-5ae6-4392-8ca4-18fc618eec26
|
Author:zzhao-Critical-49696-[FdpOvnOvs] mixed ingress and egress policies can work well
|
['"net"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:zzhao-Critical-49696-[FdpOvnOvs] mixed ingress and egress policies can work well", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloSdnFile = filepath.Join(buildPruningBaseDir, "hellosdn.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress_49696.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/ingress_49696.yaml")
)
g.By("create one namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("create test pods")
createResourceFromFile(oc, ns1, testPodFile)
createResourceFromFile(oc, ns1, helloSdnFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
err = waitForPodWithLabelReady(oc, ns1, "name=hellosdn")
exutil.AssertWaitPollNoErr(err, "this pod with label name=hellosdn not ready")
hellosdnPodNameNs1 := getPodName(oc, ns1, "name=hellosdn")
g.By("create egress type networkpolicy in ns1")
createResourceFromFile(oc, ns1, egressTypeFile)
g.By("create ingress type networkpolicy in ns1")
createResourceFromFile(oc, ns1, ingressTypeFile)
g.By("create second namespace")
oc.SetupProject()
ns2 := oc.Namespace()
g.By("create test pods in second namespace")
createResourceFromFile(oc, ns2, helloSdnFile)
err = waitForPodWithLabelReady(oc, ns2, "name=hellosdn")
exutil.AssertWaitPollNoErr(err, "this pod with label name=hellosdn not ready")
g.By("Get IP of the test pods in second namespace.")
hellosdnPodNameNs2 := getPodName(oc, ns2, "name=hellosdn")
hellosdnPodIP1Ns2 := getPodIPv4(oc, ns2, hellosdnPodNameNs2[0])
g.By("curl from ns1 hellosdn pod to ns2 pod")
_, err = e2eoutput.RunHostCmd(ns1, hellosdnPodNameNs1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(hellosdnPodIP1Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
})
| |||||
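The CurlPod2PodPass/Fail helpers used throughout these cases live elsewhere in the repo; an approximation of what they do, built only from calls visible in this dump (e2eoutput.RunHostCmd, curl against port 8080, "Hello OpenShift" as the expected body):
package sketch
import (
	"net"
	"strings"

	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
// curlPod2Pod execs a curl inside srcPod against dstIP:8080 and reports whether
// the server answered. A curl "exit status 28" surfacing as an error means the
// connection timed out, i.e. the traffic was blocked by policy.
func curlPod2Pod(srcNs, srcPod, dstIP string) (bool, error) {
	out, err := e2eoutput.RunHostCmd(srcNs, srcPod,
		"curl --connect-timeout 5 -s "+net.JoinHostPort(dstIP, "8080"))
	if err != nil {
		return false, err
	}
	return strings.Contains(out, "Hello OpenShift"), nil
}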
test case
|
openshift/openshift-tests-private
|
e9ea7ff9-4065-4f68-adf0-b1a51e5d272e
|
Author:anusaxen-High-46246-[FdpOvnOvs] Network Policies should work with OVNKubernetes when traffic hairpins back to the same source through a service
|
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:anusaxen-High-46246-[FdpOvnOvs] Network Policies should work with OVNKubernetes when traffic hairpins back to the same source through a service", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allowfromsameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Create a namespace")
oc.SetupProject()
ns := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, ns, pod1.name)
g.By("create 2nd hello pod in same namespace but on different node")
pod2 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns, pod2.name)
g.By("Create a test service backing up both the above pods")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
svc.ipFamilyPolicy = "SingleStack"
svc.createServiceFromParams(oc)
g.By("create allow-from-same-namespace ingress networkpolicy in ns")
createResourceFromFile(oc, ns, allowfromsameNS)
g.By("curl from hello-pod1 to hello-pod2")
CurlPod2PodPass(oc, ns, "hello-pod1", ns, "hello-pod2")
g.By("curl from hello-pod2 to hello-pod1")
CurlPod2PodPass(oc, ns, "hello-pod2", ns, "hello-pod1")
for i := 0; i < 5; i++ {
g.By("curl from hello-pod1 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod1", "test-service")
g.By("curl from hello-pod2 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod2", "test-service")
}
g.By("Make sure pods are curl'able from respective nodes")
CurlNode2PodPass(oc, pod1.nodename, ns, "hello-pod1")
CurlNode2PodPass(oc, pod2.nodename, ns, "hello-pod2")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
g.By("Delete testservice from ns")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "test-service", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Checking pod to svc:port behavior now on with PreferDualStack Service")
svc.ipFamilyPolicy = "PreferDualStack"
svc.createServiceFromParams(oc)
for i := 0; i < 5; i++ {
g.By("curl from hello-pod1 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod1", "test-service")
g.By("curl from hello-pod2 to service:port")
CurlPod2SvcPass(oc, ns, ns, "hello-pod2", "test-service")
}
}
})
| |||||
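The surrounding ipBlock cases append "/32" or "/128" by hand after inspecting the stack type; a hypothetical helper using the k8s.io/utils/net package would express the same logic in one place:
package sketch
import netutils "k8s.io/utils/net"
// hostCIDR converts a bare pod IP into the single-host CIDR string these
// ipBlock tests keep building by hand: /128 for IPv6, /32 for IPv4.
func hostCIDR(ip string) string {
	if netutils.IsIPv6String(ip) {
		return ip + "/128"
	}
	return ip + "/32"
}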
test case
|
openshift/openshift-tests-private
|
56070609-d4b8-4dea-bf22-e14edbf3f111
|
NonHyperShiftHOST-Author:huirwang-High-41879-[FdpOvnOvs] ipBlock should not ignore all other cidr's apart from the last one specified
|
['"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonHyperShiftHOST-Author:huirwang-High-41879-[FdpOvnOvs] ipBlock should not ignore all other cidr's apart from the last one specified ", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockIngressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-dual-multiple-CIDRs-template.yaml")
ipBlockIngressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-single-multiple-CIDRs-template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
ipStackType := checkIPStackType(oc)
if ipStackType == "ipv4single" {
g.Skip("This case requires dualstack or Single Stack IPv6 cluster")
}
g.By("Create a namespace")
oc.SetupProject()
ns1 := oc.Namespace()
g.By("create test pods in ns1")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("Scale test pods to 5")
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=5", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("Get 3 test pods's podname and IPs")
testPodName := getPodName(oc, ns1, "name=test-pods")
testPod1IPv6, testPod1IPv4 := getPodIP(oc, ns1, testPodName[0])
testPod1IPv4WithCidr := testPod1IPv4 + "/32"
testPod1IPv6WithCidr := testPod1IPv6 + "/128"
testPod2IPv6, testPod2IPv4 := getPodIP(oc, ns1, testPodName[1])
testPod2IPv4WithCidr := testPod2IPv4 + "/32"
testPod2IPv6WithCidr := testPod2IPv6 + "/128"
testPod3IPv6, testPod3IPv4 := getPodIP(oc, ns1, testPodName[2])
testPod3IPv4WithCidr := testPod3IPv4 + "/32"
testPod3IPv6WithCidr := testPod3IPv6 + "/128"
if ipStackType == "dualstack" {
g.By("create ipBlock Ingress Dual CIDRs Policy in ns1")
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress-41879",
template: ipBlockIngressTemplateDual,
cidrIpv4: testPod1IPv4WithCidr,
cidrIpv6: testPod1IPv6WithCidr,
cidr2Ipv4: testPod2IPv4WithCidr,
cidr2Ipv6: testPod2IPv6WithCidr,
cidr3Ipv4: testPod3IPv4WithCidr,
cidr3Ipv6: testPod3IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createIPBlockMultipleCIDRsObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-ingress-41879"))
} else {
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress-41879",
template: ipBlockIngressTemplateSingle,
cidr: testPod1IPv6WithCidr,
cidr2: testPod2IPv6WithCidr,
cidr3: testPod3IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createIPBlockMultipleCIDRsObjectSingle(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-ingress-41879"))
}
g.By("Checking connectivity from pod1 to pod5")
CurlPod2PodPass(oc, ns1, testPodName[0], ns1, testPodName[4])
g.By("Checking connectivity from pod2 to pod5")
CurlPod2PodPass(oc, ns1, testPodName[1], ns1, testPodName[4])
g.By("Checking connectivity from pod3 to pod5")
CurlPod2PodPass(oc, ns1, testPodName[2], ns1, testPodName[4])
g.By("Checking connectivity from pod4 to pod5")
CurlPod2PodFail(oc, ns1, testPodName[3], ns1, testPodName[4])
})
| |||||
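A sketch of the multi-CIDR rule that the templates in case 41879 render (CIDR values are placeholders for the pod /32 or /128 strings the test computes): peers listed under a single "from" are OR'ed together, so traffic matching any of the three CIDRs must be admitted, which is why pods 1-3 reach pod 5 while pod 4 does not.
package sketch
import networkingv1 "k8s.io/api/networking/v1"
func multiCIDRIngress(pod1CIDR, pod2CIDR, pod3CIDR string) networkingv1.NetworkPolicyIngressRule {
	return networkingv1.NetworkPolicyIngressRule{
		From: []networkingv1.NetworkPolicyPeer{
			{IPBlock: &networkingv1.IPBlock{CIDR: pod1CIDR}},
			{IPBlock: &networkingv1.IPBlock{CIDR: pod2CIDR}},
			{IPBlock: &networkingv1.IPBlock{CIDR: pod3CIDR}},
		},
	}
}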
test case
|
openshift/openshift-tests-private
|
bdf0136b-f3ad-4d46-9a86-b944ae761bd5
|
Author:asood-Medium-46807-[FdpOvnOvs] network policy with egress rule with ipBlock
|
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Medium-46807-[FdpOvnOvs] network policy with egress rule with ipBlock", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockEgressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-dual-CIDRs-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-single-CIDR-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("create 3rd hello pod in ns1")
pod3ns1 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod3ns1.createPingPodNode(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
helloPod1ns1IP1, helloPod1ns1IP2 := getPodIP(oc, ns1, pod1ns1.name)
if ipStackType == "dualstack" {
helloPod1ns1IPv6WithCidr := helloPod1ns1IP1 + "/128"
helloPod1ns1IPv4WithCidr := helloPod1ns1IP2 + "/32"
g.By("create ipBlock Egress Dual CIDRs Policy in ns1")
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-egress",
template: ipBlockEgressTemplateDual,
cidrIpv4: helloPod1ns1IPv4WithCidr,
cidrIpv6: helloPod1ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-egress"))
} else {
if ipStackType == "ipv6single" {
helloPod1ns1IPv6WithCidr := helloPod1ns1IP1 + "/128"
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPod1ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
} else {
helloPod1ns1IPv4WithCidr := helloPod1ns1IP1 + "/32"
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPod1ns1IPv4WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
}
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress"))
}
g.By("Checking connectivity from pod2 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
g.By("Checking connectivity from pod2 to pod3")
CurlPod2PodFail(oc, ns1, "hello-pod2", ns1, "hello-pod3")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-egress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-egress", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Check connectivity works fine across all failed ones above to make sure all policy flows are cleared properly")
g.By("Checking connectivity from pod2 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
g.By("Checking connectivity from pod2 to pod3")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod3")
})
| |||||
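The egress counterpart used in case 46807 plausibly renders to a rule of this shape (a sketch, not the template's literal output): only traffic destined for the given single-host CIDR, hello-pod1's address, is allowed out, so the curl to hello-pod3 times out until the policy is deleted.
package sketch
import networkingv1 "k8s.io/api/networking/v1"
func ipBlockEgress(cidr string) networkingv1.NetworkPolicyEgressRule {
	return networkingv1.NetworkPolicyEgressRule{
		To: []networkingv1.NetworkPolicyPeer{{
			IPBlock: &networkingv1.IPBlock{CIDR: cidr}, // e.g. "<hello-pod1 IP>/32"
		}},
	}
}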
test case
|
openshift/openshift-tests-private
|
39332422-4941-4cc9-900f-5bb456d0d302
|
Author:asood-Medium-46808-[FdpOvnOvs] network policy with egress rule with ipBlock and except
|
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Medium-46808-[FdpOvnOvs] network policy with egress rule with ipBlock and except", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockEgressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-except-dual-CIDRs-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-except-single-CIDR-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("create 1st hello pod in ns1 on node[0]")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1 on node[0]")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("create 3rd hello pod in ns1 on node[1]")
pod3ns1 := pingPodResourceNode{
name: "hello-pod3",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod3ns1.createPingPodNode(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
g.By("create 4th hello pod in ns1 on node[1]")
pod4ns1 := pingPodResourceNode{
name: "hello-pod4",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod4ns1.createPingPodNode(oc)
waitPodReady(oc, pod4ns1.namespace, pod4ns1.name)
helloPod2ns1IP1, helloPod2ns1IP2 := getPodIP(oc, ns1, pod2ns1.name)
if ipStackType == "dualstack" {
hostSubnetCIDRIPv4, hostSubnetCIDRIPv6 := getNodeSubnetDualStack(oc, nodeList.Items[0].Name, "default")
o.Expect(hostSubnetCIDRIPv6).NotTo(o.BeEmpty())
o.Expect(hostSubnetCIDRIPv4).NotTo(o.BeEmpty())
helloPod2ns1IPv6WithCidr := helloPod2ns1IP1 + "/128"
helloPod2ns1IPv4WithCidr := helloPod2ns1IP2 + "/32"
g.By("create ipBlock Egress CIDRs with except rule Policy in ns1 on dualstack")
npIPBlockNS1 := ipBlockCIDRsExceptDual{
name: "ipblock-dual-cidrs-egress-except",
template: ipBlockEgressTemplateDual,
cidrIpv4: hostSubnetCIDRIPv4,
cidrIpv4Except: helloPod2ns1IPv4WithCidr,
cidrIpv6: hostSubnetCIDRIPv6,
cidrIpv6Except: helloPod2ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockExceptObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-dual-cidrs-egress-except"))
} else {
if ipStackType == "ipv6single" {
hostSubnetCIDRIPv6 := getNodeSubnet(oc, nodeList.Items[0].Name, "default")
o.Expect(hostSubnetCIDRIPv6).NotTo(o.BeEmpty())
helloPod2ns1IPv6WithCidr := helloPod2ns1IP1 + "/128"
g.By("create ipBlock Egress CIDRs with except rule Policy in ns1 on IPv6 singlestack")
npIPBlockNS1 := ipBlockCIDRsExceptSingle{
name: "ipblock-single-cidr-egress-except",
template: ipBlockEgressTemplateSingle,
cidr: hostSubnetCIDRIPv6,
except: helloPod2ns1IPv6WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockExceptObjectSingle(oc, true)
} else {
hostSubnetCIDRIPv4 := getNodeSubnet(oc, nodeList.Items[0].Name, "default")
o.Expect(hostSubnetCIDRIPv4).NotTo(o.BeEmpty())
helloPod2ns1IPv4WithCidr := helloPod2ns1IP1 + "/32"
g.By("create ipBlock Egress CIDRs with except rule Policy in ns1 on IPv4 singlestack")
npIPBlockNS1 := ipBlockCIDRsExceptSingle{
name: "ipblock-single-cidr-egress-except",
template: ipBlockEgressTemplateSingle,
cidr: hostSubnetCIDRIPv4,
except: helloPod2ns1IPv4WithCidr,
namespace: ns1,
}
npIPBlockNS1.createipBlockExceptObjectSingle(oc, true)
}
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-single-cidr-egress-except"))
}
g.By("Checking connectivity from pod3 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod1")
g.By("Checking connectivity from pod3 to pod2")
CurlPod2PodFail(oc, ns1, "hello-pod3", ns1, "hello-pod2")
g.By("Checking connectivity from pod3 to pod4")
CurlPod2PodFail(oc, ns1, "hello-pod3", ns1, "hello-pod4")
if ipStackType == "dualstack" {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-dual-cidrs-egress-except", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
g.By("Delete networkpolicy from ns1 so no networkpolicy in namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", "ipblock-single-cidr-egress-except", "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Check connectivity works fine across all failed ones above to make sure all policy flows are cleared properly")
g.By("Checking connectivity from pod3 to pod1")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod1")
g.By("Checking connectivity from pod3 to pod2")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod2")
g.By("Checking connectivity from pod3 to pod4")
CurlPod2PodPass(oc, ns1, "hello-pod3", ns1, "hello-pod4")
})
| |||||
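A hedged sketch of the NetworkPolicy the single-CIDR except template presumably renders, expressed with client-go types; the empty pod selector and the sample values are illustrative assumptions about the fixture.

package main

import (
    "fmt"

    networkingv1 "k8s.io/api/networking/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// egressIPBlockExceptPolicy allows egress to cidr while carving out except,
// mirroring the ipblock-single-cidr-egress-except policy created above.
func egressIPBlockExceptPolicy(ns, cidr, except string) *networkingv1.NetworkPolicy {
    return &networkingv1.NetworkPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "ipblock-single-cidr-egress-except", Namespace: ns},
        Spec: networkingv1.NetworkPolicySpec{
            PodSelector: metav1.LabelSelector{}, // all pods in the namespace
            PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
            Egress: []networkingv1.NetworkPolicyEgressRule{{
                To: []networkingv1.NetworkPolicyPeer{{
                    IPBlock: &networkingv1.IPBlock{CIDR: cidr, Except: []string{except}},
                }},
            }},
        },
    }
}

func main() {
    p := egressIPBlockExceptPolicy("ns1", "10.128.2.0/23", "10.128.2.10/32")
    fmt.Println(p.Name)
}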
test case
|
openshift/openshift-tests-private
|
122578a7-79e1-4f7e-b2e7-5317d4388465
|
Author:asood-Medium-41082-Check ACL audit logs can be extracted
|
['"context"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Medium-41082-Check ACL audit logs can be extracted", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromSameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("Enable ACL looging on the namespace ns1")
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "alert"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", ns1, aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("create default deny ingress networkpolicy in ns1")
createResourceFromFile(oc, ns1, ingressTypeFile)
g.By("create allow same namespace networkpolicy in ns1")
createResourceFromFile(oc, ns1, allowFromSameNS)
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("Checking connectivity from pod2 to pod1 to generate messages")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
output, err2 := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, "--path=ovn/acl-audit-log.log").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=allow")).To(o.BeTrue())
})
| |||||
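A hedged sketch of the annotation aclSettings.getJSONString presumably produces: OVN-Kubernetes reads per-namespace ACL logging levels from the k8s.ovn.org/acl-logging annotation. The struct shape is an assumption about the suite's type.

package main

import (
    "encoding/json"
    "fmt"
)

// aclLogging mirrors the deny/allow severities the test sets to "alert".
type aclLogging struct {
    Deny  string `json:"deny"`
    Allow string `json:"allow"`
}

func main() {
    v, _ := json.Marshal(aclLogging{Deny: "alert", Allow: "alert"})
    // Annotation as passed to `oc annotate ns <ns> ...` (exact shape is an assumption).
    fmt.Printf("k8s.ovn.org/acl-logging=%s\n", v)
}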
test case
|
openshift/openshift-tests-private
|
f890c0d9-2629-4ddd-b5e2-ef63f012eefd
|
Author:asood-Medium-41407-[FdpOvnOvs] Check networkpolicy ACL audit message is logged with correct policy name
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Medium-41407-[FdpOvnOvs] Check networkpolicy ACL audit message is logged with correct policy name", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromSameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
var namespaces [2]string
policyList := [2]string{"default-deny-ingress", "allow-from-same-namespace"}
for i := 0; i < 2; i++ {
namespaces[i] = oc.Namespace()
exutil.By(fmt.Sprintf("Enable ACL looging on the namespace %s", namespaces[i]))
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "warning"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", namespaces[i], aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
exutil.By(fmt.Sprintf("Create default deny ingress networkpolicy in %s", namespaces[i]))
createResourceFromFile(oc, namespaces[i], ingressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(policyList[0]))
exutil.By(fmt.Sprintf("Create allow same namespace networkpolicy in %s", namespaces[i]))
createResourceFromFile(oc, namespaces[i], allowFromSameNS)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(policyList[1]))
pod := pingPodResourceNode{
name: "",
namespace: namespaces[i],
nodename: "",
template: pingPodNodeTemplate,
}
for j := 0; j < 2; j++ {
exutil.By(fmt.Sprintf("Create hello pod in %s", namespaces[i]))
pod.name = "hello-pod" + strconv.Itoa(j)
pod.nodename = nodeList.Items[j].Name
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
}
exutil.By(fmt.Sprintf("Checking connectivity from second pod to first pod to generate messages in %s", namespaces[i]))
CurlPod2PodPass(oc, namespaces[i], "hello-pod1", namespaces[i], "hello-pod0")
oc.SetupProject()
}
output, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[0].Name, "--path=ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ACL logs for allow-from-same-namespace policy \n %s", output)
// policy name truncated to allow-from-same-name in ACL log message
for i := 0; i < len(namespaces); i++ {
searchString := fmt.Sprintf("name=\"NP:%s:allow-from-same-name\", verdict=allow, severity=warning", namespaces[i])
o.Expect(strings.Contains(output, searchString)).To(o.BeTrue())
removeResource(oc, true, true, "networkpolicy", policyList[1], "-n", namespaces[i])
CurlPod2PodFail(oc, namespaces[i], "hello-pod0", namespaces[i], "hello-pod1")
}
output, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", nodeList.Items[1].Name, "--path=ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ACL logs for default-deny-ingress policy \n %s", output)
for i := 0; i < len(namespaces); i++ {
searchString := fmt.Sprintf("name=\"NP:%s:Ingress\", verdict=drop, severity=alert", namespaces[i])
o.Expect(strings.Contains(output, searchString)).To(o.BeTrue())
}
})
| |||||
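A hedged sketch of parsing the audit lines this test greps for; the sample line follows the searchString format built above (note the policy name truncated to allow-from-same-name), and the regex is illustrative rather than the suite's code.

package main

import (
    "fmt"
    "regexp"
)

// aclLine captures namespace, policy, verdict, and severity from an OVN ACL
// audit entry of the shape asserted in the test.
var aclLine = regexp.MustCompile(`name="NP:([^:]+):([^"]+)", verdict=(\w+), severity=(\w+)`)

func main() {
    sample := `acl_log(ovn-controller): name="NP:ns-test:allow-from-same-name", verdict=allow, severity=warning`
    if m := aclLine.FindStringSubmatch(sample); m != nil {
        fmt.Printf("namespace=%s policy=%s verdict=%s severity=%s\n", m[1], m[2], m[3], m[4])
    }
}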
test case
|
openshift/openshift-tests-private
|
e1e590c2-dd7b-45c8-a527-a47fc5fc2717
|
Author:asood-NonPreRelease-Longduration-WRS-Medium-41080-V-BR.33-Check network policy ACL audit messages are logged to journald
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-NonPreRelease-Longduration-WRS-Medium-41080-V-BR.33-Check network policy ACL audit messages are logged to journald", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromSameNS = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
g.By("Configure audit message logging destination to journald")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfo := `{"spec":{"defaultNetwork":{"ovnKubernetesConfig":{"policyAuditConfig": {"destination": "libc"}}}}}`
undoPatchInfo := `{"spec":{"defaultNetwork":{"ovnKubernetesConfig":{"policyAuditConfig": {"destination": ""}}}}}`
defer func() {
_, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", undoPatchInfo, "--type=merge").Output()
o.Expect(patchErr).NotTo(o.HaveOccurred())
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
}()
_, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(patchSResource, "-p", patchInfo, "--type=merge").Output()
o.Expect(patchErr).NotTo(o.HaveOccurred())
//The network operator needs to recreate the pods after the merge patch, so give it enough time.
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
g.By("Obtain the namespace")
ns1 := oc.Namespace()
g.By("Enable ACL looging on the namespace ns1")
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "alert"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", ns1, aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("create default deny ingress networkpolicy in ns1")
createResourceFromFile(oc, ns1, ingressTypeFile)
g.By("create allow same namespace networkpolicy in ns1")
createResourceFromFile(oc, ns1, allowFromSameNS)
g.By("create 1st hello pod in ns1")
pod1ns1 := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns1.createPingPodNode(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
g.By("create 2nd hello pod in ns1")
pod2ns1 := pingPodResourceNode{
name: "hello-pod2",
namespace: ns1,
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns1.createPingPodNode(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
g.By("Checking connectivity from pod2 to pod1 to generate messages")
CurlPod2PodPass(oc, ns1, "hello-pod2", ns1, "hello-pod1")
g.By("Checking messages are logged to journald")
cmd := "journalctl -t ovn-controller --since '1min ago' | grep 'verdict=allow'"
output, journalctlErr := exutil.DebugNodeWithOptionsAndChroot(oc, nodeList.Items[0].Name, []string{"-q"}, "bin/sh", "-c", cmd)
e2e.Logf("Output %s", output)
o.Expect(journalctlErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=allow")).To(o.BeTrue())
})
| |||||
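A hedged sketch of building the merge patch applied above; with destination "libc", ovn-controller presumably emits the ACL audit messages via syslog, which journald captures, hence the journalctl check on the node.

package main

import (
    "encoding/json"
    "fmt"
)

// policyAuditConfig mirrors the fragment patched into
// networks.operator.openshift.io/cluster by the test.
type policyAuditConfig struct {
    Destination string `json:"destination"`
}

func main() {
    patch := map[string]any{
        "spec": map[string]any{
            "defaultNetwork": map[string]any{
                "ovnKubernetesConfig": map[string]any{
                    "policyAuditConfig": policyAuditConfig{Destination: "libc"},
                },
            },
        },
    }
    b, _ := json.Marshal(patch)
    fmt.Println(string(b)) // payload for `oc patch ... --type=merge -p <payload>`
}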
test case
|
openshift/openshift-tests-private
|
2d00c6c5-960b-4fd4-8989-f9d3e6fcff38
|
NonHyperShiftHOST-Author:anusaxen-Medium-55287-[FdpOvnOvs] Default network policy ACLs to a namespace should not be present with arp but arp||nd for ARPAllowPolicies
|
['"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonHyperShiftHOST-Author:anusaxen-Medium-55287-[FdpOvnOvs] Default network policy ACLs to a namespace should not be present with arp but arp||nd for ARPAllowPolicies", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
)
g.By("This is for BZ 2095852")
g.By("create new namespace")
oc.SetupProject()
g.By("create test pods")
createResourceFromFile(oc, oc.Namespace(), testPodFile)
err := waitForPodWithLabelReady(oc, oc.Namespace(), "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("create ingress default-deny type networkpolicy")
createResourceFromFile(oc, oc.Namespace(), ingressTypeFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny"))
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).NotTo(o.BeEmpty())
g.By("get ACLs related to ns")
//list only the ACLs related to the namespace under test
listACLCmd := "ovn-nbctl list ACL | grep -C 5 " + "NP:" + oc.Namespace() + " | grep -C 5 type=arpAllow"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
e2e.Logf("Output %s", listOutput)
o.Expect(listOutput).To(o.ContainSubstring("&& (arp || nd)"))
o.Expect(listOutput).ShouldNot(o.ContainSubstring("&& arp"))
})
| |||||
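A minimal sketch of the assertion's logic: the ARPAllowPolicy ACL match must read "&& (arp || nd)" so IPv6 neighbor discovery also survives a default-deny policy; the sample match strings are illustrative.

package main

import (
    "fmt"
    "strings"
)

// arpMatchIsDualFamily mirrors the two expectations above: the match must
// contain "&& (arp || nd)" and must not contain a bare "&& arp" clause.
func arpMatchIsDualFamily(match string) bool {
    return strings.Contains(match, "&& (arp || nd)") && !strings.Contains(match, "&& arp")
}

func main() {
    fmt.Println(arpMatchIsDualFamily(`inport == @a14095297 && (arp || nd)`)) // true
    fmt.Println(arpMatchIsDualFamily(`inport == @a14095297 && arp`))         // false
}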
test case
|
openshift/openshift-tests-private
|
3ee98bcb-66cf-4f5c-ba80-21c0ea162bee
|
NonHyperShiftHOST-Author:huirwang-High-62524-[FdpOvnOvs] OVN address_set referenced in acl should not miss when networkpolicy name includes dot.
|
['"path/filepath"', '"regexp"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonHyperShiftHOST-Author:huirwang-High-62524-[FdpOvnOvs] OVN address_set referenced in acl should not miss when networkpolicy name includes dot.", func() {
// This is for customer bug https://issues.redhat.com/browse/OCPBUGS-4085
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
networkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress-ingress-62524.yaml")
)
g.By("Check cluster network type")
g.By("Get namespace")
ns := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "team-").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, "team=openshift-networking").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create test pods")
createResourceFromFile(oc, ns, testPodFile)
err = waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testPod := getPodName(oc, ns, "name=test-pods")
g.By("Create a pod ")
pod1 := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
g.By("create egress-ingress type networkpolicy")
createResourceFromFile(oc, ns, networkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("egress-ingress-62524.test"))
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).NotTo(o.BeEmpty())
g.By("Verify the address_set exists for the specific acl")
//list ACLs related to the networkpolicy name
listACLCmd := "ovn-nbctl --data=bare --no-heading --format=table find acl | grep egress-ingress-62524.test"
listOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listOutput).NotTo(o.BeEmpty())
// Get the address set name from the acls
regex := `\{\$(\w+)\}`
re := regexp.MustCompile(regex)
matches := re.FindAllStringSubmatch(listOutput, -1)
if len(matches) == 0 {
e2e.Fail("No matched address_set name found")
}
var result []string
for _, match := range matches {
if len(match) == 2 { // Check if a match was found
result = append(result, match[1]) // Append the captured group to the result slice
}
}
if len(result) == 0 {
e2e.Fail("No matched address_set name found")
}
//Check each address_set can be found via ovn-nbctl list address_set
for _, addrSetName := range result {
listAddressSetCmd := "ovn-nbctl --no-leader-only list address_set | grep " + addrSetName
listAddrOutput, listAddrErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, listAddressSetCmd)
o.Expect(listAddrErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
}
g.By("Checking pods connectivity")
CurlPod2PodPass(oc, ns, testPod[0], ns, pod1.name)
CurlPod2PodFail(oc, ns, testPod[0], ns, testPod[1])
})
| |||||
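A runnable demo of the capture group this test uses: ACL matches reference address sets as {$name}, and the regex `\{\$(\w+)\}` extracts the name even when the policy name itself contains a dot. The sample match string is illustrative.

package main

import (
    "fmt"
    "regexp"
)

func main() {
    re := regexp.MustCompile(`\{\$(\w+)\}`)
    sample := `ip4.src == {$a13757631697825269621} && outport == @a_port_group`
    for _, m := range re.FindAllStringSubmatch(sample, -1) {
        fmt.Println("address_set:", m[1]) // a13757631697825269621
    }
}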
test case
|
openshift/openshift-tests-private
|
7cc94026-22fb-4440-9158-22bd0a99ad42
|
NonHyperShiftHOST-Author:asood-Critical-65901-[FdpOvnOvs] Duplicate transactions should not be executed for network policy for every pod update.
|
['"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonHyperShiftHOST-Author:asood-Critical-65901-[FdpOvnOvs] Duplicate transactions should not be executed for network policy for every pod update.", func() {
// Customer https://issues.redhat.com/browse/OCPBUGS-4659
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
networkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-ingress-red.yaml")
testPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
exutil.By("Obtain the namespace")
ns := oc.Namespace()
exutil.By("Create a pod in namespace")
pod := pingPodResource{
name: "test-pod",
namespace: ns,
template: testPodTemplate,
}
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
_, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", pod.namespace, "pod", pod.name, "type=red").Output()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("Create a network policy")
createResourceFromFile(oc, ns, networkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-ingress-to-red"))
exutil.By("Obtain the transaction count to be 1")
podIP1, _ := getPodIP(oc, ns, pod.name)
podNodeName, podNodenameErr := exutil.GetPodNodeName(oc, ns, pod.name)
o.Expect(podNodeName).NotTo(o.BeEmpty())
o.Expect(podNodenameErr).NotTo(o.HaveOccurred())
e2e.Logf("Node on which pod %s is running %s", pod.name, podNodeName)
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", podNodeName)
o.Expect(ovnKNodePod).NotTo(o.BeEmpty())
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, podNodeName)
getCmd := fmt.Sprintf("cat /var/log/ovnkube/libovsdb.log | grep 'transacting operations' | grep '%s' ", podIP1)
logContents, logErr1 := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", getCmd)
o.Expect(logErr1).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("Log content before label update \n %s", logContents))
logLinesCount := len(strings.Split(logContents, "\n")) - 1
exutil.By("Label the pods to see transaction count is unchanged")
_, reLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", pod.namespace, "--overwrite", "pod", pod.name, "type=blue").Output()
o.Expect(reLabelErr).NotTo(o.HaveOccurred())
newLogContents, logErr2 := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", getCmd)
o.Expect(logErr2).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("Log content after label update \n %s", newLogContents))
newLogLinesCount := len(strings.Split(newLogContents, "\n")) - 1
o.Expect(logLinesCount).To(o.Equal(newLogLinesCount))
})
| |||||
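A small sketch of the line-count arithmetic above: splitting on "\n" yields a trailing empty element when the log content ends with a newline, hence the -1.

package main

import (
    "fmt"
    "strings"
)

// countLines reproduces the test's count; it assumes the content ends with a
// newline (as grep output does), otherwise it undercounts by one.
func countLines(logContents string) int {
    return len(strings.Split(logContents, "\n")) - 1
}

func main() {
    fmt.Println(countLines("transacting operations op1\ntransacting operations op2\n")) // 2
}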
test case
|
openshift/openshift-tests-private
|
5a83e9f6-d4f5-4a4e-8f23-c4bc64b27fad
|
Author:asood-High-66085-[FdpOvnOvs] Creating egress network policies for allowing to same namespace and openshift dns in namespace prevents the pod from reaching its own service
|
['"fmt"', '"path/filepath"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-High-66085-[FdpOvnOvs] Creating egress network policies for allowing to same namespace and openshift dns in namespace prevents the pod from reaching its own service", func() {
// https://issues.redhat.com/browse/OCPBUGS-4909
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
allowToNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-to-same-namespace.yaml")
allowToDNSNPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-to-openshift-dns.yaml")
podsInProject = []string{"hello-pod-1", "other-pod"}
svcURL string
)
exutil.By("Get first namespace and create another")
ns := oc.Namespace()
exutil.By("Create set of pods with different labels")
for _, podItem := range podsInProject {
pod1 := pingPodResource{
name: podItem,
namespace: ns,
template: pingPodTemplate,
}
pod1.createPingPod(oc)
waitPodReady(oc, ns, pod1.name)
}
exutil.By("Label the pods to ensure the pod does not serve the service")
_, reLabelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", ns, "--overwrite", "pod", podsInProject[1], "name=other-pod").Output()
o.Expect(reLabelErr).NotTo(o.HaveOccurred())
exutil.By("Create a service for one of the pods")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "SingleStack",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This parameter has no value and will be ignored
template: genericServiceTemplate,
}
svc.createServiceFromParams(oc)
exutil.By("Check service status")
svcOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(svcOutput).Should(o.ContainSubstring(svc.servicename))
exutil.By("Obtain the service URL")
svcURL = fmt.Sprintf("http://%s.%s.svc:27017", svc.servicename, svc.namespace)
e2e.Logf("Service URL %s", svcURL)
exutil.By("Check the connectivity to service from the pods in the namespace")
for _, podItem := range podsInProject {
output, err := e2eoutput.RunHostCmd(ns, podItem, "curl --connect-timeout 5 -s "+svcURL)
o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create the network policies in the namespace")
exutil.By("Create the allow to same namespace policy in the namespace")
createResourceFromFile(oc, ns, allowToNSNetworkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-same-namespace"))
exutil.By("Create the allow to DNS policy in the namespace")
createResourceFromFile(oc, ns, allowToDNSNPolicyFile)
output, err = oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-openshift-dns"))
exutil.By("Create another pod to serve the service")
anotherPod := pingPodResource{
name: "hello-pod-2",
namespace: ns,
template: pingPodTemplate,
}
anotherPod.createPingPod(oc)
waitPodReady(oc, ns, anotherPod.name)
podsInProject = append(podsInProject, anotherPod.name)
exutil.By("Check the connectivity to service again from the pods in the namespace")
for _, eachPod := range podsInProject {
output, err := e2eoutput.RunHostCmd(ns, eachPod, "curl --connect-timeout 5 -s "+svcURL)
o.Expect(strings.Contains(output, "Hello OpenShift!")).To(o.BeTrue())
o.Expect(err).NotTo(o.HaveOccurred())
}
})
| |||||
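A hedged sketch of what allow-to-openshift-dns.yaml presumably expresses, in client-go types; the DNS port 5353 and the metadata.name namespace selector are assumptions about the fixture, not its verified contents.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

// allowToOpenshiftDNS permits egress to pods in the openshift-dns namespace
// so name resolution keeps working once egress is otherwise restricted.
func allowToOpenshiftDNS(ns string) *networkingv1.NetworkPolicy {
    udp := corev1.ProtocolUDP
    port := intstr.FromInt(5353)
    return &networkingv1.NetworkPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "allow-to-openshift-dns", Namespace: ns},
        Spec: networkingv1.NetworkPolicySpec{
            PodSelector: metav1.LabelSelector{},
            PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
            Egress: []networkingv1.NetworkPolicyEgressRule{{
                To: []networkingv1.NetworkPolicyPeer{{
                    NamespaceSelector: &metav1.LabelSelector{
                        MatchLabels: map[string]string{"kubernetes.io/metadata.name": "openshift-dns"},
                    },
                }},
                Ports: []networkingv1.NetworkPolicyPort{{Protocol: &udp, Port: &port}},
            }},
        },
    }
}

func main() { fmt.Println(allowToOpenshiftDNS("ns1").Name) }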
test case
|
openshift/openshift-tests-private
|
93e1764e-4460-482b-ad10-02483e854a4b
|
Author:asood-Medium-64787-[FdpOvnOvs] Network policy with duplicate egress rules (same CIDR block) fails to be recreated [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Medium-64787-[FdpOvnOvs] Network policy with duplicate egress rules (same CIDR block) fails to be recreated [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-5835
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipBlockEgressTemplateDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-dual-multiple-CIDRs-template.yaml")
ipBlockEgressTemplateSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-egress-single-multiple-CIDRs-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Obtain the namespace")
ns := oc.Namespace()
exutil.By("create a hello pod in namspace")
podns := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
podns.createPingPodNode(oc)
waitPodReady(oc, podns.namespace, podns.name)
helloPodnsIP1, helloPodnsIP2 := getPodIP(oc, ns, podns.name)
var policyName string
if ipStackType == "dualstack" {
helloPodnsIPv6WithCidr := helloPodnsIP1 + "/128"
helloPodnsIPv4WithCidr := helloPodnsIP2 + "/32"
exutil.By("Create ipBlock Egress Dual with multiple CIDRs Policy in namespace")
npIPBlockNS := ipBlockCIDRsDual{
name: "ipblock-dual-multiple-cidrs-egress",
template: ipBlockEgressTemplateDual,
cidrIpv4: helloPodnsIPv4WithCidr,
cidrIpv6: helloPodnsIPv6WithCidr,
cidr2Ipv4: helloPodnsIPv4WithCidr,
cidr2Ipv6: helloPodnsIPv6WithCidr,
cidr3Ipv4: helloPodnsIPv4WithCidr,
cidr3Ipv6: helloPodnsIPv6WithCidr,
namespace: ns,
}
npIPBlockNS.createIPBlockMultipleCIDRsObjectDual(oc)
output, err := oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(npIPBlockNS.name))
policyName = npIPBlockNS.name
} else {
var npIPBlockNS ipBlockCIDRsSingle
if ipStackType == "ipv6single" {
helloPodnsIPv6WithCidr := helloPodnsIP1 + "/128"
npIPBlockNS = ipBlockCIDRsSingle{
name: "ipblock-single-multiple-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPodnsIPv6WithCidr,
cidr2: helloPodnsIPv6WithCidr,
cidr3: helloPodnsIPv6WithCidr,
namespace: ns,
}
} else {
helloPodnsIPv4WithCidr := helloPodnsIP1 + "/32"
npIPBlockNS = ipBlockCIDRsSingle{
name: "ipblock-single-multiple-cidr-egress",
template: ipBlockEgressTemplateSingle,
cidr: helloPodnsIPv4WithCidr,
cidr2: helloPodnsIPv4WithCidr,
cidr3: helloPodnsIPv4WithCidr,
namespace: ns,
}
}
npIPBlockNS.createIPBlockMultipleCIDRsObjectSingle(oc)
output, err := oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(npIPBlockNS.name))
policyName = npIPBlockNS.name
}
exutil.By("Delete the ovnkube node pod on the node")
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnKNodePod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for new ovnkube-node pod recreated on the node")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
exutil.By("Check for error message related network policy")
e2e.Logf("ovnkube-node new podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
filterString := fmt.Sprintf(" %s/%s ", ns, policyName)
e2e.Logf("Filter String %s", filterString)
logContents, logErr := exutil.GetSpecificPodLogs(oc, "openshift-ovn-kubernetes", "ovnkube-controller", ovnKNodePod, filterString)
o.Expect(logErr).NotTo(o.HaveOccurred())
e2e.Logf("Log contents \n%s", logContents)
o.Expect(strings.Contains(logContents, "failed")).To(o.BeFalse())
})
|
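A hedged sketch of the shape the multiple-CIDRs template presumably renders: one egress rule whose peers repeat the same ipBlock CIDR, which OVN-Kubernetes must handle without failing when ovnkube-node restarts (OCPBUGS-5835). Names and values are illustrative.

package main

import (
    "fmt"

    networkingv1 "k8s.io/api/networking/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// egressDuplicateCIDRs builds an egress rule with one ipBlock peer per CIDR,
// deliberately allowing duplicates as the test's policy does.
func egressDuplicateCIDRs(ns string, cidrs ...string) *networkingv1.NetworkPolicy {
    peers := make([]networkingv1.NetworkPolicyPeer, 0, len(cidrs))
    for _, c := range cidrs {
        peers = append(peers, networkingv1.NetworkPolicyPeer{
            IPBlock: &networkingv1.IPBlock{CIDR: c},
        })
    }
    return &networkingv1.NetworkPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "ipblock-single-multiple-cidr-egress", Namespace: ns},
        Spec: networkingv1.NetworkPolicySpec{
            PodSelector: metav1.LabelSelector{},
            PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
            Egress:      []networkingv1.NetworkPolicyEgressRule{{To: peers}},
        },
    }
}

func main() {
    p := egressDuplicateCIDRs("ns1", "10.0.0.5/32", "10.0.0.5/32", "10.0.0.5/32")
    fmt.Println(len(p.Spec.Egress[0].To)) // 3 peers, same CIDR
}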