element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
test case
|
openshift/openshift-tests-private
|
eccb2f2b-ba56-4f1f-9a57-c43f6ca940ab
|
Author:asood-Critical-64786-[FdpOvnOvs] Network policy in namespace that has long name fails to be recreated as the ACLs are considered duplicate [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Critical-64786-[FdpOvnOvs] Network policy in namespace that has long name fails to be recreated as the ACLs are considered duplicate [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-15371
var (
testNs = "test-64786networkpolicy-with-a-62chars-62chars-long-namespace62"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowToNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-to-same-namespace.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeList.Items).NotTo(o.BeEmpty())
exutil.By("Create a namespace with a long name")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("project", testNs, "--ignore-not-found").Execute()
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
nsCreateErr := oc.WithoutNamespace().Run("new-project").Args(testNs).Execute()
o.Expect(nsCreateErr).NotTo(o.HaveOccurred())
exutil.By("Create a hello pod in namspace")
podns := pingPodResource{
name: "hello-pod",
namespace: testNs,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Create a network policy in namespace")
createResourceFromFile(oc, testNs, allowToNSNetworkPolicyFile)
checkErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
output, err := oc.WithoutNamespace().Run("get").Args("networkpolicy", "-n", testNs).Output()
if err != nil {
e2e.Logf("%v,Waiting for policy to be created, try again ...,", err)
return false, nil
}
// Check network policy
if strings.Contains(output, "allow-to-same-namespace") {
e2e.Logf("Network policy created")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Network policy could not be created")
exutil.By("Delete the ovnkube node pod on the node")
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnKNodePod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for new ovnkube-node pod recreated on the node")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
exutil.By("Check for error message related network policy")
e2e.Logf("ovnkube-node new podname %s running on node %s", ovnKNodePod, nodeList.Items[0].Name)
filterString := fmt.Sprintf(" %s/%s ", testNs, "allow-to-same-namespace")
e2e.Logf("Filter String %s", filterString)
logContents, logErr := exutil.GetSpecificPodLogs(oc, "openshift-ovn-kubernetes", "ovnkube-controller", ovnKNodePod, filterString)
o.Expect(logErr).NotTo(o.HaveOccurred())
e2e.Logf("Log contents \n%s", logContents)
o.Expect(strings.Contains(logContents, "failed")).To(o.BeFalse())
})
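The policy-wait loop above uses the deprecated wait.Poll helper. A minimal, self-contained sketch of the same retry pattern against wait.PollUntilContextTimeout (which this suite already uses in other tests) might look like the following; getNetworkPolicies is a hypothetical stand-in for the `oc get networkpolicy` call, not a helper from the repo:

```go
// Sketch only: same retry loop, non-deprecated API.
package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// getNetworkPolicies stands in for `oc get networkpolicy -n <ns>` (hypothetical).
func getNetworkPolicies(ns string) (string, error) {
	return "allow-to-same-namespace", nil
}

func main() {
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 100*time.Second, false,
		func(ctx context.Context) (bool, error) {
			out, err := getNetworkPolicies("test-ns")
			if err != nil {
				// Transient failure: swallow the error and retry until the timeout.
				return false, nil
			}
			return strings.Contains(out, "allow-to-same-namespace"), nil
		})
	fmt.Println("poll finished, err:", err)
}
```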
test case
|
openshift/openshift-tests-private
|
e735eb05-411e-4f89-9da9-81eea0d7af44
|
NonHyperShiftHOST-Author:asood-High-64788-[FdpOvnOvs] Same network policies across multiple namespaces fail to be recreated [Disruptive].
|
['"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonHyperShiftHOST-Author:asood-High-64788-[FdpOvnOvs] Same network policies across multiple namespaces fail to be recreated [Disruptive].", func() {
// This is for customer bug https://issues.redhat.com/browse/OCPBUGS-11447
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
networkPolicyFileSingle = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-single-CIDR-template.yaml")
networkPolicyFileDual = filepath.Join(buildPruningBaseDir, "networkpolicy/ipblock/ipBlock-ingress-dual-CIDRs-template.yaml")
policyName = "ipblock-64788"
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a test pods")
createResourceFromFile(oc, ns, testPodFile)
err := waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "The pod with label name=test-pods is not ready")
testPod := getPodName(oc, ns, "name=test-pods")
nodeName, err := exutil.GetPodNodeName(oc, ns, testPod[0])
o.Expect(err).NotTo(o.HaveOccurred())
helloPod1ns1IPv6, helloPod1ns1IPv4 := getPodIP(oc, ns, testPod[0])
helloPod1ns1IPv4WithCidr := helloPod1ns1IPv4 + "/32"
helloPod1ns1IPv6WithCidr := helloPod1ns1IPv6 + "/128"
exutil.By("Create ipBlock Ingress CIDRs Policy in namespace")
if ipStackType == "dualstack" {
npIPBlockNS1 := ipBlockCIDRsDual{
name: policyName,
template: networkPolicyFileDual,
cidrIpv4: helloPod1ns1IPv4WithCidr,
cidrIpv6: helloPod1ns1IPv6WithCidr,
namespace: ns,
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
} else {
// For single stack, getPodIP returns an empty second value, so reuse the
// helloPod1ns1IPv6 variable and append the CIDR suffix appropriate to the stack.
var helloPod1ns1IPWithCidr string
if ipStackType == "ipv6single" {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6WithCidr
} else {
helloPod1ns1IPWithCidr = helloPod1ns1IPv6 + "/32"
}
npIPBlockNS1 := ipBlockCIDRsSingle{
name: policyName,
template: networkPolicyFileSingle,
cidr: helloPod1ns1IPWithCidr,
namespace: ns,
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
}
exutil.By("Check the policy has been created")
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(policyName))
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeName)
exutil.By("Get the ACL for the created policy")
// List ACLs related to the network policy name
aclName := fmt.Sprintf("'NP:%s:%s:Ingres'", ns, policyName)
listACLCmd := fmt.Sprintf("ovn-nbctl find acl name='NP\\:%s\\:%s\\:Ingres'", ns, policyName)
listAclOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAclOutput).NotTo(o.BeEmpty())
e2e.Logf(listAclOutput)
var aclMap map[string]string
var listPGCmd string
// Dual stack has two ACLs per policy; the UUIDs of both are needed to get the port group
if ipStackType == "dualstack" {
listAcls := strings.Split(listAclOutput, "\n\n")
aclMap = nbContructToMap(listAcls[0])
o.Expect(len(aclMap)).NotTo(o.Equal(0))
aclMap1 := nbContructToMap(listAcls[1])
o.Expect(len(aclMap1)).NotTo(o.Equal(0))
listPGCmd = fmt.Sprintf("ovn-nbctl find port-group acls='[%s, %s]'", aclMap["_uuid"], aclMap1["_uuid"])
} else {
aclMap = nbContructToMap(listAclOutput)
o.Expect(len(aclMap)).NotTo(o.Equal(0))
listPGCmd = fmt.Sprintf("ovn-nbctl find port-group acls='[%s]'", aclMap["_uuid"])
}
aclMap["name"] = aclName
exutil.By("Get the port group for the created policy")
listPGOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listPGCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listPGOutput).NotTo(o.BeEmpty())
e2e.Logf(listPGOutput)
pgMap := nbContructToMap(listPGOutput)
o.Expect(len(pgMap)).NotTo(o.Equal(0))
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("networkpolicy", policyName, "-n", ns).Execute()
exutil.By("Create a duplicate ACL")
createAclCmd := fmt.Sprintf("ovn-nbctl --id=@copyacl create acl name=copyacl direction=%s action=%s -- add port_group %s acl @copyacl", aclMap["direction"], aclMap["action"], pgMap["_uuid"])
idOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", createAclCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(idOutput).NotTo(o.BeEmpty())
e2e.Logf(idOutput)
exutil.By("Set properties of duplicate ACL")
setAclPropertiesCmd := fmt.Sprintf("ovn-nbctl set acl %s match='%s' priority=%s meter=%s", idOutput, aclMap["match"], aclMap["priority"], aclMap["meter"])
_, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", setAclPropertiesCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
exutil.By("Set name of duplicate ACL")
dupAclName := fmt.Sprintf("'NP\\:%s\\:%s\\:Ingre0'", ns, policyName)
setAclNameCmd := fmt.Sprintf("ovn-nbctl set acl %s name=%s", idOutput, dupAclName)
_, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", setAclNameCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
exutil.By("Check duplicate ACL is created successfully")
listDupACLCmd := fmt.Sprintf("ovn-nbctl find acl name='NP\\:%s\\:%s\\:Ingre0'", ns, policyName)
listDupAclOutput, listErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listDupACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listDupAclOutput).NotTo(o.BeEmpty())
e2e.Logf(listDupAclOutput)
exutil.By("Delete the ovnkube node pod on the node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
e2e.Logf("ovnkube-node podname %s running on node %s", ovnKNodePod, nodeName)
defer waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", ovnKNodePod, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for new ovnkube-node pod to be recreated on the node")
waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
exutil.By("Check the duplicate ACL is removed")
listAclOutput, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAclOutput).NotTo(o.BeEmpty(), listAclOutput)
listDupAclOutput, listErr = exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", listDupACLCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listDupAclOutput).To(o.BeEmpty())
})
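Both tests above lean on nbContructToMap to turn `ovn-nbctl find`/`list` output into a lookup table. The helper's implementation is not shown in this dataset; a plausible sketch, assuming the usual `key : value` line layout that ovn-nbctl prints, is:

```go
// Plausible sketch of an nbContructToMap-style parser (not the repo's helper).
package main

import (
	"fmt"
	"strings"
)

func nbOutputToMap(out string) map[string]string {
	m := make(map[string]string)
	for _, line := range strings.Split(out, "\n") {
		// Split on the first colon only; values (e.g. IPv6 matches) may
		// themselves contain colons.
		parts := strings.SplitN(line, ":", 2)
		if len(parts) != 2 {
			continue
		}
		m[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
	}
	return m
}

func main() {
	sample := "_uuid               : 7f3c\naction              : allow-related\ndirection           : to-lport"
	m := nbOutputToMap(sample)
	fmt.Println(m["_uuid"], m["action"], m["direction"])
}
```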
test case
|
openshift/openshift-tests-private
|
aaf30103-f1c8-4228-9d39-a163a734c613
|
Author:asood-Medium-68660-[FdpOvnOvs] Exposed route of the service should be accessible when allowing inbound traffic from any namespace network policy is created.
|
['"fmt"', '"os/exec"', '"path/filepath"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Medium-68660-[FdpOvnOvs] Exposed route of the service should be accessible when allowing inbound traffic from any namespace network policy is created.", func() {
// https://issues.redhat.com/browse/OCPBUGS-14632
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowFromAllNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-all-namespaces.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
serviceName = "test-service-68660"
)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a hello pod in namspace")
podns := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Create a test service which is in front of the above pod")
svc := genericServiceResource{
servicename: serviceName,
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "PreferDualStack",
internalTrafficPolicy: "Local",
externalTrafficPolicy: "",
template: genericServiceTemplate,
}
svc.createServiceFromParams(oc)
exutil.By("Expose the service through a route")
err := oc.AsAdmin().WithoutNamespace().Run("expose").Args("svc", serviceName, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svcRoute, routeErr := oc.AsAdmin().Run("get").Args("route", serviceName, "-n", ns, "-o=jsonpath={.spec.host}").Output()
o.Expect(routeErr).NotTo(o.HaveOccurred())
o.Expect(svcRoute).ShouldNot(o.Equal(""))
exutil.By("Access the route before network policy creation")
var svcErr error
var routeCurlOutput []byte
o.Eventually(func() string {
routeCurlOutput, svcErr = exec.Command("bash", "-c", "curl -sI "+svcRoute).Output()
if svcErr != nil {
e2e.Logf("Wait for service to be accessible through route, %v", svcErr)
}
return string(routeCurlOutput)
}, "15s", "5s").Should(o.ContainSubstring("200 OK"), fmt.Sprintf("Service inaccessible through route %s", string(routeCurlOutput)))
exutil.By("Create a network policy in namespace")
createResourceFromFile(oc, ns, allowFromAllNSNetworkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-all-namespaces"))
exutil.By("Access the route after network policy creation")
routeCurlOutput, svcErr = exec.Command("bash", "-c", "curl -sI "+svcRoute).Output()
o.Expect(svcErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(routeCurlOutput), "200 OK")).To(o.BeTrue())
})
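The route check above shells out to `curl -sI`, i.e. an HTTP HEAD request. A sketch of the same check using only the Go standard library, with an illustrative (not real) route host, could be:

```go
// Sketch: route reachability check without shelling out to curl.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func routeReturnsOK(host string) bool {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Head("http://" + host) // curl -sI issues a HEAD request
	if err != nil {
		return false
	}
	resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

func main() {
	// Retry loop mirroring the test's Eventually(..., "15s", "5s").
	for i := 0; i < 3; i++ {
		if routeReturnsOK("test-service-68660.apps.example.com") { // illustrative host
			fmt.Println("route reachable")
			return
		}
		time.Sleep(5 * time.Second)
	}
	fmt.Println("route not reachable within budget")
}
```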
test case
|
openshift/openshift-tests-private
|
efe24297-1c78-4985-98bd-ace5d96810ad
|
NonPreRelease-PreChkUpgrade-Author:asood-Critical-69236-Network policy in namespace that has long name is created successfully post upgrade
|
['"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonPreRelease-PreChkUpgrade-Author:asood-Critical-69236-Network policy in namespace that has long name is created successfully post upgrade", func() {
var (
testNs = "test-the-networkpolicy-with-a-62chars-62chars-long-namespace62"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowSameNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-same-namespace.yaml")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
helloStatefulsetFile = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a hello pod in the namespace")
podns := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Create a namespace with a long name")
oc.CreateSpecifiedNamespaceAsAdmin(testNs)
exutil.By("Create a hello pod in namespace that has long name")
createResourceFromFile(oc, testNs, helloStatefulsetFile)
podErr := waitForPodWithLabelReady(oc, testNs, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodName := getPodName(oc, testNs, "app=hello")[0]
exutil.By("Create a network policy in namespace")
createResourceFromFile(oc, testNs, allowSameNSNetworkPolicyFile)
output, err := oc.AsAdmin().Run("get").Args("networkpolicy", "-n", testNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-same-namespace"))
exutil.By("Verify the network policy in namespace with long name pre upgrade is functional ")
CurlPod2PodFail(oc, ns, "hello-pod", testNs, helloPodName)
})
test case
|
openshift/openshift-tests-private
|
270fd5e0-c066-4598-be79-daa36dc85cc2
|
NonPreRelease-PstChkUpgrade-Author:asood-Critical-69236-Network policy in namespace that has long name is created successfully post upgrade
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("NonPreRelease-PstChkUpgrade-Author:asood-Critical-69236-Network policy in namespace that has long name is created successfully post upgrade", func() {
var (
testNs = "test-the-networkpolicy-with-a-62chars-62chars-long-namespace62"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", testNs).Execute()
if nsErr != nil {
g.Skip("Skip the PstChkUpgrade test as test-the-networkpolicy-with-a-62chars-62chars-long-namespace62 namespace does not exist, PreChkUpgrade test did not run")
}
defer oc.DeleteSpecifiedNamespaceAsAdmin(testNs)
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create a hello pod in the namespace")
podns := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
podns.createPingPod(oc)
waitPodReady(oc, podns.namespace, podns.name)
exutil.By("Verify the network policy in namespace with long name post upgrade is functional ")
podErr := waitForPodWithLabelReady(oc, testNs, "app=hello")
exutil.AssertWaitPollNoErr(podErr, "The statefulSet pod is not ready")
helloPodName := getPodName(oc, testNs, "app=hello")[0]
CurlPod2PodFail(oc, ns, "hello-pod", testNs, helloPodName)
})
test case
|
openshift/openshift-tests-private
|
b1f55709-b9df-4252-8859-320ecab6e393
|
Author:asood-Low-75540-Network Policy Validation
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:asood-Low-75540-Network Policy Validation", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
networkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/netpol-30920-75540.yaml")
)
exutil.By("OCPBUGS-30920 Verify the network policy is not created with invalid value")
ns := oc.Namespace()
o.Expect(createResourceFromFileWithError(oc, ns, networkPolicyFile)).To(o.HaveOccurred())
})
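For context on what this validation test exercises: the fixture presumably carries a field value the API server rejects at create time. A hedged illustration below builds a NetworkPolicy with a malformed ipBlock CIDR via the upstream Go types; the concrete invalid field in netpol-30920-75540.yaml is not reproduced here, and "10.0.0.0/999" is just an example of a value validation refuses:

```go
// Illustration only: an obviously invalid ipBlock CIDR of the kind the
// API server's validation rejects on create.
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	np := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "invalid-cidr-policy"},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				From: []networkingv1.NetworkPolicyPeer{{
					IPBlock: &networkingv1.IPBlock{CIDR: "10.0.0.0/999"}, // invalid prefix length
				}},
			}},
		},
	}
	fmt.Printf("policy %q with CIDR %s would be rejected by validation\n",
		np.Name, np.Spec.Ingress[0].From[0].IPBlock.CIDR)
}
```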
test case
|
openshift/openshift-tests-private
|
5988fbe1-2313-4f5b-83ad-568f0c544c39
|
Author:meinli-High-70009-Pod IP is missing from OVN DB AddressSet when using allow-namespace-only network policy
|
['"context"', '"fmt"', '"path/filepath"', '"regexp"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:meinli-High-70009-Pod IP is missing from OVN DB AddressSet when using allow-namespace-only network policy", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
allowSameNSNetworkPolicyFile = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-same-namespace.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one schedulable node, but the cluster has none")
}
exutil.By("1. Get namespace")
ns := oc.Namespace()
exutil.By("2. Create a network policy in namespace")
createResourceFromFile(oc, ns, allowSameNSNetworkPolicyFile)
output, err := oc.AsAdmin().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-same-namespace"))
ovnNodePod := ovnkubeNodePod(oc, nodeList.Items[0].Name)
o.Expect(ovnNodePod).NotTo(o.BeEmpty())
exutil.By("3. Check the acl from the port-group from the OVNK leader ovnkube-node")
listPGCmd := fmt.Sprintf("ovn-nbctl find port-group | grep -C 2 '%s\\:allow-same-namespace'", ns)
listPGCOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listPGCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listPGCOutput).NotTo(o.BeEmpty())
e2e.Logf("Output %s", listPGCOutput)
exutil.By("4. Check the addresses in ACL's address-set is empty")
PGCMap := nbContructToMap(listPGCOutput)
acls := strings.Split(strings.Trim(PGCMap["acls"], "[]"), ", ")
o.Expect(len(acls)).To(o.Equal(2))
listAclCmd := fmt.Sprintf("ovn-nbctl list acl %s", strings.Join(acls, " "))
listAclOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAclCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAclOutput).NotTo(o.BeEmpty())
regex := `\{\$(\w+)\}`
re := regexp.MustCompile(regex)
addrSetNames := re.FindAllString(listAclOutput, -1)
if len(addrSetNames) == 0 {
e2e.Fail("No matched address_set name found")
}
addrSetName := strings.Trim(addrSetNames[0], "{$}")
o.Expect(addrSetName).NotTo(o.BeEmpty())
listAddressSetCmd := fmt.Sprintf("ovn-nbctl list address_set %s", addrSetName)
listAddrOutput, listErr := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAddressSetCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
AddrMap := nbContructToMap(listAddrOutput)
addrs := strings.Trim(AddrMap["addresses"], "[]")
o.Expect(addrs).To(o.BeEmpty())
exutil.By("5. Create a hello pod on non existent node")
nonexistNodeName := "doesnotexist-" + getRandomString()
pod1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nonexistNodeName,
template: pingPodNodeTemplate,
}
pod1.createPingPodNode(oc)
exutil.By("6. Verify address is not added to address-set")
listAddrOutput, listErr = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAddressSetCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
AddrMap = nbContructToMap(listAddrOutput)
addrs = strings.Trim(AddrMap["addresses"], "[]")
o.Expect(addrs).To(o.BeEmpty())
exutil.By("7. Delete the pods that did not reach running state and create it with valid node name")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod1.name, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pod1.nodename = nodeList.Items[0].Name
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("8. Verify address is added to address-set")
listAddrOutput, listErr = exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnNodePod, listAddressSetCmd)
o.Expect(listErr).NotTo(o.HaveOccurred())
o.Expect(listAddrOutput).NotTo(o.BeEmpty())
AddrMap = nbContructToMap(listAddrOutput)
addrs = strings.Trim(AddrMap["addresses"], "[\"]")
o.Expect(addrs).NotTo(o.BeEmpty())
ipStack := checkIPStackType(oc)
if (ipStack == "ipv6single") || (ipStack == "ipv4single") {
Pod1IP, _ := getPodIP(oc, ns, pod1.name)
o.Expect(addrs == Pod1IP).To(o.BeTrue())
} else {
_, Pod1IPv4 := getPodIP(oc, ns, pod1.name)
o.Expect(addrs == Pod1IPv4).To(o.BeTrue())
}
})
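The address-set lookup above hinges on the `\{\$(\w+)\}` regex pulling the address_set reference out of the ACL match expression. A small standalone demonstration, with an illustrative match string in the shape ovn-kubernetes generates:

```go
// Standalone demo of the address_set name extraction used in the test.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Illustrative ACL match expression; the {$...} token references an address_set.
	aclMatch := `ip4.src == {$a4743557153382636559} && outport == @a6894368623664266604`
	re := regexp.MustCompile(`\{\$(\w+)\}`)
	names := re.FindAllString(aclMatch, -1)
	if len(names) == 0 {
		fmt.Println("no address_set reference found")
		return
	}
	// Trim the {$...} wrapper, as the test does with strings.Trim.
	fmt.Println(strings.Trim(names[0], "{$}")) // prints: a4743557153382636559
}
```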
test case
|
openshift/openshift-tests-private
|
69856590-8916-40cd-8fac-c0a3a8845685
|
Author:meinli-High-69234-high memory usage on ovnkube-master leader pods on some clusters when a network policy is deleted. [Serial]
|
['"context"', '"fmt"', '"os/exec"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy.go
|
g.It("Author:meinli-High-69234-high memory usage on ovnkube-master leader pods on some clusters when a network policy is deleted. [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ingressNPPolicyTemplate = filepath.Join(buildPruningBaseDir, "networkpolicy/generic-networkpolicy-template.yaml")
matchLabelKey = "kubernetes.io/metadata.name"
master_port int32 = 8100
)
exutil.By("0. Get namespace.\n")
ns := oc.Namespace()
exutil.By("1. Get port from ovnk-master leader pod.\n")
ovnMasterPodName := getOVNKMasterPod(oc)
ovnMasterPodNames := getPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-control-plane")
var port string
var flag int
for i, ovnPod := range ovnMasterPodNames {
if ovnPod == ovnMasterPodName {
port = strconv.Itoa(int(master_port))
flag = i + 1
break
}
master_port++
}
exutil.By("2. Get initial pprof goroutine value from ovnk-master leader after enabling forwarding.\n")
cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("-n", "openshift-ovn-kubernetes", ovnMasterPodName, port+":29103", "--request-timeout=40s").Background()
o.Expect(err).NotTo(o.HaveOccurred())
defer cmd.Process.Kill()
output, err := exec.Command("bash", "-c", "ps -ef | grep 29103").Output()
e2e.Logf("output is: %s", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(ovnMasterPodName))
// wait for the port to start listening
checkErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 100*time.Second, false, func(cxt context.Context) (bool, error) {
checkOutput, _ := exec.Command("bash", "-c", "lsof -iTCP:"+port+" -sTCP:LISTEN").Output()
// no need to check the error since some systems write to stderr even for valid results
if len(checkOutput) != 0 {
return true, nil
}
e2e.Logf("Port is not listening, trying again...")
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Port cannot listen")
getGoroutineOut := "curl -ks --noproxy localhost http://localhost:" + port + "/debug/pprof/goroutine\\?debug\\=1 | grep -C 1 'periodicallyRetryResources' | awk 'NR==1{print $1}'"
PreGoroutineOut, err := exec.Command("bash", "-c", getGoroutineOut).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(PreGoroutineOut).NotTo(o.BeEmpty())
e2e.Logf("PreGoroutineOut is: %s", PreGoroutineOut)
exutil.By("3. Get initial ovnk-master pod memory usage.\n")
checkMemoryCmd := fmt.Sprintf("oc -n openshift-ovn-kubernetes adm top pod | sed '1d' | awk 'NR==%d{print $1,$3}'", flag)
checkMemory1, err := exec.Command("bash", "-c", checkMemoryCmd).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Repeat creating, deleting then recreating same network policy 15 times.\n")
networkPolicyResource := networkPolicyResource{
name: "ingress-networkpolicy",
namespace: ns,
policy: "ingress",
policyType: "Ingress",
direction1: "from",
namespaceSel1: "matchLabels",
namespaceSelKey1: matchLabelKey,
namespaceSelVal1: ns,
template: ingressNPPolicyTemplate,
}
for i := 0; i < 15; i++ {
// Create network policy
networkPolicyResource.createNetworkPolicy(oc)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, networkPolicyResource.name)).To(o.BeTrue())
// Delete network policy
removeResource(oc, true, true, "networkpolicy", networkPolicyResource.name, "-n", ns)
}
exutil.By("5. Compare the goroutine call value between pre and post output.\n")
PostGoroutineOut, err := exec.Command("bash", "-c", getGoroutineOut).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(PostGoroutineOut).NotTo(o.BeEmpty())
e2e.Logf("PostGoroutineOut is: %s", PostGoroutineOut)
o.Expect(string(PreGoroutineOut) == string(PostGoroutineOut)).To(o.BeTrue())
exutil.By("6. Verify ovnk-master pod memory usage should be the same as previous.\n")
// wait for ovnk-master leader pod to be stable
checkErr = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
checkMemory2, err := exec.Command("bash", "-c", checkMemoryCmd).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
if string(checkMemory2) == string(checkMemory1) {
e2e.Logf("Memory usage is the same as previous.")
return true, nil
}
e2e.Logf("%v,Waiting for ovnk-master pod stable, try again ...,", err)
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Check the memory usage timeout.")
})
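The goroutine count above is scraped with curl plus grep/awk over the pprof debug endpoint. A sketch of the same scrape in plain Go, mirroring the test's "first field of the line preceding the match" logic (port 8100 as in the test, endpoint path per net/http/pprof; assumes the port-forward is already running):

```go
// Sketch: read the goroutine profile over the forwarded port without curl.
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://localhost:8100/debug/pprof/goroutine?debug=1")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	defer resp.Body.Close()

	var prev string
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "periodicallyRetryResources") {
			// First field of the preceding line is the goroutine count,
			// matching the test's `grep -C 1 | awk 'NR==1{print $1}'`.
			if fields := strings.Fields(prev); len(fields) > 0 {
				fmt.Println("goroutine count field:", fields[0])
			}
			return
		}
		prev = line
	}
	fmt.Println("pattern not found in profile")
}
```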
test
|
openshift/openshift-tests-private
|
31c562e6-fa54-4472-8a9e-c77348614066
|
networkpolicy_udn
|
import (
"fmt"
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy_udn.go
|
package networking
import (
"fmt"
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN udn networkpolicy", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-udn", exutil.KubeConfigPath())
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
)
g.BeforeEach(func() {
SkipIfNoFeatureGate(oc, "NetworkSegmentation")
})
g.It("Author:asood-High-78292-Validate ingress allow-same-namespace and allow-all-namespaces network policies in Layer 3 NAD.", func() {
var (
testID = "78292"
testDataDir = exutil.FixturePath("testdata", "networking")
udnNADTemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
ingressDenyFile = filepath.Join(testDataDir, "networkpolicy/default-deny-ingress.yaml")
ingressAllowSameNSFile = filepath.Join(testDataDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressAllowAllNSFile = filepath.Join(testDataDir, "networkpolicy/allow-from-all-namespaces.yaml")
mtu int32 = 1300
nsPodMap = make(map[string][]string)
nadResourcename = "l3-network-"
topology = "layer3"
)
ipStackType := checkIPStackType(oc)
var nadName string
var nadNS []string = make([]string, 0, 4)
nsDefaultNetwork := oc.Namespace()
nadNetworkName := []string{"l3-network-test-1", "l3-network-test-2"}
exutil.By("1.0 Create 4 UDN namespaces")
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
nadNS = append(nadNS, oc.Namespace())
}
nadNS = append(nadNS, nsDefaultNetwork)
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.152.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2012:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16/24,2012:100:200::0/60"}
}
}
exutil.By("2. Create Layer 3 NAD in first two namespaces")
// Same network name in both namespaces
nad := make([]udnNetDefResource, 4)
for i := 0; i < 2; i++ {
nadName = nadResourcename + strconv.Itoa(i) + "-" + testID
if i == 1 {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nadNS[i], "team=ocp").Execute()).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("Create NAD %s in namespace %s", nadName, nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadName,
namespace: nadNS[i],
nad_network_name: nadNetworkName[0],
topology: topology,
subnet: subnet[0],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadName,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("3. Create two pods in each namespace")
pod := make([]udnPodResource, 4)
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
CurlPod2PodPassUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][1], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("4. Create default deny ingress type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], ingressDenyFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
exutil.By("5. Validate traffic between pods in first namespace and from pods in second namespace")
CurlPod2PodFailUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][1], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("6. Create allow same namespace ingress type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], ingressAllowSameNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-same-namespace"))
exutil.By("7. Validate traffic between pods in first namespace works but traffic from pod in second namespace is blocked")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][1], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("8. Create allow ingress from all namespaces networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], ingressAllowAllNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-all-namespaces"))
exutil.By("9. Validate traffic from pods in second namespace")
CurlPod2PodPassUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By(fmt.Sprintf("10. Create NAD with same network %s in namespace %s as the first two namespaces and %s (different network) in %s", nadNetworkName[0], nadNS[2], nadNetworkName[1], nadNS[3]))
for i := 2; i < 4; i++ {
nad[i] = udnNetDefResource{
nadname: nadResourcename + strconv.Itoa(i) + "-" + testID,
namespace: nadNS[i],
nad_network_name: nadNetworkName[i-2],
topology: topology,
subnet: subnet[i-2],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename + strconv.Itoa(i) + "-" + testID,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("11. Create one pod each in last three namespaces, last one being without NAD")
pod = make([]udnPodResource, 6)
for i := 2; i < 5; i++ {
for j := 0; j < 1; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
exutil.By("12. Validate traffic from pods in third and fourth namespace works but not from pod in fifth namespace (default)")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[2], nsPodMap[nadNS[2]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[3], nsPodMap[nadNS[3]][0])
CurlPod2PodFail(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[4], nsPodMap[nadNS[4]][0])
exutil.By("13. Update allow-all-namespaces policy with label to allow ingress traffic from pod in second namespace only")
npPatch := `[{"op": "replace", "path": "/spec/ingress/0/from/0/namespaceSelector", "value": {"matchLabels": {"team": "ocp" }}}]`
patchReplaceResourceAsAdmin(oc, "networkpolicy/allow-from-all-namespaces", npPatch, nadNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "allow-from-all-namespaces", "-n", nadNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
exutil.By("14. Validate traffic from pods in second namespace works but fails from pod in third namespace")
CurlPod2PodPassUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[2], nsPodMap[nadNS[2]][0], nadNS[0], nsPodMap[nadNS[0]][0])
})
g.It("Author:asood-High-79092-Validate egress allow-same-namespace and allow-all-namespaces network policies in Layer 2 NAD.", func() {
var (
testID = "79092"
testDataDir = exutil.FixturePath("testdata", "networking")
udnNADTemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
egressDenyFile = filepath.Join(testDataDir, "networkpolicy/default-deny-egress.yaml")
egressAllowSameNSFile = filepath.Join(testDataDir, "networkpolicy/allow-to-same-namespace.yaml")
egressAllowAllNSFile = filepath.Join(testDataDir, "networkpolicy/allow-to-all-namespaces.yaml")
mtu int32 = 1300
nsPodMap = make(map[string][]string)
nadResourcename = "l2-network-"
topology = "layer2"
)
ipStackType := checkIPStackType(oc)
var nadName string
var nadNS []string = make([]string, 0, 4)
nadNetworkName := []string{"l2-network-test-1", "l2-network-test-2"}
nsDefaultNetwork := oc.Namespace()
exutil.By("1.0 Create 4 UDN namespaces")
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
nadNS = append(nadNS, oc.Namespace())
}
nadNS = append(nadNS, nsDefaultNetwork)
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16", "10.152.0.0/16"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2012:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"}
}
}
exutil.By("2. Create Layer 2 NAD in first two namespaces")
nad := make([]udnNetDefResource, 4)
for i := 0; i < 2; i++ {
nadName = nadResourcename + strconv.Itoa(i) + "-" + testID
if i == 1 {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nadNS[i], "team=ocp").Execute()).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("Create NAD %s in namespace %s", nadName, nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadName,
namespace: nadNS[i],
nad_network_name: nadNetworkName[0],
topology: topology,
subnet: subnet[0],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadName,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("3. Create two pods in each namespace")
pod := make([]udnPodResource, 4)
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[0], nsPodMap[nadNS[0]][1])
exutil.By("4. Create default deny egresss type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], egressDenyFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-egress"))
exutil.By("5. Validate traffic between pods in first namespace and from pods in second namespace")
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[0], nsPodMap[nadNS[0]][1])
exutil.By("6. Create allow egress to same namespace networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], egressAllowSameNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-same-namespace"))
exutil.By("7. Validate traffic between pods in first namespace works but traffic from pod in second namespace is blocked")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[0], nsPodMap[nadNS[0]][1])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
exutil.By("8. Create allow all namespaces egress type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], egressAllowAllNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-all-namespaces"))
exutil.By("9. Validate traffic to pods in second namespace")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
exutil.By(fmt.Sprintf("10. Create NAD with same network %s in namespace %s as the first two namespaces and %s (different network) in %s", nadNetworkName[0], nadNS[2], nadNetworkName[1], nadNS[3]))
for i := 2; i < 4; i++ {
nad[i] = udnNetDefResource{
nadname: nadResourcename + strconv.Itoa(i) + "-" + testID,
namespace: nadNS[i],
nad_network_name: nadNetworkName[i-2],
topology: topology,
subnet: subnet[i-2],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename + strconv.Itoa(i) + "-" + testID,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("11. Create one pod each in last three namespaces, last one being without NAD")
pod = make([]udnPodResource, 6)
for i := 2; i < 5; i++ {
for j := 0; j < 1; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
exutil.By("12. Validate traffic to pods in third and fourth namespace works but not to pod in fifth namespace (default)")
CurlPod2PodPassUDN(oc, nadNS[2], nsPodMap[nadNS[2]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[3], nsPodMap[nadNS[3]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFail(oc, nadNS[4], nsPodMap[nadNS[4]][0], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("13. Update allow-all-namespaces policy with label to allow ingress traffic to pod in second namespace only")
npPatch := `[{"op": "replace", "path": "/spec/egress/0/to/0/namespaceSelector", "value": {"matchLabels": {"team": "ocp" }}}]`
patchReplaceResourceAsAdmin(oc, "networkpolicy/allow-to-all-namespaces", npPatch, nadNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "allow-to-all-namespaces", "-n", nadNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
exutil.By("14. Validate traffic to pods in second namespace works but fails to pod in third namespace")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[2], nsPodMap[nadNS[2]][0])
})
g.It("Author:asood-High-79093-Validate ingress CIDR block with and without except clause network policies in Layer 3 CUDN.", func() {
var (
testID = "79093"
testDataDir = exutil.FixturePath("testdata", "networking")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
udnPodNodeTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template_node.yaml")
ingressDenyFile = filepath.Join(testDataDir, "networkpolicy/default-deny-ingress.yaml")
ipBlockIngressTemplateDual = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-ingress-dual-CIDRs-template.yaml")
ipBlockIngressTemplateSingle = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-ingress-single-CIDR-template.yaml")
nsPodMap = make(map[string][]string)
topology = "layer3"
matchLabelKey = "test.io"
matchLabelVal = "ns-" + testID
cudnCRDName = "cudn-l3-network-" + testID
udnCRDName = "udn-l3-network-" + testID + "-0"
)
ipStackType := checkIPStackType(oc)
var allNS []string = make([]string, 0, 3)
var ipBlockPolicyName string
var podCount int
nsDefaultNetwork := oc.Namespace()
exutil.By("1.0 Create 3 UDN namespaces")
for i := 0; i < 3; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
// Label first two for CUDN
if i < 2 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchLabelVal)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// Annotate first namespace for ACL logging
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "alert"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", allNS[0], aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
allNS = append(allNS, nsDefaultNetwork)
var cidr0, ipv4cidr0, ipv6cidr0, cidr1, ipv4cidr1, ipv6cidr1 string
if ipStackType == "ipv4single" {
cidr0 = "10.150.0.0/16"
cidr1 = "10.152.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr0 = "2010:100:200::0/48"
cidr1 = "2012:100:200::0/48"
} else {
ipv4cidr0 = "10.150.0.0/16"
ipv4cidr1 = "10.152.0.0/16"
ipv6cidr0 = "2010:100:200::0/48"
ipv6cidr1 = "2012:100:200::0/48"
}
}
exutil.By("2. Create default deny ingress type networkpolicy in first namespace before UDN is created")
createResourceFromFile(oc, allNS[0], ingressDenyFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", allNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
exutil.By("3. Create Layer 3 UDN in first two namespaces with CUDN resource and UDN in third")
defer removeResource(oc, true, true, "clusteruserdefinednetwork", cudnCRDName)
_, cudnErr := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchLabelVal, cudnCRDName, ipv4cidr0, ipv6cidr0, cidr0, topology)
o.Expect(cudnErr).NotTo(o.HaveOccurred())
defer removeResource(oc, true, true, "userdefinednetwork", udnCRDName)
createGeneralUDNCRD(oc, allNS[2], udnCRDName, ipv4cidr1, ipv6cidr1, cidr1, topology)
exutil.By("4. Create two pods in each namespace")
podCount = 2
pod := make([]udnPodResource, 4)
for i := 0; i < len(allNS); i++ {
if i == 2 {
podCount = 1
}
for j := 0; j < podCount; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: allNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
defer removeResource(oc, true, true, "pod", pod[j].name, "-n", pod[j].namespace)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
exutil.By("5. Validate traffic between pods in first namespace and from pods in second namespace")
CurlPod2PodFailUDN(oc, allNS[1], nsPodMap[allNS[1]][0], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodFailUDN(oc, allNS[0], nsPodMap[allNS[0]][1], allNS[0], nsPodMap[allNS[0]][0])
exutil.By("6. Get node name and IPs of first pod in first namespace")
podNodeName, podNodeNameErr := exutil.GetPodNodeName(oc, allNS[0], nsPodMap[allNS[0]][0])
o.Expect(podNodeNameErr).NotTo(o.HaveOccurred())
o.Expect(podNodeName).NotTo(o.BeEmpty())
exutil.By("7. Validate verdict=drop message")
output, logErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", podNodeName, "--path=ovn/acl-audit-log.log").Output()
o.Expect(logErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=drop")).To(o.BeTrue())
exutil.By("8. Create IP Block ingress policy to allow traffic from first pod in second namespace to first pod in first")
var cidrIpv4, cidrIpv6, cidr string
if ipStackType == "dualstack" {
exutil.By(fmt.Sprintf("Create ipBlock Ingress Dual CIDRs Policy in %s", allNS[0]))
pod1ns1IPv6, pod1ns1IPv4 := getPodIPUDN(oc, allNS[1], nsPodMap[allNS[1]][0], "ovn-udn1")
cidrIpv4 = pod1ns1IPv4 + "/32"
cidrIpv6 = pod1ns1IPv6 + "/128"
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress",
template: ipBlockIngressTemplateDual,
cidrIpv4: cidrIpv4,
cidrIpv6: cidrIpv6,
namespace: allNS[0],
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
ipBlockPolicyName = npIPBlockNS1.name
} else {
pod1ns1, _ := getPodIPUDN(oc, allNS[1], nsPodMap[allNS[1]][0], "ovn-udn1")
if ipStackType == "ipv6single" {
cidr = pod1ns1 + "/128"
} else {
cidr = pod1ns1 + "/32"
}
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress",
template: ipBlockIngressTemplateSingle,
cidr: cidr,
namespace: allNS[0],
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
ipBlockPolicyName = npIPBlockNS1.name
}
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", allNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(ipBlockPolicyName))
exutil.By("9. Validate traffic to first pod in first namespace is allowed from first pod in second namespace and verdict=allow in ACL audit log")
CurlPod2PodPassUDN(oc, allNS[1], nsPodMap[allNS[1]][0], allNS[0], nsPodMap[allNS[0]][0])
output, logErr = oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", podNodeName, "--path=ovn/acl-audit-log.log").Output()
o.Expect(logErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=allow")).To(o.BeTrue())
exutil.By("10. Validate ingress traffic is not allowed from second pod in second namespace, pod in third namespace and pod in fourth (default network)")
CurlPod2PodFailUDN(oc, allNS[1], nsPodMap[allNS[1]][1], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodFailUDN(oc, allNS[2], nsPodMap[allNS[2]][0], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodFailUDN(oc, allNS[3], nsPodMap[allNS[3]][0], allNS[0], nsPodMap[allNS[0]][0])
exutil.By("11. Get node name of first pod in second namespace and schedule another pod on smae node")
podNodeName, podNodeNameErr = exutil.GetPodNodeName(oc, allNS[1], nsPodMap[allNS[1]][0])
o.Expect(podNodeNameErr).NotTo(o.HaveOccurred())
o.Expect(podNodeName).NotTo(o.BeEmpty())
newPod := udnPodResourceNode{
name: "hello-pod-" + testID + "-1-2",
namespace: allNS[1],
label: "hello-pod",
nodename: podNodeName,
template: udnPodNodeTemplate,
}
newPod.createUdnPodNode(oc)
defer removeResource(oc, true, true, "pod", newPod.name, "-n", newPod.namespace)
waitPodReady(oc, newPod.namespace, newPod.name)
exutil.By(fmt.Sprintf("12. Update the %s policy to include except clause to block the ingress from the first pod in second", ipBlockPolicyName))
var patchPayload string
if ipStackType == "dualstack" {
hostSubnetIPv4, hostSubnetIPv6 := getNodeSubnetDualStack(oc, podNodeName, "cluster_udn_"+cudnCRDName)
patchPayload = fmt.Sprintf("[{\"op\": \"replace\", \"path\":\"/spec/ingress/0/from\", \"value\": [{\"ipBlock\":{\"cidr\":%s,\"except\":[%s]}},{\"ipBlock\":{\"cidr\":%s,\"except\":[%s]}}] }]", hostSubnetIPv4, cidrIpv4, hostSubnetIPv6, cidrIpv6)
} else {
hostSubnetCIDR := getNodeSubnet(oc, podNodeName, "cluster_udn_"+cudnCRDName)
patchPayload = fmt.Sprintf("[{\"op\": \"replace\", \"path\":\"/spec/ingress/0/from\", \"value\": [{\"ipBlock\":{\"cidr\":%s,\"except\":[%s]}}] }]", hostSubnetCIDR, cidr)
}
patchReplaceResourceAsAdmin(oc, "networkpolicy/"+ipBlockPolicyName, patchPayload, allNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", ipBlockPolicyName, "-n", allNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
CurlPod2PodFailUDN(oc, allNS[1], nsPodMap[allNS[1]][0], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodPassUDN(oc, allNS[1], newPod.name, allNS[0], nsPodMap[allNS[0]][0])
})
})
|
package networking
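The UDN tests in the file above patch namespaceSelector fields with hand-written JSON patch strings. A sketch that builds the same payload with encoding/json, which avoids quoting mistakes; the op/path/value shape follows the tests' npPatch literal:

```go
// Sketch: construct the namespaceSelector JSON patch programmatically.
package main

import (
	"encoding/json"
	"fmt"
)

type patchOp struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value"`
}

func main() {
	patch := []patchOp{{
		Op:   "replace",
		Path: "/spec/ingress/0/from/0/namespaceSelector",
		Value: map[string]interface{}{
			"matchLabels": map[string]string{"team": "ocp"},
		},
	}}
	b, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	// Prints the same payload the tests pass to patchReplaceResourceAsAdmin.
	fmt.Println(string(b))
}
```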
test case
|
openshift/openshift-tests-private
|
28267276-2f6b-40cb-8f2f-0835f1778ca2
|
Author:asood-High-78292-Validate ingress allow-same-namespace and allow-all-namespaces network policies in Layer 3 NAD.
|
['"fmt"', '"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy_udn.go
|
g.It("Author:asood-High-78292-Validate ingress allow-same-namespace and allow-all-namespaces network policies in Layer 3 NAD.", func() {
var (
testID = "78292"
testDataDir = exutil.FixturePath("testdata", "networking")
udnNADTemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
ingressDenyFile = filepath.Join(testDataDir, "networkpolicy/default-deny-ingress.yaml")
ingressAllowSameNSFile = filepath.Join(testDataDir, "networkpolicy/allow-from-same-namespace.yaml")
ingressAllowAllNSFile = filepath.Join(testDataDir, "networkpolicy/allow-from-all-namespaces.yaml")
mtu int32 = 1300
nsPodMap = make(map[string][]string)
nadResourcename = "l3-network-"
topology = "layer3"
)
ipStackType := checkIPStackType(oc)
var nadName string
var nadNS []string = make([]string, 0, 4)
nsDefaultNetwork := oc.Namespace()
nadNetworkName := []string{"l3-network-test-1", "l3-network-test-2"}
exutil.By("1.0 Create 4 UDN namespaces")
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
nadNS = append(nadNS, oc.Namespace())
}
nadNS = append(nadNS, nsDefaultNetwork)
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.152.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2012:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.152.0.0/16/24,2012:100:200::0/60"}
}
}
exutil.By("2. Create Layer 3 NAD in first two namespaces")
// Same network name in both namespaces
nad := make([]udnNetDefResource, 4)
for i := 0; i < 2; i++ {
nadName = nadResourcename + strconv.Itoa(i) + "-" + testID
if i == 1 {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nadNS[i], "team=ocp").Execute()).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("Create NAD %s in namespace %s", nadName, nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadName,
namespace: nadNS[i],
nad_network_name: nadNetworkName[0],
topology: topology,
subnet: subnet[0],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadName,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("3. Create two pods in each namespace")
pod := make([]udnPodResource, 4)
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
CurlPod2PodPassUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][1], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("4. Create default deny ingress type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], ingressDenyFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
exutil.By("5. Validate traffic between pods in first namespace and from pods in second namespace")
CurlPod2PodFailUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][1], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("6. Create allow same namespace ingress type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], ingressAllowSameNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-same-namespace"))
exutil.By("7. Validate traffic between pods in first namespace works but traffic from pod in second namespace is blocked")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][1], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("8. Create allow ingress from all namespaces networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], ingressAllowAllNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-all-namespaces"))
exutil.By("9. Validate traffic from pods in second namespace")
CurlPod2PodPassUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By(fmt.Sprintf("10. Create NAD with same network %s in namespace %s as the first two namespaces and %s (different network) in %s", nadNetworkName[0], nadNS[2], nadNetworkName[1], nadNS[3]))
for i := 2; i < 4; i++ {
nad[i] = udnNetDefResource{
nadname: nadResourcename + strconv.Itoa(i) + "-" + testID,
namespace: nadNS[i],
nad_network_name: nadNetworkName[i-2],
topology: topology,
subnet: subnet[i-2],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename + strconv.Itoa(i) + "-" + testID,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("11. Create one pod each in last three namespaces, last one being without NAD")
pod = make([]udnPodResource, 6)
for i := 2; i < 5; i++ {
for j := 0; j < 1; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
exutil.By("12. Validate traffic from pods in third and fourth namespace works but not from pod in fifth namespace (default)")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[2], nsPodMap[nadNS[2]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[3], nsPodMap[nadNS[3]][0])
CurlPod2PodFail(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[4], nsPodMap[nadNS[4]][0])
exutil.By("13. Update allow-all-namespaces policy with label to allow ingress traffic from pod in second namespace only")
npPatch := `[{"op": "replace", "path": "/spec/ingress/0/from/0/namespaceSelector", "value": {"matchLabels": {"team": "ocp" }}}]`
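// Sketch of the ingress rule after the patch: only namespaces labeled
// team=ocp (i.e. the second namespace) remain selected:
// ingress:
// - from:
//   - namespaceSelector:
//       matchLabels:
//         team: ocp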
patchReplaceResourceAsAdmin(oc, "networkpolicy/allow-from-all-namespaces", npPatch, nadNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "allow-from-all-namespaces", "-n", nadNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
exutil.By("14. Validate traffic from pods in second namespace works but fails from pod in third namespace")
CurlPod2PodPassUDN(oc, nadNS[1], nsPodMap[nadNS[1]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[2], nsPodMap[nadNS[2]][0], nadNS[0], nsPodMap[nadNS[0]][0])
})
| |||||
test case
|
openshift/openshift-tests-private
|
79501ec1-3b69-4444-abda-468eb67cf9cd
|
Author:asood-High-79092-Validate egress allow-same-namespace and allow-all-namespaces network policies in Layer 2 NAD.
|
['"fmt"', '"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy_udn.go
|
g.It("Author:asood-High-79092-Validate egress allow-same-namespace and allow-all-namespaces network policies in Layer 2 NAD.", func() {
var (
testID = "79092"
testDataDir = exutil.FixturePath("testdata", "networking")
udnNADTemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
egressDenyFile = filepath.Join(testDataDir, "networkpolicy/default-deny-egress.yaml")
egressAllowSameNSFile = filepath.Join(testDataDir, "networkpolicy/allow-to-same-namespace.yaml")
egressAllowAllNSFile = filepath.Join(testDataDir, "networkpolicy/allow-to-all-namespaces.yaml")
mtu int32 = 1300
nsPodMap = make(map[string][]string)
nadResourcename = "l2-network-"
topology = "layer2"
)
ipStackType := checkIPStackType(oc)
var nadName string
nadNS := make([]string, 0, 5)
nadNetworkName := []string{"l2-network-test-1", "l2-network-test-2"}
nsDefaultNetwork := oc.Namespace()
exutil.By("1.0 Create 4 UDN namespaces")
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
nadNS = append(nadNS, oc.Namespace())
}
nadNS = append(nadNS, nsDefaultNetwork)
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16", "10.152.0.0/16"}
} else if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2012:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16,2010:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60"}
}
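// Unlike layer3, layer2 subnets carry no per-node host prefix: the whole
// network is one flat L2 segment shared by all attached pods.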
exutil.By("2. Create Layer 2 NAD in first two namespaces")
nad := make([]udnNetDefResource, 4)
for i := 0; i < 2; i++ {
nadName = nadResourcename + strconv.Itoa(i) + "-" + testID
if i == 1 {
o.Expect(oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", nadNS[i], "team=ocp").Execute()).NotTo(o.HaveOccurred())
}
exutil.By(fmt.Sprintf("Create NAD %s in namespace %s", nadName, nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadName,
namespace: nadNS[i],
nad_network_name: nadNetworkName[0],
topology: topology,
subnet: subnet[0],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadName,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("3. Create two pods in each namespace")
pod := make([]udnPodResource, 4)
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[0], nsPodMap[nadNS[0]][1])
exutil.By("4. Create default deny egresss type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], egressDenyFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-egress"))
exutil.By("5. Validate traffic between pods in first namespace and from pods in second namespace")
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[0], nsPodMap[nadNS[0]][1])
exutil.By("6. Create allow egress to same namespace networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], egressAllowSameNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-same-namespace"))
exutil.By("7. Validate traffic between pods in first namespace works but traffic from pod in second namespace is blocked")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[0], nsPodMap[nadNS[0]][1])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
exutil.By("8. Create allow all namespaces egress type networkpolicy in first namespace")
createResourceFromFile(oc, nadNS[0], egressAllowAllNSFile)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", nadNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-to-all-namespaces"))
exutil.By("9. Validate traffic to pods in second namespace")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
exutil.By(fmt.Sprintf("10. Create NAD with same network %s in namespace %s as the first two namespaces and %s (different network) in %s", nadNetworkName[0], nadNS[2], nadNetworkName[1], nadNS[3]))
for i := 2; i < 4; i++ {
nad[i] = udnNetDefResource{
nadname: nadResourcename + strconv.Itoa(i) + "-" + testID,
namespace: nadNS[i],
nad_network_name: nadNetworkName[i-2],
topology: topology,
subnet: subnet[i-2],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename + strconv.Itoa(i) + "-" + testID,
role: "primary",
template: udnNADTemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("11. Create one pod each in last three namespaces, last one being without NAD")
pod = make([]udnPodResource, 6)
for i := 2; i < 5; i++ {
for j := 0; j < 1; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
exutil.By("12. Validate traffic to pods in third and fourth namespace works but not to pod in fifth namespace (default)")
CurlPod2PodPassUDN(oc, nadNS[2], nsPodMap[nadNS[2]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFailUDN(oc, nadNS[3], nsPodMap[nadNS[3]][0], nadNS[0], nsPodMap[nadNS[0]][0])
CurlPod2PodFail(oc, nadNS[4], nsPodMap[nadNS[4]][0], nadNS[0], nsPodMap[nadNS[0]][0])
exutil.By("13. Update allow-all-namespaces policy with label to allow ingress traffic to pod in second namespace only")
npPatch := `[{"op": "replace", "path": "/spec/egress/0/to/0/namespaceSelector", "value": {"matchLabels": {"team": "ocp" }}}]`
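// Sketch of the egress rule after the patch: only namespaces labeled
// team=ocp (i.e. the second namespace) remain selected as egress peers:
// egress:
// - to:
//   - namespaceSelector:
//       matchLabels:
//         team: ocp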
patchReplaceResourceAsAdmin(oc, "networkpolicy/allow-to-all-namespaces", npPatch, nadNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "allow-to-all-namespaces", "-n", nadNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
exutil.By("14. Validate traffic to pods in second namespace works but fails to pod in third namespace")
CurlPod2PodPassUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[1], nsPodMap[nadNS[1]][0])
CurlPod2PodFailUDN(oc, nadNS[0], nsPodMap[nadNS[0]][0], nadNS[2], nsPodMap[nadNS[2]][0])
})
| |||||
test case
|
openshift/openshift-tests-private
|
d60eb1f5-aab7-4cb2-8c3e-e021339fca89
|
Author:asood-High-79093-Validate ingress CIDR block with and without except clause network policies in Layer 3 CUDN.
|
['"fmt"', '"path/filepath"', '"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/networkpolicy_udn.go
|
g.It("Author:asood-High-79093-Validate ingress CIDR block with and without except clause network policies in Layer 3 CUDN.", func() {
var (
testID = "79093"
testDataDir = exutil.FixturePath("testdata", "networking")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
udnPodNodeTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template_node.yaml")
ingressDenyFile = filepath.Join(testDataDir, "networkpolicy/default-deny-ingress.yaml")
ipBlockIngressTemplateDual = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-ingress-dual-CIDRs-template.yaml")
ipBlockIngressTemplateSingle = filepath.Join(testDataDir, "networkpolicy/ipblock/ipBlock-ingress-single-CIDR-template.yaml")
nsPodMap = make(map[string][]string)
topology = "layer3"
matchLabelKey = "test.io"
matchLabelVal = "ns-" + testID
cudnCRDName = "cudn-l3-network-" + testID
udnCRDName = "udn-l3-network-" + testID + "-0"
)
ipStackType := checkIPStackType(oc)
allNS := make([]string, 0, 4)
var ipBlockPolicyName string
var podCount int
nsDefaultNetwork := oc.Namespace()
exutil.By("1.0 Create 3 UDN namespaces")
for i := 0; i < 3; i++ {
oc.CreateNamespaceUDN()
ns := oc.Namespace()
allNS = append(allNS, ns)
// Label first two for CUDN
if i < 2 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchLabelVal)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// Annotate first namespace for ACL logging
aclSettings := aclSettings{DenySetting: "alert", AllowSetting: "alert"}
err1 := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", allNS[0], aclSettings.getJSONString()).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
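// With both deny and allow set to "alert", OVN audit-logs every ACL hit for
// this namespace to the node-local log read later via --path=ovn/acl-audit-log.log.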
allNS = append(allNS, nsDefaultNetwork)
var cidr0, ipv4cidr0, ipv6cidr0, cidr1, ipv4cidr1, ipv6cidr1 string
if ipStackType == "ipv4single" {
cidr0 = "10.150.0.0/16"
cidr1 = "10.152.0.0/16"
} else if ipStackType == "ipv6single" {
cidr0 = "2010:100:200::0/48"
cidr1 = "2012:100:200::0/48"
} else {
ipv4cidr0 = "10.150.0.0/16"
ipv4cidr1 = "10.152.0.0/16"
ipv6cidr0 = "2010:100:200::0/48"
ipv6cidr1 = "2012:100:200::0/48"
}
exutil.By("2. Create default deny ingress type networkpolicy in first namespace before UDN is created")
createResourceFromFile(oc, allNS[0], ingressDenyFile)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", allNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
exutil.By("3. Create Layer 3 UDN in first two namespaces with CUDN resource and UDN in third")
defer removeResource(oc, true, true, "clusteruserdefinednetwork", cudnCRDName)
_, cudnErr := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchLabelVal, cudnCRDName, ipv4cidr0, ipv6cidr0, cidr0, topology)
o.Expect(cudnErr).NotTo(o.HaveOccurred())
defer removeResource(oc, true, true, "userdefinednetwork", udnCRDName)
createGeneralUDNCRD(oc, allNS[2], udnCRDName, ipv4cidr1, ipv6cidr1, cidr1, topology)
exutil.By("4. Create two pods in each namespace")
podCount = 2
pod := make([]udnPodResource, 4)
for i := 0; i < len(allNS); i++ {
if i == 2 {
podCount = 1
}
for j := 0; j < podCount; j++ {
pod[j] = udnPodResource{
name: "hello-pod-" + testID + "-" + strconv.Itoa(i) + "-" + strconv.Itoa(j),
namespace: allNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[j].createUdnPod(oc)
defer removeResource(oc, true, true, "pod", pod[j].name, "-n", pod[j].namespace)
waitPodReady(oc, pod[j].namespace, pod[j].name)
nsPodMap[pod[j].namespace] = append(nsPodMap[pod[j].namespace], pod[j].name)
}
}
exutil.By("5. Validate traffic between pods in first namespace and from pods in second namespace")
CurlPod2PodFailUDN(oc, allNS[1], nsPodMap[allNS[1]][0], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodFailUDN(oc, allNS[0], nsPodMap[allNS[0]][1], allNS[0], nsPodMap[allNS[0]][0])
exutil.By("6. Get node name and IPs of first pod in first namespace")
podNodeName, podNodeNameErr := exutil.GetPodNodeName(oc, allNS[0], nsPodMap[allNS[0]][0])
o.Expect(podNodeNameErr).NotTo(o.HaveOccurred())
o.Expect(podNodeName).NotTo(o.BeEmpty())
exutil.By("7. Validate verdict=drop message")
output, logErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", podNodeName, "--path=ovn/acl-audit-log.log").Output()
o.Expect(logErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=drop")).To(o.BeTrue())
exutil.By("8. Create IP Block ingress policy to allow traffic from first pod in second namespace to first pod in first")
var cidrIpv4, cidrIpv6, cidr string
if ipStackType == "dualstack" {
exutil.By(fmt.Sprintf("Create ipBlock Ingress Dual CIDRs Policy in %s", allNS[0]))
pod1ns1IPv6, pod1ns1IPv4 := getPodIPUDN(oc, allNS[1], nsPodMap[allNS[1]][0], "ovn-udn1")
cidrIpv4 = pod1ns1IPv4 + "/32"
cidrIpv6 = pod1ns1IPv6 + "/128"
npIPBlockNS1 := ipBlockCIDRsDual{
name: "ipblock-dual-cidrs-ingress",
template: ipBlockIngressTemplateDual,
cidrIpv4: cidrIpv4,
cidrIpv6: cidrIpv6,
namespace: allNS[0],
}
npIPBlockNS1.createipBlockCIDRObjectDual(oc)
ipBlockPolicyName = npIPBlockNS1.name
} else {
pod1ns1, _ := getPodIPUDN(oc, allNS[1], nsPodMap[allNS[1]][0], "ovn-udn1")
if ipStackType == "ipv6single" {
cidr = pod1ns1 + "/128"
} else {
cidr = pod1ns1 + "/32"
}
npIPBlockNS1 := ipBlockCIDRsSingle{
name: "ipblock-single-cidr-ingress",
template: ipBlockIngressTemplateSingle,
cidr: cidr,
namespace: allNS[0],
}
npIPBlockNS1.createipBlockCIDRObjectSingle(oc)
ipBlockPolicyName = npIPBlockNS1.name
}
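// A /32 (IPv4) or /128 (IPv6) CIDR matches exactly one address, so the
// ipBlock policy admits only the first pod of the second namespace.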
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", "-n", allNS[0]).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(ipBlockPolicyName))
exutil.By("9. Validate traffic to first pod in first namespace is allowed from first pod in second namespace and verdict=allow in ACL audit log")
CurlPod2PodPassUDN(oc, allNS[1], nsPodMap[allNS[1]][0], allNS[0], nsPodMap[allNS[0]][0])
output, logErr = oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", podNodeName, "--path=ovn/acl-audit-log.log").Output()
o.Expect(logErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "verdict=allow")).To(o.BeTrue())
exutil.By("10. Validate ingress traffic is not allowed from second pod in second namespace, pod in third namespace and pod in fourth (default network)")
CurlPod2PodFailUDN(oc, allNS[1], nsPodMap[allNS[1]][1], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodFailUDN(oc, allNS[2], nsPodMap[allNS[2]][0], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodFailUDN(oc, allNS[3], nsPodMap[allNS[3]][0], allNS[0], nsPodMap[allNS[0]][0])
exutil.By("11. Get node name of first pod in second namespace and schedule another pod on smae node")
podNodeName, podNodeNameErr = exutil.GetPodNodeName(oc, allNS[1], nsPodMap[allNS[1]][0])
o.Expect(podNodeNameErr).NotTo(o.HaveOccurred())
o.Expect(podNodeName).NotTo(o.BeEmpty())
newPod := udnPodResourceNode{
name: "hello-pod-" + testID + "-1-2",
namespace: allNS[1],
label: "hello-pod",
nodename: podNodeName,
template: udnPodNodeTemplate,
}
newPod.createUdnPodNode(oc)
defer removeResource(oc, true, true, "pod", newPod.name, "-n", newPod.namespace)
waitPodReady(oc, newPod.namespace, newPod.name)
exutil.By(fmt.Sprintf("12. Update the %s policy to include except clause to block the ingress from the first pod in second", ipBlockPolicyName))
var patchPayload string
if ipStackType == "dualstack" {
hostSubnetIPv4, hostSubnetIPv6 := getNodeSubnetDualStack(oc, podNodeName, "cluster_udn_"+cudnCRDName)
patchPayload = fmt.Sprintf("[{\"op\": \"replace\", \"path\":\"/spec/ingress/0/from\", \"value\": [{\"ipBlock\":{\"cidr\":%s,\"except\":[%s]}},{\"ipBlock\":{\"cidr\":%s,\"except\":[%s]}}] }]", hostSubnetIPv4, cidrIpv4, hostSubnetIPv6, cidrIpv6)
} else {
hostSubnetCIDR := getNodeSubnet(oc, podNodeName, "cluster_udn_"+cudnCRDName)
patchPayload = fmt.Sprintf("[{\"op\": \"replace\", \"path\":\"/spec/ingress/0/from\", \"value\": [{\"ipBlock\":{\"cidr\":%s,\"except\":[%s]}}] }]", hostSubnetCIDR, cidr)
}
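// Sketch of the resulting rule: allow the peer node's whole UDN subnet while
// carving out the first pod's own address, e.g.
// ipBlock:
//   cidr: <node-subnet>
//   except: ["<pod-ip>/32"]   # or /128 for IPv6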
patchReplaceResourceAsAdmin(oc, "networkpolicy/"+ipBlockPolicyName, patchPayload, allNS[0])
npRules, npErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("networkpolicy", ipBlockPolicyName, "-n", allNS[0], "-o=jsonpath={.spec}").Output()
o.Expect(npErr).NotTo(o.HaveOccurred())
e2e.Logf("\n Network policy after update: %s", npRules)
CurlPod2PodFailUDN(oc, allNS[1], nsPodMap[allNS[1]][0], allNS[0], nsPodMap[allNS[0]][0])
CurlPod2PodPassUDN(oc, allNS[1], newPod.name, allNS[0], nsPodMap[allNS[0]][0])
})
| |||||
test
|
openshift/openshift-tests-private
|
2d794732-5e9c-43dd-8235-5f13518deec3
|
nmstate
|
import (
"fmt"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
// Package networking NMState operator tests
package networking
import (
"fmt"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN nmstate-operator installation", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-nmstate", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
installNMstateOperator(oc)
})
g.It("Author:qiowang-LEVEL0-StagerunBoth-Critical-47088-NMState Operator installation ", func() {
g.By("Checking nmstate operator installation")
e2e.Logf("Operator install check successfull as part of setup !!!!!")
e2e.Logf("SUCCESS - NMState operator installed")
})
})
var _ = g.Describe("[sig-networking] SDN nmstate-operator functional", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-nmstate", exutil.KubeConfigPath())
opNamespace = "openshift-nmstate"
)
g.BeforeEach(func() {
g.By("Check the platform and network plugin type if it is suitable for running the test")
networkType := checkNetworkType(oc)
if !(isPlatformSuitableForNMState(oc)) || !strings.Contains(networkType, "ovn") {
g.Skip("Skipping for unsupported platform or non-OVN network plugin type!")
}
installNMstateOperator(oc)
})
g.It("Author:qiowang-NonPreRelease-Longduration-High-46380-High-46382-High-46379-Create/Disable/Remove interface on node [Disruptive] [Slow]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. OCP-46380-Creating interface on node")
g.By("2.1 Configure NNCP for creating interface")
policyName := "dummy-policy-46380"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
ifacePolicyTemplate := generateTemplateAbsolutePath("iface-policy-template.yaml")
ifacePolicy := ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy0",
descr: "create interface",
ifacetype: "dummy",
state: "up",
template: ifacePolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, ifacePolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", ifacePolicy.ifacename)
}
}()
result, configErr1 := configIface(oc, ifacePolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created interface found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[?(@.name==\"dummy0\")].state}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
e2e.Logf("SUCCESS - the created interface found in node network state")
g.By("2.5 Verify the interface is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
matched, matchErr1 := regexp.MatchString("dummy\\s+dummy0", ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - interface is up and active on the node")
g.By("3. OCP-46382-Disabling interface on node")
g.By("3.1 Configure NNCP for disabling interface")
ifacePolicy = ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy0",
descr: "disable interface",
ifacetype: "dummy",
state: "down",
template: ifacePolicyTemplate,
}
result, configErr2 := configIface(oc, ifacePolicy)
o.Expect(configErr2).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no disabled interface found in node network state")
ifaceName1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceName1).ShouldNot(o.ContainSubstring("dummy0"))
e2e.Logf("SUCCESS - no disabled interface found in node network state")
g.By("3.5 Verify the interface is down on the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
matched, matchErr2 := regexp.MatchString("dummy\\s+--", ifaceList2)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - interface is down on the node")
g.By("4. OCP-46379-Removing interface on node")
g.By("4.1 Configure NNCP for removing interface")
ifacePolicy = ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy0",
descr: "remove interface",
ifacetype: "dummy",
state: "absent",
template: ifacePolicyTemplate,
}
result, configErr3 := configIface(oc, ifacePolicy)
o.Expect(configErr3).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
g.By("4.2 Verify the policy is applied")
nncpErr3 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr3, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("4.3 Verify the status of enactments is updated")
nnceErr3 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr3, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("4.4 Verify no removed interface found in node network state")
ifaceName2, nnsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr3).NotTo(o.HaveOccurred())
o.Expect(ifaceName2).ShouldNot(o.ContainSubstring("dummy0"))
e2e.Logf("SUCCESS - no removed interface found in node network state")
g.By("4.5 Verify the interface is removed from the node")
ifaceList3, ifaceErr3 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr3).NotTo(o.HaveOccurred())
matched, matchErr3 := regexp.MatchString("dummy0", ifaceList3)
o.Expect(matchErr3).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeFalse())
e2e.Logf("SUCCESS - interface is removed from the node")
})
g.It("Author:qiowang-LEVEL0-Critical-46329-Configure bond on node [Disruptive]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Creating bond on node")
g.By("2.1 Configure NNCP for creating bond")
policyName := "bond-policy-46329"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
bondPolicyTemplate := generateTemplateAbsolutePath("bond-policy-template.yaml")
bondPolicy := bondPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "bond01",
descr: "create bond",
port1: "dummy1",
port2: "dummy2",
state: "up",
template: bondPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bondPolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bondPolicy.port1)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bondPolicy.port2)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bondPolicy.ifacename)
}
}()
configErr1 := configBond(oc, bondPolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created bond found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="bond01")].state}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
e2e.Logf("SUCCESS - the created bond found in node network state")
g.By("2.5 Verify the bond is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
matched, matchErr1 := regexp.MatchString("bond\\s+bond01", ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - bond is up and active on the node")
g.By("3. Remove bond on node")
g.By("3.1 Configure NNCP for removing bond")
bondPolicy = bondPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "bond01",
descr: "remove bond",
port1: "dummy1",
port2: "dummy2",
state: "absent",
template: bondPolicyTemplate,
}
configErr2 := configBond(oc, bondPolicy)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no removed bond found in node network state")
ifaceName1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceName1).ShouldNot(o.ContainSubstring("bond01"))
e2e.Logf("SUCCESS - no removed bond found in node network state")
g.By("3.5 Verify the bond is removed from the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
matched, matchErr2 := regexp.MatchString("bond01", ifaceList2)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeFalse())
e2e.Logf("SUCCESS - bond is removed from the node")
})
g.It("Author:qiowang-Medium-46383-VLAN [Disruptive]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Creating vlan on node")
g.By("2.1 Configure NNCP for creating vlan")
policyName := "vlan-policy-46383"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeList).NotTo(o.BeEmpty())
nodeName := nodeList[0]
vlanPolicyTemplate := generateTemplateAbsolutePath("vlan-policy-template.yaml")
vlanPolicy := vlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy3.101",
descr: "create vlan",
baseiface: "dummy3",
vlanid: 101,
state: "up",
template: vlanPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, vlanPolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", vlanPolicy.ifacename)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", vlanPolicy.baseiface)
}
}()
configErr1 := vlanPolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created vlan found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+vlanPolicy.ifacename+`")].state}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
e2e.Logf("SUCCESS - the created vlan found in node network state")
g.By("2.5 Verify the vlan is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
matched, matchErr1 := regexp.MatchString("vlan\\s+"+vlanPolicy.ifacename, ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - vlan is up and active on the node")
g.By("3. Remove vlan on node")
g.By("3.1 Configure NNCP for removing vlan")
vlanPolicy = vlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy3.101",
descr: "remove vlan",
baseiface: "dummy3",
vlanid: 101,
state: "absent",
template: vlanPolicyTemplate,
}
configErr2 := vlanPolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no removed vlan found in node network state")
ifaceName1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceName1).ShouldNot(o.ContainSubstring(vlanPolicy.ifacename))
e2e.Logf("SUCCESS - no removed vlan found in node network state")
g.By("3.5 Verify the vlan is removed from the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceList2).ShouldNot(o.ContainSubstring(vlanPolicy.ifacename))
e2e.Logf("SUCCESS - vlan is removed from the node")
})
g.It("Author:qiowang-Medium-53346-Verify that it is able to reset linux-bridge vlan-filtering with vlan is empty [Disruptive]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Creating linux-bridge with vlan-filtering")
g.By("2.1 Configure NNCP for creating linux-bridge")
policyName := "bridge-policy-53346"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeList).NotTo(o.BeEmpty())
nodeName := nodeList[0]
bridgePolicyTemplate1 := generateTemplateAbsolutePath("bridge-policy-template.yaml")
bridgePolicy := bridgevlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
descr: "create linux-bridge with vlan-filtering",
port: "dummy4",
state: "up",
template: bridgePolicyTemplate1,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bridgePolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bridgePolicy.port)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bridgePolicy.ifacename)
}
}()
configErr1 := bridgePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created bridge found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="linux-br0")].state}`).Output()
bridgePort1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="linux-br0")].bridge.port[?(@.name=="dummy4")]}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
o.Expect(bridgePort1).Should(o.ContainSubstring("vlan"))
e2e.Logf("SUCCESS - the created bridge found in node network state")
g.By("2.5 Verify the bridge is up and active, vlan-filtering is enabled")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
vlanFilter1, vlanErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show", bridgePolicy.ifacename)
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(vlanErr1).NotTo(o.HaveOccurred())
matched1, matchErr1 := regexp.MatchString("bridge\\s+"+bridgePolicy.ifacename, ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched1).To(o.BeTrue())
matched2, matchErr2 := regexp.MatchString("bridge.vlan-filtering:\\s+yes", vlanFilter1)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched2).To(o.BeTrue())
e2e.Logf("SUCCESS - bridge is up and active, vlan-filtering is enabled")
g.By("3. Reset linux-bridge vlan-filtering with vlan: {}")
g.By("3.1 Configure NNCP for reset linux-bridge vlan-filtering")
bridgePolicyTemplate2 := generateTemplateAbsolutePath("reset-bridge-vlan-policy-template.yaml")
bridgePolicy = bridgevlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
descr: "reset linux-bridge vlan-filtering",
port: "dummy4",
state: "up",
template: bridgePolicyTemplate2,
}
configErr2 := bridgePolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no linux-bridge vlan-filtering found in node network state")
bridgePort2, nnsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="linux-br0")].bridge.port[?(@.name=="dummy4")]}`).Output()
o.Expect(nnsErr3).NotTo(o.HaveOccurred())
o.Expect(bridgePort2).ShouldNot(o.ContainSubstring("vlan"))
e2e.Logf("SUCCESS - no linux-bridge vlan-filtering found in node network state")
g.By("3.5 Verify the linux-bridge vlan-filtering is disabled")
vlanFilter2, vlanErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show", bridgePolicy.ifacename)
o.Expect(vlanErr2).NotTo(o.HaveOccurred())
matched3, matchErr3 := regexp.MatchString("bridge.vlan-filtering:\\s+no", vlanFilter2)
o.Expect(matchErr3).NotTo(o.HaveOccurred())
o.Expect(matched3).To(o.BeTrue())
e2e.Logf("SUCCESS - linux-bridge vlan-filtering is disabled")
g.By("4. Remove linux-bridge on node")
g.By("4.1 Configure NNCP for remove linux-bridge")
bridgePolicy = bridgevlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
descr: "remove linux-bridge",
port: "dummy4",
state: "absent",
template: bridgePolicyTemplate2,
}
configErr3 := bridgePolicy.configNNCP(oc)
o.Expect(configErr3).NotTo(o.HaveOccurred())
g.By("4.2 Verify the policy is applied")
nncpErr3 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr3, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("4.3 Verify the status of enactments is updated")
nnceErr3 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr3, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("4.4 Verify no removed linux-bridge found in node network state")
ifaceName2, nnsErr4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr4).NotTo(o.HaveOccurred())
o.Expect(ifaceName2).ShouldNot(o.ContainSubstring(bridgePolicy.ifacename))
e2e.Logf("SUCCESS - no removed linux-bridge found in node network state")
g.By("4.5 Verify the linux-bridge is removed from the node")
ifaceList2, ifaceErr3 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr3).NotTo(o.HaveOccurred())
o.Expect(ifaceList2).ShouldNot(o.ContainSubstring(bridgePolicy.ifacename))
e2e.Logf("SUCCESS - linux-bridge is removed from the node")
})
g.It("Author:qiowang-NonPreRelease-Medium-46327-Medium-46795-Medium-64854-Static IP and Route can be applied [Disruptive]", func() {
var (
ipAddrV4 = "192.0.2.251"
destAddrV4 = "198.51.100.0/24"
nextHopAddrV4 = "192.0.2.1"
ipAddrV6 = "2001:db8::1:1"
destAddrV6 = "2001:dc8::/64"
nextHopAddrV6 = "2001:db8::1:2"
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Apply static IP and Route on node")
g.By("2.1 Configure NNCP for static IP and Route")
policyName := "static-ip-route-46327"
policyTemplate := generateTemplateAbsolutePath("apply-static-ip-route-template.yaml")
stIPRoutePolicy := stIPRoutePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummyst",
descr: "apply static ip and route",
state: "up",
ipaddrv4: ipAddrV4,
destaddrv4: destAddrV4,
nexthopaddrv4: nextHopAddrV4,
ipaddrv6: ipAddrV6,
destaddrv6: destAddrV6,
nexthopaddrv6: nextHopAddrV6,
template: policyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, stIPRoutePolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", stIPRoutePolicy.ifacename)
}
}()
configErr := stIPRoutePolicy.configNNCP(oc)
o.Expect(configErr).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the static ip and route found in node network state")
iface, nnsIfaceErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+stIPRoutePolicy.ifacename+`")]}`).Output()
o.Expect(nnsIfaceErr).NotTo(o.HaveOccurred())
o.Expect(iface).Should(o.ContainSubstring(ipAddrV4))
o.Expect(iface).Should(o.ContainSubstring(ipAddrV6))
routes, nnsRoutesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.next-hop-interface=="`+stIPRoutePolicy.ifacename+`")]}`).Output()
o.Expect(nnsRoutesErr).NotTo(o.HaveOccurred())
o.Expect(routes).Should(o.ContainSubstring(destAddrV4))
o.Expect(routes).Should(o.ContainSubstring(nextHopAddrV4))
o.Expect(routes).Should(o.ContainSubstring(destAddrV6))
o.Expect(routes).Should(o.ContainSubstring(nextHopAddrV6))
e2e.Logf("SUCCESS - the static ip and route found in node network state")
g.By("2.5 Verify the static ip and route are shown on the node")
ifaceInfo, ifaceErr := exutil.DebugNode(oc, nodeName, "ip", "addr", "show", stIPRoutePolicy.ifacename)
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
o.Expect(ifaceInfo).Should(o.ContainSubstring(ipAddrV4))
o.Expect(ifaceInfo).Should(o.ContainSubstring(ipAddrV6))
v4Routes, routesV4Err := exutil.DebugNode(oc, nodeName, "ip", "-4", "route")
o.Expect(routesV4Err).NotTo(o.HaveOccurred())
o.Expect(v4Routes).Should(o.ContainSubstring(destAddrV4 + " via " + nextHopAddrV4 + " dev " + stIPRoutePolicy.ifacename))
v6Routes, routesV6Err := exutil.DebugNode(oc, nodeName, "ip", "-6", "route")
o.Expect(routesV6Err).NotTo(o.HaveOccurred())
o.Expect(v6Routes).Should(o.ContainSubstring(destAddrV6 + " via " + nextHopAddrV6 + " dev " + stIPRoutePolicy.ifacename))
e2e.Logf("SUCCESS - static ip and route are shown on the node")
// Step3 is for https://issues.redhat.com/browse/OCPBUGS-8229
g.By("3. Apply default gateway in non-default route table")
g.By("3.1 Configure NNCP for default gateway")
policyName2 := "default-route-64854"
policyTemplate2 := generateTemplateAbsolutePath("apply-route-template.yaml")
routePolicy := routePolicyResource{
name: policyName2,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummyst",
destaddr: "0.0.0.0/0",
nexthopaddr: nextHopAddrV4,
tableid: 66,
template: policyTemplate2,
}
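// Table 66 is an arbitrary non-main routing table (the main table is 254);
// this exercises the OCPBUGS-8229 fix for default gateways in custom tables.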
defer removeResource(oc, true, true, "nncp", policyName2, "-n", opNamespace)
configErr2 := routePolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName2, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
g.By("3.3 Verify the status of enactments is updated")
nnceName2 := nodeName + "." + policyName2
nnceErr2 := checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
g.By("3.4 Verify the default gateway found in node network state")
routes, nnsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.table-id==66)]}`).Output()
o.Expect(nnsErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(routes, "0.0.0.0/0")).Should(o.BeTrue())
o.Expect(strings.Contains(routes, nextHopAddrV4)).Should(o.BeTrue())
g.By("3.5 Verify the default gateway is shown on the node")
defaultGW, gwErr := exutil.DebugNode(oc, nodeName, "ip", "-4", "route", "show", "default", "table", "66")
o.Expect(gwErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(defaultGW, "default via "+nextHopAddrV4+" dev "+stIPRoutePolicy.ifacename)).Should(o.BeTrue())
g.By("3.6 Verify there is no error logs for pinging default gateway shown in nmstate-handler pod")
podName, getPodErr := exutil.GetPodName(oc, opNamespace, "component=kubernetes-nmstate-handler", nodeName)
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
logs, logErr := exutil.GetSpecificPodLogs(oc, opNamespace, "", podName, "")
o.Expect(logErr).ShouldNot(o.HaveOccurred())
o.Expect(logs).NotTo(o.BeEmpty())
o.Expect(strings.Contains(logs, "error pinging default gateway")).Should(o.BeFalse())
g.By("4. Remove static ip and route on node")
g.By("4.1 Configure NNCP for removing static ip and route")
policyTemplate = generateTemplateAbsolutePath("remove-static-ip-route-template.yaml")
stIPRoutePolicy = stIPRoutePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummyst",
descr: "remove static ip and route",
state: "absent",
ipaddrv4: ipAddrV4,
ipaddrv6: ipAddrV6,
template: policyTemplate,
}
configErr1 := stIPRoutePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("4.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("4.3 Verify the status of enactments is updated")
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("4.4 Verify static ip and route cannot be found in node network state")
iface1, nnsIfaceErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*]}").Output()
o.Expect(nnsIfaceErr1).NotTo(o.HaveOccurred())
o.Expect(iface1).ShouldNot(o.ContainSubstring(stIPRoutePolicy.ifacename))
o.Expect(iface1).ShouldNot(o.ContainSubstring(ipAddrV4))
o.Expect(iface1).ShouldNot(o.ContainSubstring(ipAddrV6))
routes1, nnsRoutesErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes}`).Output()
o.Expect(nnsRoutesErr1).NotTo(o.HaveOccurred())
o.Expect(routes1).ShouldNot(o.ContainSubstring(destAddrV4))
o.Expect(routes1).ShouldNot(o.ContainSubstring(nextHopAddrV4))
o.Expect(routes1).ShouldNot(o.ContainSubstring(destAddrV6))
o.Expect(routes1).ShouldNot(o.ContainSubstring(nextHopAddrV6))
g.By("4.5 Verify the static ip and route are removed from the node")
ifaceInfo1, ifaceErr1 := exutil.DebugNode(oc, nodeName, "ip", "addr", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceInfo1).ShouldNot(o.ContainSubstring(stIPRoutePolicy.ifacename))
o.Expect(ifaceInfo1).ShouldNot(o.ContainSubstring(ipAddrV4))
o.Expect(ifaceInfo1).ShouldNot(o.ContainSubstring(ipAddrV6))
v4Routes1, routesV4Err1 := exutil.DebugNode(oc, nodeName, "ip", "-4", "route")
o.Expect(routesV4Err1).NotTo(o.HaveOccurred())
o.Expect(v4Routes1).ShouldNot(o.ContainSubstring(destAddrV4 + " via " + nextHopAddrV4 + " dev " + stIPRoutePolicy.ifacename))
v6Routes1, routesV6Err1 := exutil.DebugNode(oc, nodeName, "ip", "-6", "route")
o.Expect(routesV6Err1).NotTo(o.HaveOccurred())
o.Expect(v6Routes1).ShouldNot(o.ContainSubstring(destAddrV6 + " via " + nextHopAddrV6 + " dev " + stIPRoutePolicy.ifacename))
e2e.Logf("SUCCESS - static ip and route are removed from the node")
})
g.It("Author:qiowang-NonPreRelease-Medium-66174-Verify knmstate operator support for IPv6 single stack - ipv6 default route [Disruptive]", func() {
exutil.By("Check the platform if it is suitable for running the test")
platform := checkPlatform(oc)
ipStackType := checkIPStackType(oc)
if ipStackType != "ipv6single" || !strings.Contains(platform, "baremetal") {
g.Skip("Should be tested on IPv6 single stack platform(IPI BM), skipping!")
}
var (
destAddr = "::/0"
nextHopAddr = "fd00:1101::1"
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
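// Pick the first connected, non-OVS ethernet device; column 1 of `nmcli dev`
// output is the device name.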
cmd := `nmcli dev | grep -v 'ovs' | egrep 'ethernet +connected' | awk '{print $1}'`
ifNameInfo, ifNameErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
o.Expect(ifNameErr).NotTo(o.HaveOccurred())
ifName := strings.Split(ifNameInfo, "\n")[0]
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Apply default routes on node")
exutil.By("2.1 Configure NNCP for default route in main route table")
policyTemplate := generateTemplateAbsolutePath("apply-route-template.yaml")
policyName1 := "default-route-in-main-table-66174"
routePolicy1 := routePolicyResource{
name: policyName1,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 254,
template: policyTemplate,
}
defer deleteNNCP(oc, policyName1)
defer exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "del", "default", "via", routePolicy1.nexthopaddr, "dev", routePolicy1.ifacename, "table", strconv.Itoa(routePolicy1.tableid))
configErr1 := routePolicy1.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("2.2 Configure NNCP for default route in custom route table")
policyName2 := "default-route-in-custom-table-66174"
routePolicy2 := routePolicyResource{
name: policyName2,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 66,
template: policyTemplate,
}
defer deleteNNCP(oc, policyName2)
defer exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "del", "default", "via", routePolicy2.nexthopaddr, "dev", routePolicy2.ifacename, "table", strconv.Itoa(routePolicy2.tableid))
configErr2 := routePolicy2.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
exutil.By("2.3 Verify the policies are applied")
nncpErr1 := checkNNCPStatus(oc, policyName1, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
nncpErr2 := checkNNCPStatus(oc, policyName2, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policies are applied")
exutil.By("2.4 Verify the status of enactments are updated")
nnceName1 := nodeName + "." + policyName1
nnceErr1 := checkNNCEStatus(oc, nnceName1, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
nnceName2 := nodeName + "." + policyName2
nnceErr2 := checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments are updated")
exutil.By("2.5 Verify the default routes found in node network state")
routes, nnsRoutesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.destination=="`+destAddr+`")]}`).Output()
o.Expect(nnsRoutesErr).NotTo(o.HaveOccurred())
o.Expect(routes).Should(o.ContainSubstring(routePolicy1.nexthopaddr))
o.Expect(routes).Should(o.ContainSubstring(routePolicy2.nexthopaddr))
e2e.Logf("SUCCESS - the default routes found in node network state")
exutil.By("2.6 Verify the default routes are shown on the node")
route1, routeErr1 := exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy1.tableid))
o.Expect(routeErr1).NotTo(o.HaveOccurred())
o.Expect(route1).Should(o.ContainSubstring("default via " + routePolicy1.nexthopaddr + " dev " + routePolicy1.ifacename))
route2, routeErr2 := exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy2.tableid))
o.Expect(routeErr2).NotTo(o.HaveOccurred())
o.Expect(route2).Should(o.ContainSubstring("default via " + routePolicy2.nexthopaddr + " dev " + routePolicy2.ifacename))
e2e.Logf("SUCCESS - default routes are shown on the node")
exutil.By("3. Remove default routes on node")
exutil.By("3.1 Configure NNCP for removing default route in main route table")
rmpolicyTemplate := generateTemplateAbsolutePath("remove-route-template.yaml")
routePolicy1 = routePolicyResource{
name: policyName1,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
state: "absent",
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 254,
template: rmpolicyTemplate,
}
configErr1 = routePolicy1.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("3.2 Configure NNCP for removing default route in custom route table")
routePolicy2 = routePolicyResource{
name: policyName2,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
state: "absent",
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 66,
template: rmpolicyTemplate,
}
configErr2 = routePolicy2.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
exutil.By("3.3 Verify the policies are applied")
nncpErr1 = checkNNCPStatus(oc, policyName1, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
nncpErr2 = checkNNCPStatus(oc, policyName2, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policies are applied")
exutil.By("3.4 Verify the status of enactments are updated")
nnceErr1 = checkNNCEStatus(oc, nnceName1, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
nnceErr2 = checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments are updated")
exutil.By("3.5 Verify the removed default routes cannot be found in node network state")
routes1, nnsRoutesErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.destination=="`+destAddr+`")]}`).Output()
o.Expect(nnsRoutesErr1).NotTo(o.HaveOccurred())
o.Expect(routes1).ShouldNot(o.ContainSubstring(routePolicy1.nexthopaddr))
o.Expect(routes1).ShouldNot(o.ContainSubstring(routePolicy2.nexthopaddr))
e2e.Logf("SUCCESS - the default routes cannot be found in node network state")
exutil.By("3.6 Verify the default routes are removed from the node")
route1, routeErr1 = exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy1.tableid))
o.Expect(routeErr1).NotTo(o.HaveOccurred())
o.Expect(route1).ShouldNot(o.ContainSubstring("default via " + routePolicy1.nexthopaddr + " dev " + routePolicy1.ifacename))
route2, routeErr2 = exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy2.tableid))
o.Expect(routeErr2).NotTo(o.HaveOccurred())
o.Expect(route2).ShouldNot(o.ContainSubstring("default via " + routePolicy2.nexthopaddr + " dev " + routePolicy2.ifacename))
e2e.Logf("SUCCESS - default routes are removed from the node")
})
g.It("Author:qiowang-NonPreRelease-Medium-71145-configure bond interface and 70 vlans based on the bond then reboot node, check the boot time [Disruptive] [Slow]", func() {
e2e.Logf("It is for OCPBUGS-22771, OCPBUGS-25753, OCPBUGS-26026")
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
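// Collect every interface the NNCP creates (bond12, its 70 vlans, and the two dummy ports) so the deferred cleanup can delete whatever was actually applied.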
var ifacesAdded []string
for i := 101; i <= 170; i++ {
ifacesAdded = append(ifacesAdded, "bond12."+strconv.Itoa(i))
}
ifacesAdded = append(ifacesAdded, "bond12", "dummy1", "dummy2")
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Create bond interface and 70 vlans based on the bond")
exutil.By("2.1 Configure NNCP for bond and vlans")
policyName := "ocpbug-22771-25753-26026-bond-70vlans"
bondPolicyTemplate := generateTemplateAbsolutePath("ocpbug-22771-25753-26026.yaml")
bondPolicy := bondPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "bond12",
descr: "test bond-vlans",
port1: "dummy1",
port2: "dummy2",
state: "up",
template: bondPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
allIfaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
var deferCmd string
for _, ifaceAdded := range ifacesAdded {
if strings.Contains(allIfaces, ifaceAdded) {
deferCmd = deferCmd + " nmcli con delete " + ifaceAdded + ";"
}
}
if deferCmd != "" {
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", deferCmd)
}
}()
configErr := configBond(oc, bondPolicy)
o.Expect(configErr).NotTo(o.HaveOccurred())
exutil.By("2.2 Verify the policy is applied")
nncpErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
exutil.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
exutil.By("2.4 Verify the bond and vlans found in node network state")
iface, nnsIfaceErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[*].name}`).Output()
o.Expect(nnsIfaceErr).NotTo(o.HaveOccurred())
for _, ifaceAdded := range ifacesAdded {
o.Expect(strings.Contains(iface, ifaceAdded)).Should(o.BeTrue())
}
e2e.Logf("SUCCESS - the bond and vlans found in node network state")
exutil.By("2.5 Verify the bond and vlans are shown on the node")
ifaceInfo, ifaceErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
for _, ifaceAdded := range ifacesAdded {
o.Expect(strings.Contains(ifaceInfo, ifaceAdded)).Should(o.BeTrue())
}
e2e.Logf("SUCCESS - bond and vlans are shown on the node")
exutil.By("3. Reboot the node")
defer checkNodeStatus(oc, nodeName, "Ready")
rebootNode(oc, nodeName)
checkNodeStatus(oc, nodeName, "NotReady")
checkNodeStatus(oc, nodeName, "Ready")
exutil.By("4. Check the boot time")
cmd := `systemd-analyze | head -1`
analyzeOutput, analyzeErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
o.Expect(analyzeErr).NotTo(o.HaveOccurred())
e2e.Logf("Expected boot time should be less than 3 minutes(180s)")
reTime := regexp.MustCompile(`(\(initrd\) \+ ?)([\s\S]+)( \(userspace\)?)`)
bootTime := reTime.FindStringSubmatch(analyzeOutput)[2]
e2e.Logf("boot time(userspace) is: %v", bootTime)
var totalSec int
if strings.Contains(bootTime, "min") {
reMin := regexp.MustCompile(`(\d+)min`)
getMin := reMin.FindStringSubmatch(bootTime)[1]
bootMin, _ := strconv.Atoi(getMin)
totalSec = totalSec + bootMin*60
}
reSec := regexp.MustCompile(`(\d+)(\.\d+)?s`)
getSec := reSec.FindStringSubmatch(bootTime)[1]
bootSec, _ := strconv.Atoi(getSec)
totalSec = totalSec + bootSec
e2e.Logf("boot total seconds(userspace) is: %v", totalSec)
o.Expect(totalSec < 180).To(o.BeTrue())
exutil.By("5. Check the node logs")
journalCmd := `journalctl -u ovs-configuration -b`
logs, logsErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", journalCmd)
o.Expect(logsErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(logs, "Cannot bring up connection br-ex after 10 attempts")).ShouldNot(o.BeTrue())
o.Expect(strings.Contains(logs, "configure-ovs exited with error")).ShouldNot(o.BeTrue())
})
g.It("Author:qiowang-Medium-73027-Verify vlan of bond will get autoconnect when bond ports link revived [Disruptive]", func() {
e2e.Logf("It is for OCPBUGS-11300, OCPBUGS-23023")
var (
ipAddr1V4 = "192.0.2.251"
ipAddr2V4 = "192.0.2.252"
ipAddr1V6 = "2001:db8::1:1"
ipAddr2V6 = "2001:db8::1:2"
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Create vlan over bond")
exutil.By("2.1 Configure NNCP for vlan over bond")
policyName := "ocpbug-11300-23023-vlan-over-bond"
bondVlanPolicyTemplate := generateTemplateAbsolutePath("ocpbug-11300-23023.yaml")
bondVlanPolicy := bondvlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
descr: "test bond-vlans",
bondname: "bond12",
port1: "dummy1",
port1type: "dummy",
port2: "dummy2",
port2type: "dummy",
vlanifname: "bond12.101",
vlanid: 101,
ipaddrv4: ipAddr1V4,
ipaddrv6: ipAddr1V6,
state: "up",
template: bondVlanPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
var ifacesAdded []string
ifacesAdded = append(ifacesAdded, bondVlanPolicy.vlanifname, bondVlanPolicy.bondname, bondVlanPolicy.port1, bondVlanPolicy.port2)
var deferCmd string
for _, ifaceAdded := range ifacesAdded {
if strings.Contains(ifaces, ifaceAdded) {
deferCmd = deferCmd + " nmcli con delete " + ifaceAdded + ";"
}
}
if deferCmd != "" {
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", deferCmd)
}
}()
configErr1 := bondVlanPolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
exutil.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
exutil.By("2.4 Verify the vlan interface ip addresses are shown correctly")
ipCmd := "ip address show " + bondVlanPolicy.vlanifname
ifaceInfo1, ifaceErr1 := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceInfo1, ipAddr1V4)).Should(o.BeTrue())
o.Expect(strings.Contains(ifaceInfo1, ipAddr1V6)).Should(o.BeTrue())
e2e.Logf("SUCCESS - vlan interface ip addresses are shown on the node")
exutil.By("3. edit nncp")
exutil.By("3.1 update ip address")
patchContent := `[{"op": "replace", "path": "/spec/desiredState/interfaces", "value": [{"name": "` + bondVlanPolicy.vlanifname + `", "type": "vlan", "state": "up", "vlan":{"base-iface": "` + bondVlanPolicy.bondname + `", "id": ` + strconv.Itoa(bondVlanPolicy.vlanid) + `}, "ipv4":{"address":[{"ip": "` + ipAddr2V4 + `", "prefix-length": 24}], "enabled":true}, "ipv6":{"address":[{"ip": "` + ipAddr2V6 + `", "prefix-length": 96}], "enabled":true}}]}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("nncp", policyName, "--type=json", "-p", patchContent).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
exutil.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
exutil.By("3.4 Verify the vlan interface ip addresses are shown correctly")
ifaceInfo2, ifaceErr2 := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceInfo2, ipAddr2V4)).Should(o.BeTrue())
o.Expect(strings.Contains(ifaceInfo2, ipAddr2V6)).Should(o.BeTrue())
e2e.Logf("SUCCESS - vlan interface ip addresses are shown on the node")
exutil.By("4. Bring all bond ports link down, wait for the vlan become inactive")
downPortCmd := "ip link set " + bondVlanPolicy.port1 + " down; ip link set " + bondVlanPolicy.port2 + " down"
_, downPortErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", downPortCmd)
o.Expect(downPortErr).NotTo(o.HaveOccurred())
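// Poll until the vlan loses its IP addresses (no "inet" in the ip address output), which shows it went inactive once both bond ports are down.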
vlanInfo1 := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
ifaceInfo, ifaceErr := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
if !strings.Contains(ifaceInfo, "inet") {
return true, nil
}
e2e.Logf("vlan still active and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(vlanInfo1, "Failed to deactivate vlan")
exutil.By("5. Bring all bond ports link up again, vlan will reactive with the original ip addresses")
upPortCmd := "ip link set " + bondVlanPolicy.port1 + " up; ip link set " + bondVlanPolicy.port2 + " up"
_, upPortErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", upPortCmd)
o.Expect(upPortErr).NotTo(o.HaveOccurred())
vlanInfo2 := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
ifaceInfo, ifaceErr := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaceInfo, ipAddr2V4) && strings.Contains(ifaceInfo, ipAddr2V6) {
return true, nil
}
e2e.Logf("vlan still down and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(vlanInfo2, "Failed to reactivate vlan with the original ip addresses")
})
g.It("Author:meinli-High-76212-Validate Metrics collection for kubernetes-nmstate [Disruptive]", func() {
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeName).NotTo(o.BeEmpty())
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Configure two NNCP for creating linux-bridge with hostname")
policyName := "br-test"
bridgePolicyTemplate1 := generateTemplateAbsolutePath("bridge-with-hostname-policy-template.yaml")
bridgePolicy := bridgehostnamePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
state: "up",
template: bridgePolicyTemplate1,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bridgePolicy.ifacename) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bridgePolicy.ifacename)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr1 := bridgePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, fmt.Sprintf("%s policy applied failed", policyName))
exutil.By("3. check the metrics value with proper gauge increased")
featureNames := []string{"dhcpv4-custom-hostname"}
expectedValues := []int{1}
metricPod := getPodName(oc, opNamespace, "component=kubernetes-nmstate-metrics")
o.Expect(metricPod).ShouldNot(o.BeEmpty())
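// The kubernetes-nmstate-metrics pod serves Prometheus metrics on 127.0.0.1:8089; kubernetes_nmstate_features_applied is a per-feature gauge of applied policies.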
metricCmd := "curl http://127.0.0.1:8089/metrics | grep kubernetes_nmstate_features_applied"
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
// validate the metrics value increased to 2 after applying again
deleteNNCP(oc, policyName)
configErr2 := bridgePolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, fmt.Sprintf("%s policy applied failed", policyName))
expectedValues = []int{2}
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
exutil.By("4. metrics value will decrease after update nncp with absent state")
patchCmd := `[{"op": "replace", "path": "/spec/desiredState/interfaces/0/state", "value": "absent" }]`
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("nncp", policyName, "--type=json", "-p", patchCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("%s policy updated failed", policyName))
// check that the metrics value decreases by 1
expectedValues = []int{1}
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
})
g.It("Author:meinli-High-76372-Check NMstate Features Metrics Value collection [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/nmstate")
nmstateCRTemplate = filepath.Join(buildPruningBaseDir, "nmstate-cr-template.yaml")
dhcpHostnamePolicyTemplate = filepath.Join(buildPruningBaseDir, "dhcp-hostname-policy-template.yaml")
lldpPolicyTemplate = filepath.Join(buildPruningBaseDir, "lldp-policy-template.yaml")
ovnMappingPolicyTemplate = filepath.Join(buildPruningBaseDir, "ovn-mapping-policy-template.yaml")
ovsDBGlobalPolicyTemplate = filepath.Join(buildPruningBaseDir, "ovs-db-global-policy-template.yaml")
staticHostnamePolicyTemplate = filepath.Join(buildPruningBaseDir, "static-hostname-policy-template.yaml")
staticDNSPolicyTemplate = filepath.Join(buildPruningBaseDir, "global-dns-nncp-template.yaml")
dnsClearNncpTemplate = filepath.Join(buildPruningBaseDir, "global-dns-nncp-recover-template.yaml")
nodeSelectLabel = "kubernetes.io/hostname"
featureNames = []string{"dhcpv4-custom-hostname", "dhcpv6-custom-hostname", "lldp", "ovn-mapping", "ovs-db-global",
"static-hostname", "static-dns-name-server", "static-dns-search"}
expectedValues = []int{1, 1, 1, 1, 1, 1, 1, 1}
ipAddr string
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeName).NotTo(o.BeEmpty())
exutil.By("1. Create NMState CR")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Configure NNCPs for NMstate Features")
exutil.By("2.1 Configure NNCP for creating DhcpCustomHostname NMstate Feature")
dhcpHostnamePolicy := bridgehostnamePolicyResource{
name: "dhcphostname-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
ifacename: "dummy_dhcp",
state: "up",
template: dhcpHostnamePolicyTemplate,
}
defer deleteNNCP(oc, dhcpHostnamePolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, dhcpHostnamePolicy.ifacename) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", dhcpHostnamePolicy.ifacename)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr1 := dhcpHostnamePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
nncpErr1 := checkNNCPStatus(oc, dhcpHostnamePolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, fmt.Sprintf("%s policy applied failed", dhcpHostnamePolicy.name))
exutil.By("2.2 Configure NNCP for creating Lldp NMstate Feature")
lldpPolicy := bridgehostnamePolicyResource{
name: "lldp-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
ifacename: "dummy_lldp",
state: "up",
template: lldpPolicyTemplate,
}
defer deleteNNCP(oc, lldpPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, lldpPolicy.ifacename) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", lldpPolicy.ifacename)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr2 := lldpPolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
nncpErr2 := checkNNCPStatus(oc, lldpPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, fmt.Sprintf("%s policy applied failed", lldpPolicy.name))
exutil.By("2.3 Configure NNCP for creating OvnMapping NMstate Feature")
ovnMappingPolicy := ovnMappingPolicyResource{
name: "ovnmapping-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
localnet1: "blue",
bridge1: "ovsbr1",
template: ovnMappingPolicyTemplate,
}
defer deleteNNCP(oc, ovnMappingPolicy.name)
defer func() {
ovnmapping, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "get", "Open_vSwitch", ".", "external_ids:ovn-bridge-mappings")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ovnmapping, ovnMappingPolicy.localnet1) {
// ovs-vsctl can only use "set" to reserve some fields
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "set", "Open_vSwitch", ".", "external_ids:ovn-bridge-mappings=\"physnet:br-ex\"")
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr3 := ovnMappingPolicy.configNNCP(oc)
o.Expect(configErr3).NotTo(o.HaveOccurred())
nncpErr3 := checkNNCPStatus(oc, ovnMappingPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr3, fmt.Sprintf("%s policy applied failed", ovnMappingPolicy.name))
exutil.By("2.4 Configure NNCP for creating OvsDBGlobal NMstate Feature")
ovsDBGlobalPolicy := ovsDBGlobalPolicyResource{
name: "ovsdbglobal-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
ovsconfig: "n-handler-threads",
ovsvalue: "2",
template: ovsDBGlobalPolicyTemplate,
}
defer deleteNNCP(oc, ovsDBGlobalPolicy.name)
defer func() {
ovsdb, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "get", "Open_vSwitch", ".", "other_config")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ovsdb, ovsDBGlobalPolicy.ovsconfig) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "remove", "Open_vSwitch", ".", "other_config", ovsDBGlobalPolicy.ovsconfig)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr4 := ovsDBGlobalPolicy.configNNCP(oc)
o.Expect(configErr4).NotTo(o.HaveOccurred())
nncpErr4 := checkNNCPStatus(oc, ovsDBGlobalPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr4, fmt.Sprintf("%s policy applied failed", ovsDBGlobalPolicy.name))
exutil.By("2.5 Configure NNCP for creating StaticHostname NMstate Feature")
staticHostnamePolicy := staticHostnamePolicyResource{
name: "statichostname-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
hostdomain: nodeName,
template: staticHostnamePolicyTemplate,
}
defer deleteNNCP(oc, staticHostnamePolicy.name)
defer func() {
hostname, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "hostnamectl")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if !strings.Contains(hostname, nodeName) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "hostnamectl", "set-hostname", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr5 := staticHostnamePolicy.configNNCP(oc)
o.Expect(configErr5).NotTo(o.HaveOccurred())
ncpErr5 := checkNNCPStatus(oc, staticHostnamePolicy.name, "Available")
exutil.AssertWaitPollNoErr(ncpErr5, fmt.Sprintf("%s policy applied failed", staticHostnamePolicy.name))
exutil.By("2.6 Configure NNCP for creating StaticDNS NMstate Feature")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
ipAddr = "2003::3"
}
if ipStackType == "ipv4single" {
ipAddr = "8.8.8.8"
}
dnsServerIP1 := getAvaliableNameServer(oc, nodeName)
staticDNSPolicy := staticDNSPolicyResource{
name: "staticdns-test",
nodeName: nodeName,
dnsdomain: "example.com",
serverip1: dnsServerIP1,
serverip2: ipAddr,
template: staticDNSPolicyTemplate,
}
defer deleteNNCP(oc, staticDNSPolicy.name)
defer func() {
// configure nncp with an empty dns server list to clear the configuration
nncpDns_clear := networkingRes{
name: "dns-" + getRandomString(),
namespace: opNamespace,
kind: "NodeNetworkConfigurationPolicy",
tempfile: dnsClearNncpTemplate,
}
nncpDns_clear.create(oc, "NAME="+nncpDns_clear.name, "NAMESPACE="+nncpDns_clear.namespace, "NODE="+nodeName)
nncpErr1 := checkNNCPStatus(oc, nncpDns_clear.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
removeResource(oc, true, true, nncpDns_clear.kind, nncpDns_clear.name, "-n", nncpDns_clear.namespace)
}()
configErr6 := staticDNSPolicy.configNNCP(oc)
o.Expect(configErr6).NotTo(o.HaveOccurred())
ncpErr6 := checkNNCPStatus(oc, staticDNSPolicy.name, "Available")
exutil.AssertWaitPollNoErr(ncpErr6, fmt.Sprintf("%s policy applied failed", staticDNSPolicy.name))
exutil.By("3. Check Metrics value for above NMstate Features")
metricPod := getPodName(oc, opNamespace, "component=kubernetes-nmstate-metrics")
o.Expect(metricPod).ShouldNot(o.BeEmpty())
metricCmd := "curl http://127.0.0.1:8089/metrics | grep kubernetes_nmstate_features_applied"
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
})
})
var _ = g.Describe("[sig-networking] SDN nmstate-operator upgrade", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-nmstate", exutil.KubeConfigPath())
opNamespace = "openshift-nmstate"
opName = "kubernetes-nmstate-operator"
policyNamePreUpgrade = "bond-policy-54077"
policyNamePstUpgrade = "vlan-policy-54077"
bondInfName = "bond54077"
bondPort1 = "dummy5"
bondPort2 = "dummy6"
vlanBaseInf = "dummy7"
bondPolicyTemplate = generateTemplateAbsolutePath("bond-policy-template.yaml")
vlanPolicyTemplate = generateTemplateAbsolutePath("vlan-policy-template.yaml")
)
g.BeforeEach(func() {
g.By("Check the platform if it is suitable for running the test")
if !(isPlatformSuitableForNMState(oc)) {
g.Skip("Skipping for unsupported platform!")
}
})
g.It("Author:qiowang-PreChkUpgrade-NonPreRelease-Medium-54077-Verify that the knmstate operator works as expected after the cluster upgrade [Disruptive]", func() {
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
exutil.By("1. install knmstate operator")
installNMstateOperator(oc)
exutil.By("2. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
exutil.By("3. Creating bond on node")
exutil.By("3.1 Configure NNCP for creating bond")
bondPolicy := bondPolicyResource{
name: policyNamePreUpgrade,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: bondInfName,
descr: "create bond",
port1: bondPort1,
port2: bondPort2,
state: "up",
template: bondPolicyTemplate,
}
configErr := configBond(oc, bondPolicy)
o.Expect(configErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Verify the policy is applied")
nncpErr := checkNNCPStatus(oc, policyNamePreUpgrade, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy applied failed")
exutil.By("3.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyNamePreUpgrade
nnceErr := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr, "status of enactments updated failed")
exutil.By("3.4 Verify the bond is up and active on the node")
ifaceList, ifaceErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceList, bondPolicy.ifacename)).Should(o.BeTrue())
exutil.By("3.5 Verify the created bond found in node network state")
ifaceState, nnsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+bondPolicy.ifacename+`")].state}`).Output()
o.Expect(nnsErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceState, "up")).Should(o.BeTrue())
})
g.It("Author:qiowang-PstChkUpgrade-NonPreRelease-Medium-54077-Verify that the knmstate operator works as expected after the cluster upgrade [Disruptive]", func() {
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
defer removeResource(oc, true, true, "nmstate", "nmstate", "-n", opNamespace)
defer deleteNNCP(oc, policyNamePreUpgrade)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bondInfName) {
cmd := "nmcli con delete " + bondInfName + "; nmcli con delete " + bondPort1 + "; nmcli con delete " + bondPort2
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
}
}()
exutil.By("1. Check NMState CSV is upgraded")
majorVer, _, verErr := exutil.GetClusterVersion(oc)
o.Expect(verErr).NotTo(o.HaveOccurred())
e2e.Logf("ocp major version: %s", majorVer)
csvOutput, csvErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", opNamespace).Output()
o.Expect(csvErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(csvOutput, opName+"."+majorVer)).Should(o.BeTrue())
exutil.By("2. Check NMState CRs are running")
result, crErr := checkNmstateCR(oc, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "check nmstate cr failed")
o.Expect(result).To(o.BeTrue())
exutil.By("3. Check NNCP created before upgrade is still Available")
exutil.By("3.1 Verify the policy is Available")
nncpErr1 := checkNNCPStatus(oc, policyNamePreUpgrade, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
exutil.By("3.2 Verify the status of enactments is Available")
nnceName1 := nodeName + "." + policyNamePreUpgrade
nnceErr1 := checkNNCEStatus(oc, nnceName1, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
exutil.By("3.3 Verify the bond is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceList1, bondInfName)).Should(o.BeTrue())
exutil.By("3.4 Verify the created bond found in node network state")
ifaceState1, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+bondInfName+`")].state}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceState1, "up")).Should(o.BeTrue())
exutil.By("4. Create new NNCP after upgrade")
exutil.By("4.1 Configure NNCP for creating vlan")
vlanPolicy := vlanPolicyResource{
name: policyNamePstUpgrade,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: vlanBaseInf + ".101",
descr: "create vlan",
baseiface: vlanBaseInf,
vlanid: 101,
state: "up",
template: vlanPolicyTemplate,
}
defer deleteNNCP(oc, policyNamePstUpgrade)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, vlanPolicy.ifacename) {
cmd := `nmcli con delete ` + vlanPolicy.ifacename + `; nmcli con delete ` + vlanPolicy.baseiface
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
}
}()
configErr1 := vlanPolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("4.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyNamePstUpgrade, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
exutil.By("4.3 Verify the status of enactments is updated")
nnceName2 := nodeName + "." + policyNamePstUpgrade
nnceErr2 := checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
exutil.By("4.4 Verify the vlan is up and active on the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceList2, vlanPolicy.ifacename)).Should(o.BeTrue())
exutil.By("4.5 Verify the created vlan found in node network state")
ifaceState2, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+vlanPolicy.ifacename+`")].state}`).Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceState2, "up")).Should(o.BeTrue())
})
})
var _ = g.Describe("[sig-networking] SDN nmstate-operator testing on plateforms including Azure", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-nmstate", exutil.KubeConfigPath())
opNamespace = "openshift-nmstate"
workers = exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker")
)
g.BeforeEach(func() {
g.By("Check the platform if it is suitable for running the test")
platform := checkPlatform(oc)
e2e.Logf("platform is %v", platform)
if !(isPlatformSuitableForNMState(oc)) && !strings.Contains(platform, "azure") {
g.Skip("It is not a suitable platform, it is not Azure either. Skip this testing!")
}
if len(workers) < 1 {
g.Skip("These cases can only be run for cluster that has atleast one worker nodes. Skip this testing")
}
installNMstateOperator(oc)
})
g.It("Author:yingwang-NonPreRelease-Medium-75671-Verify global DNS via NMstate [Disruptive]", func() {
var (
nmstateCRTemplate = generateTemplateAbsolutePath("nmstate-cr-template.yaml")
dnsNncpTemplate = generateTemplateAbsolutePath("global-dns-nncp-template.yaml")
dnsDomain = "testglobal.com"
ipAddr string
)
ipStackType := checkIPStackType(oc)
switch ipStackType {
case "ipv4single":
ipAddr = "8.8.8.8"
case "dualstack":
ipAddr = "2003::3"
case "ipv6single":
ipAddr = "2003::3"
default:
e2e.Logf("Get ipStackType as %s", ipStackType)
g.Skip("Skip for not supported IP stack type!! ")
}
g.By("1. Create NMState CR")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Create NNCP for Gloabal DNS")
g.By("2.1 create policy")
dnsServerIP1 := getAvaliableNameServer(oc, workers[0])
dnsServerIP2 := ipAddr
nncpDns := networkingRes{
name: "dns-" + getRandomString(),
namespace: opNamespace,
kind: "NodeNetworkConfigurationPolicy",
tempfile: dnsNncpTemplate,
}
defer func() {
removeResource(oc, true, true, nncpDns.kind, nncpDns.name, "-n", nncpDns.namespace)
// configure nncp with an empty dns server list to clear the configuration
dnsClearNncpTemplate := generateTemplateAbsolutePath("global-dns-nncp-recover-template.yaml")
nncpDns_clear := networkingRes{
name: "dns-" + getRandomString(),
namespace: opNamespace,
kind: "NodeNetworkConfigurationPolicy",
tempfile: dnsClearNncpTemplate,
}
nncpDns_clear.create(oc, "NAME="+nncpDns_clear.name, "NAMESPACE="+nncpDns_clear.namespace, "NODE="+workers[0])
nncpErr1 := checkNNCPStatus(oc, nncpDns_clear.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName_clear := workers[0] + "." + nncpDns_clear.name
nnceErr1 := checkNNCEStatus(oc, nnceName_clear, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
removeResource(oc, true, true, nncpDns_clear.kind, nncpDns_clear.name, "-n", nncpDns_clear.namespace)
}()
nncpDns.create(oc, "NAME="+nncpDns.name, "NAMESPACE="+nncpDns.namespace, "NODE="+workers[0], "DNSDOMAIN="+dnsDomain,
"SERVERIP1="+dnsServerIP1, "SERVERIP2="+dnsServerIP2)
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, nncpDns.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := workers[0] + "." + nncpDns.name
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify dns server record")
dnsServerIP := make([]string, 2)
dnsServerIP[0] = dnsServerIP1
dnsServerIP[1] = dnsServerIP2
checkDNSServer(oc, workers[0], dnsDomain, dnsServerIP)
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
8ce3601e-49ee-42aa-918f-86fdacc65a91
|
Author:qiowang-LEVEL0-StagerunBoth-Critical-47088-NMState Operator installation
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-LEVEL0-StagerunBoth-Critical-47088-NMState Operator installation ", func() {
g.By("Checking nmstate operator installation")
e2e.Logf("Operator install check successfull as part of setup !!!!!")
e2e.Logf("SUCCESS - NMState operator installed")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
1db2cf13-49cf-49ee-9d83-e1bced4e953d
|
Author:qiowang-NonPreRelease-Longduration-High-46380-High-46382-High-46379-Create/Disable/Remove interface on node [Disruptive] [Slow]
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-NonPreRelease-Longduration-High-46380-High-46382-High-46379-Create/Disable/Remove interface on node [Disruptive] [Slow]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. OCP-46380-Creating interface on node")
g.By("2.1 Configure NNCP for creating interface")
policyName := "dummy-policy-46380"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
ifacePolicyTemplate := generateTemplateAbsolutePath("iface-policy-template.yaml")
ifacePolicy := ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy0",
descr: "create interface",
ifacetype: "dummy",
state: "up",
template: ifacePolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, ifacePolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", ifacePolicy.ifacename)
}
}()
result, configErr1 := configIface(oc, ifacePolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created interface found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[?(@.name==\"dummy0\")].state}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
e2e.Logf("SUCCESS - the created interface found in node network state")
g.By("2.5 Verify the interface is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
matched, matchErr1 := regexp.MatchString("dummy\\s+dummy0", ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - interface is up and active on the node")
g.By("3. OCP-46382-Disabling interface on node")
g.By("3.1 Configure NNCP for disabling interface")
ifacePolicy = ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy0",
descr: "disable interface",
ifacetype: "dummy",
state: "down",
template: ifacePolicyTemplate,
}
result, configErr2 := configIface(oc, ifacePolicy)
o.Expect(configErr2).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no disabled interface found in node network state")
ifaceName1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceName1).ShouldNot(o.ContainSubstring("dummy0"))
e2e.Logf("SUCCESS - no disabled interface found in node network state")
g.By("3.5 Verify the interface is down on the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
matched, matchErr2 := regexp.MatchString("dummy\\s+--", ifaceList2)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - interface is down on the node")
g.By("4. OCP-46379-Removing interface on node")
g.By("4.1 Configure NNCP for removing interface")
ifacePolicy = ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy0",
descr: "remove interface",
ifacetype: "dummy",
state: "absent",
template: ifacePolicyTemplate,
}
result, configErr3 := configIface(oc, ifacePolicy)
o.Expect(configErr3).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
g.By("4.2 Verify the policy is applied")
nncpErr3 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr3, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("4.3 Verify the status of enactments is updated")
nnceErr3 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr3, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("4.4 Verify no removed interface found in node network state")
ifaceName2, nnsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr3).NotTo(o.HaveOccurred())
o.Expect(ifaceName2).ShouldNot(o.ContainSubstring("dummy0"))
e2e.Logf("SUCCESS - no removed interface found in node network state")
g.By("4.5 Verify the interface is removed from the node")
ifaceList3, ifaceErr3 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr3).NotTo(o.HaveOccurred())
matched, matchErr3 := regexp.MatchString("dummy0", ifaceList3)
o.Expect(matchErr3).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeFalse())
e2e.Logf("SUCCESS - interface is removed from the node")
})
| |||||
test case
|
openshift/openshift-tests-private
|
d1c7e897-c27e-4305-ac19-3a9d3d1f9437
|
Author:qiowang-LEVEL0-Critical-46329-Configure bond on node [Disruptive]
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-LEVEL0-Critical-46329-Configure bond on node [Disruptive]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Creating bond on node")
g.By("2.1 Configure NNCP for creating bond")
policyName := "bond-policy-46329"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
bondPolicyTemplate := generateTemplateAbsolutePath("bond-policy-template.yaml")
bondPolicy := bondPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "bond01",
descr: "create bond",
port1: "dummy1",
port2: "dummy2",
state: "up",
template: bondPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bondPolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bondPolicy.port1)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bondPolicy.port2)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bondPolicy.ifacename)
}
}()
configErr1 := configBond(oc, bondPolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created bond found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="bond01")].state}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
e2e.Logf("SUCCESS - the created bond found in node network state")
g.By("2.5 Verify the bond is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
matched, matchErr1 := regexp.MatchString("bond\\s+bond01", ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - bond is up and active on the node")
g.By("3. Remove bond on node")
g.By("3.1 Configure NNCP for removing bond")
bondPolicy = bondPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "bond01",
descr: "remove bond",
port1: "dummy1",
port2: "dummy2",
state: "absent",
template: bondPolicyTemplate,
}
configErr2 := configBond(oc, bondPolicy)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no removed bond found in node network state")
ifaceName1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceName1).ShouldNot(o.ContainSubstring("bond01"))
e2e.Logf("SUCCESS - no removed bond found in node network state")
g.By("3.5 Verify the bond is removed from the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
matched, matchErr2 := regexp.MatchString("bond01", ifaceList2)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeFalse())
e2e.Logf("SUCCESS - bond is removed from the node")
})
| |||||
test case
|
openshift/openshift-tests-private
|
48b52cba-f0b9-4b0b-a9b3-ea5171772eb4
|
Author:qiowang-Medium-46383-VLAN [Disruptive]
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-Medium-46383-VLAN [Disruptive]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Creating vlan on node")
g.By("2.1 Configure NNCP for creating vlan")
policyName := "vlan-policy-46383"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeList).NotTo(o.BeEmpty())
nodeName := nodeList[0]
vlanPolicyTemplate := generateTemplateAbsolutePath("vlan-policy-template.yaml")
vlanPolicy := vlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy3.101",
descr: "create vlan",
baseiface: "dummy3",
vlanid: 101,
state: "up",
template: vlanPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, vlanPolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", vlanPolicy.ifacename)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", vlanPolicy.baseiface)
}
}()
configErr1 := vlanPolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created vlan found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+vlanPolicy.ifacename+`")].state}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
e2e.Logf("SUCCESS - the created vlan found in node network state")
g.By("2.5 Verify the vlan is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
matched, matchErr1 := regexp.MatchString("vlan\\s+"+vlanPolicy.ifacename, ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())
e2e.Logf("SUCCESS - vlan is up and active on the node")
g.By("3. Remove vlan on node")
g.By("3.1 Configure NNCP for removing vlan")
vlanPolicy = vlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummy3.101",
descr: "remove vlan",
baseiface: "dummy3",
vlanid: 101,
state: "absent",
template: vlanPolicyTemplate,
}
configErr2 := vlanPolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no removed vlan found in node network state")
ifaceName1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceName1).ShouldNot(o.ContainSubstring(vlanPolicy.ifacename))
e2e.Logf("SUCCESS - no removed vlan found in node network state")
g.By("3.5 Verify the vlan is removed from the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceList2).ShouldNot(o.ContainSubstring(vlanPolicy.ifacename))
e2e.Logf("SUCCESS - vlan is removed from the node")
})
| |||||
test case
|
openshift/openshift-tests-private
|
87fa8e4f-57b0-4ab1-9bd7-422393f562ff
|
Author:qiowang-Medium-53346-Verify that it is able to reset linux-bridge vlan-filtering with vlan is empty [Disruptive]
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-Medium-53346-Verify that it is able to reset linux-bridge vlan-filtering with vlan is empty [Disruptive]", func() {
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Creating linux-bridge with vlan-filtering")
g.By("2.1 Configure NNCP for creating linux-bridge")
policyName := "bridge-policy-53346"
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeList).NotTo(o.BeEmpty())
nodeName := nodeList[0]
bridgePolicyTemplate1 := generateTemplateAbsolutePath("bridge-policy-template.yaml")
bridgePolicy := bridgevlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
descr: "create linux-bridge with vlan-filtering",
port: "dummy4",
state: "up",
template: bridgePolicyTemplate1,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bridgePolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bridgePolicy.port)
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bridgePolicy.ifacename)
}
}()
configErr1 := bridgePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the created bridge found in node network state")
ifaceState, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="linux-br0")].state}`).Output()
bridgePort1, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="linux-br0")].bridge.port[?(@.name=="dummy4")]}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(ifaceState).Should(o.ContainSubstring("up"))
o.Expect(bridgePort1).Should(o.ContainSubstring("vlan"))
e2e.Logf("SUCCESS - the created bridge found in node network state")
g.By("2.5 Verify the bridge is up and active, vlan-filtering is enabled")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
vlanFilter1, vlanErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show", bridgePolicy.ifacename)
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(vlanErr1).NotTo(o.HaveOccurred())
matched1, matchErr1 := regexp.MatchString("bridge\\s+"+bridgePolicy.ifacename, ifaceList1)
o.Expect(matchErr1).NotTo(o.HaveOccurred())
o.Expect(matched1).To(o.BeTrue())
matched2, matchErr2 := regexp.MatchString("bridge.vlan-filtering:\\s+yes", vlanFilter1)
o.Expect(matchErr2).NotTo(o.HaveOccurred())
o.Expect(matched2).To(o.BeTrue())
e2e.Logf("SUCCESS - bridge is up and active, vlan-filtering is enabled")
g.By("3. Reset linux-bridge vlan-filtering with vlan: {}")
g.By("3.1 Configure NNCP for reset linux-bridge vlan-filtering")
bridgePolicyTemplate2 := generateTemplateAbsolutePath("reset-bridge-vlan-policy-template.yaml")
bridgePolicy = bridgevlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
descr: "reset linux-bridge vlan-filtering",
port: "dummy4",
state: "up",
template: bridgePolicyTemplate2,
}
configErr2 := bridgePolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("3.4 Verify no linux-bridge vlan-filtering found in node network state")
bridgePort2, nnsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="linux-br0")].bridge.port[?(@.name=="dummy4")]}`).Output()
o.Expect(nnsErr3).NotTo(o.HaveOccurred())
o.Expect(bridgePort2).ShouldNot(o.ContainSubstring("vlan"))
e2e.Logf("SUCCESS - no linux-bridge vlan-filtering found in node network state")
g.By("3.5 Verify the linux-bridge vlan-filtering is disabled")
vlanFilter2, vlanErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show", bridgePolicy.ifacename)
o.Expect(vlanErr2).NotTo(o.HaveOccurred())
matched3, matchErr3 := regexp.MatchString("bridge.vlan-filtering:\\s+no", vlanFilter2)
o.Expect(matchErr3).NotTo(o.HaveOccurred())
o.Expect(matched3).To(o.BeTrue())
e2e.Logf("SUCCESS - linux-bridge vlan-filtering is disabled")
g.By("4. Remove linux-bridge on node")
g.By("4.1 Configure NNCP for remove linux-bridge")
bridgePolicy = bridgevlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
descr: "remove linux-bridge",
port: "dummy4",
state: "absent",
template: bridgePolicyTemplate2,
}
configErr3 := bridgePolicy.configNNCP(oc)
o.Expect(configErr3).NotTo(o.HaveOccurred())
g.By("4.2 Verify the policy is applied")
nncpErr3 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr3, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("4.3 Verify the status of enactments is updated")
nnceErr3 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr3, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("4.4 Verify no removed linux-bridge found in node network state")
ifaceName2, nnsErr4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*].name}").Output()
o.Expect(nnsErr4).NotTo(o.HaveOccurred())
o.Expect(ifaceName2).ShouldNot(o.ContainSubstring(bridgePolicy.ifacename))
e2e.Logf("SUCCESS - no removed linux-bridge found in node network state")
g.By("4.5 Verify the linux-bridge is removed from the node")
ifaceList2, ifaceErr3 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr3).NotTo(o.HaveOccurred())
o.Expect(ifaceList2).ShouldNot(o.ContainSubstring(bridgePolicy.ifacename))
e2e.Logf("SUCCESS - linux-bridge is removed from the node")
})
| |||||
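A hedged sketch of the desiredState fragments the two bridge templates above presumably render: a populated vlan map on the bridge port enables vlan-filtering, and an empty map resets it. The trunk id range is an illustrative assumption; everything else is taken from the test:

    interfaces:
    - name: linux-br0
      type: linux-bridge
      state: up
      bridge:
        port:
        - name: dummy4
          vlan:                     # a populated vlan map enables bridge.vlan-filtering
            mode: trunk
            trunk-tags:
            - id-range:             # assumed range; not shown in the test
                min: 101
                max: 110

The reset template (reset-bridge-vlan-policy-template.yaml) then presumably carries the same interface with the port's vlan map emptied:

        port:
        - name: dummy4
          vlan: {}                  # empty map resets vlan-filtering to "no"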
test case
|
openshift/openshift-tests-private
|
2db803c5-c651-4c54-83dd-2a9d4861c0df
|
Author:qiowang-NonPreRelease-Medium-46327-Medium-46795-Medium-64854-Static IP and Route can be applied [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-NonPreRelease-Medium-46327-Medium-46795-Medium-64854-Static IP and Route can be applied [Disruptive]", func() {
var (
ipAddrV4 = "192.0.2.251"
destAddrV4 = "198.51.100.0/24"
nextHopAddrV4 = "192.0.2.1"
ipAddrV6 = "2001:db8::1:1"
destAddrV6 = "2001:dc8::/64"
nextHopAddrV6 = "2001:db8::1:2"
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
g.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Apply static IP and Route on node")
g.By("2.1 Configure NNCP for static IP and Route")
policyName := "static-ip-route-46327"
policyTemplate := generateTemplateAbsolutePath("apply-static-ip-route-template.yaml")
stIPRoutePolicy := stIPRoutePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummyst",
descr: "apply static ip and route",
state: "up",
ipaddrv4: ipAddrV4,
destaddrv4: destAddrV4,
nexthopaddrv4: nextHopAddrV4,
ipaddrv6: ipAddrV6,
destaddrv6: destAddrV6,
nexthopaddrv6: nextHopAddrV6,
template: policyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, stIPRoutePolicy.ifacename) {
exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", stIPRoutePolicy.ifacename)
}
}()
configErr := stIPRoutePolicy.configNNCP(oc)
o.Expect(configErr).NotTo(o.HaveOccurred())
g.By("2.2 Verify the policy is applied")
nncpErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify the static ip and route found in node network state")
iface, nnsIfaceErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+stIPRoutePolicy.ifacename+`")]}`).Output()
o.Expect(nnsIfaceErr).NotTo(o.HaveOccurred())
o.Expect(iface).Should(o.ContainSubstring(ipAddrV4))
o.Expect(iface).Should(o.ContainSubstring(ipAddrV6))
routes, nnsRoutesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.next-hop-interface=="`+stIPRoutePolicy.ifacename+`")]}`).Output()
o.Expect(nnsRoutesErr).NotTo(o.HaveOccurred())
o.Expect(routes).Should(o.ContainSubstring(destAddrV4))
o.Expect(routes).Should(o.ContainSubstring(nextHopAddrV4))
o.Expect(routes).Should(o.ContainSubstring(destAddrV6))
o.Expect(routes).Should(o.ContainSubstring(nextHopAddrV6))
e2e.Logf("SUCCESS - the static ip and route found in node network state")
g.By("2.5 Verify the static ip and route are shown on the node")
ifaceInfo, ifaceErr := exutil.DebugNode(oc, nodeName, "ip", "addr", "show", stIPRoutePolicy.ifacename)
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
o.Expect(ifaceInfo).Should(o.ContainSubstring(ipAddrV4))
o.Expect(ifaceInfo).Should(o.ContainSubstring(ipAddrV6))
v4Routes, routesV4Err := exutil.DebugNode(oc, nodeName, "ip", "-4", "route")
o.Expect(routesV4Err).NotTo(o.HaveOccurred())
o.Expect(v4Routes).Should(o.ContainSubstring(destAddrV4 + " via " + nextHopAddrV4 + " dev " + stIPRoutePolicy.ifacename))
v6Routes, routesV6Err := exutil.DebugNode(oc, nodeName, "ip", "-6", "route")
o.Expect(routesV6Err).NotTo(o.HaveOccurred())
o.Expect(v6Routes).Should(o.ContainSubstring(destAddrV6 + " via " + nextHopAddrV6 + " dev " + stIPRoutePolicy.ifacename))
e2e.Logf("SUCCESS - static ip and route are shown on the node")
// Step 3 is for https://issues.redhat.com/browse/OCPBUGS-8229
g.By("3. Apply default gateway in non-default route table")
g.By("3.1 Configure NNCP for default gateway")
policyName2 := "default-route-64854"
policyTemplate2 := generateTemplateAbsolutePath("apply-route-template.yaml")
routePolicy := routePolicyResource{
name: policyName2,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummyst",
destaddr: "0.0.0.0/0",
nexthopaddr: nextHopAddrV4,
tableid: 66,
template: policyTemplate2,
}
defer removeResource(oc, true, true, "nncp", policyName2, "-n", opNamespace)
configErr2 := routePolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
g.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName2, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
g.By("3.3 Verify the status of enactments is updated")
nnceName2 := nodeName + "." + policyName2
nnceErr2 := checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
g.By("3.4 Verify the default gateway found in node network state")
routes, nnsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.table-id==66)]}`).Output()
o.Expect(nnsErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(routes, "0.0.0.0/0")).Should(o.BeTrue())
o.Expect(strings.Contains(routes, nextHopAddrV4)).Should(o.BeTrue())
g.By("3.5 Verify the default gateway is shown on the node")
defaultGW, gwErr := exutil.DebugNode(oc, nodeName, "ip", "-4", "route", "show", "default", "table", "66")
o.Expect(gwErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(defaultGW, "default via "+nextHopAddrV4+" dev "+stIPRoutePolicy.ifacename)).Should(o.BeTrue())
g.By("3.6 Verify there is no error logs for pinging default gateway shown in nmstate-handler pod")
podName, getPodErr := exutil.GetPodName(oc, opNamespace, "component=kubernetes-nmstate-handler", nodeName)
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
logs, logErr := exutil.GetSpecificPodLogs(oc, opNamespace, "", podName, "")
o.Expect(logErr).ShouldNot(o.HaveOccurred())
o.Expect(logs).NotTo(o.BeEmpty())
o.Expect(strings.Contains(logs, "error pinging default gateway")).Should(o.BeFalse())
g.By("4. Remove static ip and route on node")
g.By("4.1 Configure NNCP for removing static ip and route")
policyTemplate = generateTemplateAbsolutePath("remove-static-ip-route-template.yaml")
stIPRoutePolicy = stIPRoutePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "dummyst",
descr: "remove static ip and route",
state: "absent",
ipaddrv4: ipAddrV4,
ipaddrv6: ipAddrV6,
template: policyTemplate,
}
configErr1 := stIPRoutePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
g.By("4.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
g.By("4.3 Verify the status of enactments is updated")
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("4.4 Verify static ip and route cannot be found in node network state")
iface1, nnsIfaceErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-ojsonpath={.status.currentState.interfaces[*]}").Output()
o.Expect(nnsIfaceErr1).NotTo(o.HaveOccurred())
o.Expect(iface1).ShouldNot(o.ContainSubstring(stIPRoutePolicy.ifacename))
o.Expect(iface1).ShouldNot(o.ContainSubstring(ipAddrV4))
o.Expect(iface1).ShouldNot(o.ContainSubstring(ipAddrV6))
routes1, nnsRoutesErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes}`).Output()
o.Expect(nnsRoutesErr1).NotTo(o.HaveOccurred())
o.Expect(routes1).ShouldNot(o.ContainSubstring(destAddrV4))
o.Expect(routes1).ShouldNot(o.ContainSubstring(nextHopAddrV4))
o.Expect(routes1).ShouldNot(o.ContainSubstring(destAddrV6))
o.Expect(routes1).ShouldNot(o.ContainSubstring(nextHopAddrV6))
g.By("4.5 Verify the static ip and route are removed from the node")
ifaceInfo1, ifaceErr1 := exutil.DebugNode(oc, nodeName, "ip", "addr", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(ifaceInfo1).ShouldNot(o.ContainSubstring(stIPRoutePolicy.ifacename))
o.Expect(ifaceInfo1).ShouldNot(o.ContainSubstring(ipAddrV4))
o.Expect(ifaceInfo1).ShouldNot(o.ContainSubstring(ipAddrV6))
v4Routes1, routesV4Err1 := exutil.DebugNode(oc, nodeName, "ip", "-4", "route")
o.Expect(routesV4Err1).NotTo(o.HaveOccurred())
o.Expect(v4Routes1).ShouldNot(o.ContainSubstring(destAddrV4 + " via " + nextHopAddrV4 + " dev " + stIPRoutePolicy.ifacename))
v6Routes1, routesV6Err1 := exutil.DebugNode(oc, nodeName, "ip", "-6", "route")
o.Expect(routesV6Err1).NotTo(o.HaveOccurred())
o.Expect(v6Routes1).ShouldNot(o.ContainSubstring(destAddrV6 + " via " + nextHopAddrV6 + " dev " + stIPRoutePolicy.ifacename))
e2e.Logf("SUCCESS - static ip and route are removed from the node")
})
| |||||
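A minimal sketch of the desiredState the apply-static-ip-route template presumably renders, using the addresses from the test; the prefix lengths are assumptions, since the test only checks the addresses and routes themselves:

    interfaces:
    - name: dummyst
      type: dummy
      state: up
      ipv4:
        enabled: true
        address:
        - ip: 192.0.2.251
          prefix-length: 24         # assumed prefix
      ipv6:
        enabled: true
        address:
        - ip: 2001:db8::1:1
          prefix-length: 96         # assumed prefix
    routes:
      config:
      - destination: 198.51.100.0/24
        next-hop-address: 192.0.2.1
        next-hop-interface: dummyst
      - destination: 2001:dc8::/64
        next-hop-address: 2001:db8::1:2
        next-hop-interface: dummyst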
test case
|
openshift/openshift-tests-private
|
25583550-fada-49a3-986b-2c558973fa01
|
Author:qiowang-NonPreRelease-Medium-66174-Verify knmstate operator support for IPv6 single stack - ipv6 default route [Disruptive]
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-NonPreRelease-Medium-66174-Verify knmstate operator support for IPv6 single stack - ipv6 default route [Disruptive]", func() {
exutil.By("Check the platform if it is suitable for running the test")
platform := checkPlatform(oc)
ipStackType := checkIPStackType(oc)
if ipStackType != "ipv6single" || !strings.Contains(platform, "baremetal") {
g.Skip("Should be tested on IPv6 single stack platform(IPI BM), skipping!")
}
var (
destAddr = "::/0"
nextHopAddr = "fd00:1101::1"
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
cmd := `nmcli dev | grep -v 'ovs' | egrep 'ethernet +connected' | awk '{print $1}'`
ifNameInfo, ifNameErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
o.Expect(ifNameErr).NotTo(o.HaveOccurred())
ifName := strings.Split(ifNameInfo, "\n")[0]
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Apply default routes on node")
exutil.By("2.1 Configure NNCP for default route in main route table")
policyTemplate := generateTemplateAbsolutePath("apply-route-template.yaml")
policyName1 := "default-route-in-main-table-66174"
routePolicy1 := routePolicyResource{
name: policyName1,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 254,
template: policyTemplate,
}
defer deleteNNCP(oc, policyName1)
defer exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "del", "default", "via", routePolicy1.nexthopaddr, "dev", routePolicy1.ifacename, "table", strconv.Itoa(routePolicy1.tableid))
configErr1 := routePolicy1.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("2.2 Configure NNCP for default route in custom route table")
policyName2 := "default-route-in-custom-table-66174"
routePolicy2 := routePolicyResource{
name: policyName2,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 66,
template: policyTemplate,
}
defer deleteNNCP(oc, policyName2)
defer exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "del", "default", "via", routePolicy2.nexthopaddr, "dev", routePolicy2.ifacename, "table", strconv.Itoa(routePolicy2.tableid))
configErr2 := routePolicy2.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
exutil.By("2.3 Verify the policies are applied")
nncpErr1 := checkNNCPStatus(oc, policyName1, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
nncpErr2 := checkNNCPStatus(oc, policyName2, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policies are applied")
exutil.By("2.4 Verify the status of enactments are updated")
nnceName1 := nodeName + "." + policyName1
nnceErr1 := checkNNCEStatus(oc, nnceName1, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
nnceName2 := nodeName + "." + policyName2
nnceErr2 := checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments are updated")
exutil.By("2.5 Verify the default routes found in node network state")
routes, nnsRoutesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.destination=="`+destAddr+`")]}`).Output()
o.Expect(nnsRoutesErr).NotTo(o.HaveOccurred())
o.Expect(routes).Should(o.ContainSubstring(routePolicy1.nexthopaddr))
o.Expect(routes).Should(o.ContainSubstring(routePolicy2.nexthopaddr))
e2e.Logf("SUCCESS - the default routes found in node network state")
exutil.By("2.6 Verify the default routes are shown on the node")
route1, routeErr1 := exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy1.tableid))
o.Expect(routeErr1).NotTo(o.HaveOccurred())
o.Expect(route1).Should(o.ContainSubstring("default via " + routePolicy1.nexthopaddr + " dev " + routePolicy1.ifacename))
route2, routeErr2 := exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy2.tableid))
o.Expect(routeErr2).NotTo(o.HaveOccurred())
o.Expect(route2).Should(o.ContainSubstring("default via " + routePolicy2.nexthopaddr + " dev " + routePolicy2.ifacename))
e2e.Logf("SUCCESS - default routes are shown on the node")
exutil.By("3. Remove default routes on node")
exutil.By("3.1 Configure NNCP for removing default route in main route table")
rmpolicyTemplate := generateTemplateAbsolutePath("remove-route-template.yaml")
routePolicy1 = routePolicyResource{
name: policyName1,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
state: "absent",
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 254,
template: rmpolicyTemplate,
}
configErr1 = routePolicy1.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("3.2 Configure NNCP for removing default route in custom route table")
routePolicy2 = routePolicyResource{
name: policyName2,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifName,
state: "absent",
destaddr: destAddr,
nexthopaddr: nextHopAddr,
tableid: 66,
template: rmpolicyTemplate,
}
configErr2 = routePolicy2.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
exutil.By("3.3 Verify the policies are applied")
nncpErr1 = checkNNCPStatus(oc, policyName1, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
nncpErr2 = checkNNCPStatus(oc, policyName2, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policies are applied")
exutil.By("3.4 Verify the status of enactments are updated")
nnceErr1 = checkNNCEStatus(oc, nnceName1, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
nnceErr2 = checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments are updated")
exutil.By("3.5 Verify the removed default routes cannot be found in node network state")
routes1, nnsRoutesErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.routes.config[?(@.destination=="`+destAddr+`")]}`).Output()
o.Expect(nnsRoutesErr1).NotTo(o.HaveOccurred())
o.Expect(routes1).ShouldNot(o.ContainSubstring(routePolicy1.nexthopaddr))
o.Expect(routes1).ShouldNot(o.ContainSubstring(routePolicy2.nexthopaddr))
e2e.Logf("SUCCESS - the default routes cannot be found in node network state")
exutil.By("3.6 Verify the default routes are removed from the node")
route1, routeErr1 = exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy1.tableid))
o.Expect(routeErr1).NotTo(o.HaveOccurred())
o.Expect(route1).ShouldNot(o.ContainSubstring("default via " + routePolicy1.nexthopaddr + " dev " + routePolicy1.ifacename))
route2, routeErr2 = exutil.DebugNode(oc, nodeName, "ip", "-6", "route", "show", "default", "table", strconv.Itoa(routePolicy2.tableid))
o.Expect(routeErr2).NotTo(o.HaveOccurred())
o.Expect(route2).ShouldNot(o.ContainSubstring("default via " + routePolicy2.nexthopaddr + " dev " + routePolicy2.ifacename))
e2e.Logf("SUCCESS - default routes are removed from the node")
})
| |||||
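A hedged sketch of the routes fragment the apply-route template presumably renders for the custom-table case, with values from the test; removal (remove-route-template.yaml) presumably marks the same entry absent:

    routes:
      config:
      - destination: ::/0
        next-hop-address: fd00:1101::1
        next-hop-interface: <nic>   # the connected ethernet interface discovered via nmcli
        table-id: 66

    # assumed shape of the removal variant:
      - destination: ::/0
        next-hop-address: fd00:1101::1
        next-hop-interface: <nic>
        table-id: 66
        state: absent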
test case
|
openshift/openshift-tests-private
|
8fee6e5b-01d5-4795-8c98-644663a7ef92
|
Author:qiowang-NonPreRelease-Medium-71145-configure bond interface and 70 vlans based on the bond then reboot node, check the boot time [Disruptive] [Slow]
|
['"regexp"', '"strconv"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-NonPreRelease-Medium-71145-configure bond interface and 70 vlans based on the bond then reboot node, check the boot time [Disruptive] [Slow]", func() {
e2e.Logf("It is for OCPBUGS-22771, OCPBUGS-25753, OCPBUGS-26026")
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
var ifacesAdded []string
for i := 101; i <= 170; i++ {
ifacesAdded = append(ifacesAdded, "bond12."+strconv.Itoa(i))
}
ifacesAdded = append(ifacesAdded, "bond12", "dummy1", "dummy2")
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Create bond interface and 70 vlans based on the bond")
exutil.By("2.1 Configure NNCP for bond and vlans")
policyName := "ocpbug-22771-25753-26026-bond-70vlans"
bondPolicyTemplate := generateTemplateAbsolutePath("ocpbug-22771-25753-26026.yaml")
bondPolicy := bondPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "bond12",
descr: "test bond-vlans",
port1: "dummy1",
port2: "dummy2",
state: "up",
template: bondPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
allIfaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
var deferCmd string
for _, ifaceAdded := range ifacesAdded {
if strings.Contains(allIfaces, ifaceAdded) {
deferCmd = deferCmd + " nmcli con delete " + ifaceAdded + ";"
}
}
if deferCmd != "" {
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", deferCmd)
}
}()
configErr := configBond(oc, bondPolicy)
o.Expect(configErr).NotTo(o.HaveOccurred())
exutil.By("2.2 Verify the policy is applied")
nncpErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
exutil.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
exutil.By("2.4 Verify the bond and vlans found in node network state")
iface, nnsIfaceErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[*].name}`).Output()
o.Expect(nnsIfaceErr).NotTo(o.HaveOccurred())
for _, ifaceAdded := range ifacesAdded {
o.Expect(strings.Contains(iface, ifaceAdded)).Should(o.BeTrue())
}
e2e.Logf("SUCCESS - the bond and vlans found in node network state")
exutil.By("2.5 Verify the bond and vlans are shown on the node")
ifaceInfo, ifaceErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
for _, ifaceAdded := range ifacesAdded {
o.Expect(strings.Contains(ifaceInfo, ifaceAdded)).Should(o.BeTrue())
}
e2e.Logf("SUCCESS - bond and vlans are shown on the node")
exutil.By("3. Reboot the node")
defer checkNodeStatus(oc, nodeName, "Ready")
rebootNode(oc, nodeName)
checkNodeStatus(oc, nodeName, "NotReady")
checkNodeStatus(oc, nodeName, "Ready")
exutil.By("4. Check the boot time")
cmd := `systemd-analyze | head -1`
analyzeOutput, analyzeErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
o.Expect(analyzeErr).NotTo(o.HaveOccurred())
e2e.Logf("Expected boot time should be less than 3 minutes(180s)")
reTime := regexp.MustCompile(`(\(initrd\) \+ ?)([\s\S]+)( \(userspace\)?)`)
bootTime := reTime.FindStringSubmatch(analyzeOutput)[2]
e2e.Logf("boot time(userspace) is: %v", bootTime)
var totalSec int
if strings.Contains(bootTime, "min") {
reMin := regexp.MustCompile(`(\d+)min`)
getMin := reMin.FindStringSubmatch(bootTime)[1]
bootMin, _ := strconv.Atoi(getMin)
totalSec = totalSec + bootMin*60
}
reSec := regexp.MustCompile(`(\d+)(\.\d+)?s`)
getSec := reSec.FindStringSubmatch(bootTime)[1]
bootSec, _ := strconv.Atoi(getSec)
totalSec = totalSec + bootSec
e2e.Logf("boot total seconds(userspace) is: %v", totalSec)
o.Expect(totalSec < 180).To(o.BeTrue())
exutil.By("5. Check the node logs")
journalCmd := `journalctl -u ovs-configuration -b`
logs, logsErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", journalCmd)
o.Expect(logsErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(logs, "Cannot bring up connection br-ex after 10 attempts")).ShouldNot(o.BeTrue())
o.Expect(strings.Contains(logs, "configure-ovs exited with error")).ShouldNot(o.BeTrue())
})
| |||||
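A minimal sketch of the bond-plus-vlans desiredState the template above presumably renders; the test creates bond12 over dummy1/dummy2 and vlans bond12.101 through bond12.170, so only one vlan entry is shown here, and the bond mode is an assumption:

    interfaces:
    - name: dummy1
      type: dummy
      state: up
    - name: dummy2
      type: dummy
      state: up
    - name: bond12
      type: bond
      state: up
      link-aggregation:
        mode: active-backup         # assumed mode; the template's choice is not shown
        port:
        - dummy1
        - dummy2
    - name: bond12.101              # repeated for ids 101..170
      type: vlan
      state: up
      vlan:
        base-iface: bond12
        id: 101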
test case
|
openshift/openshift-tests-private
|
0961d05b-7b91-4c73-b398-e9f9790c4f97
|
Author:qiowang-Medium-73027-Verify vlan of bond will get autoconnect when bond ports link revived [Disruptive]
|
['"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-Medium-73027-Verify vlan of bond will get autoconnect when bond ports link revived [Disruptive]", func() {
e2e.Logf("It is for OCPBUGS-11300, OCPBUGS-23023")
var (
ipAddr1V4 = "192.0.2.251"
ipAddr2V4 = "192.0.2.252"
ipAddr1V6 = "2001:db8::1:1"
ipAddr2V6 = "2001:db8::1:2"
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Create vlan over bond")
exutil.By("2.1 Configure NNCP for vlan over bond")
policyName := "ocpbug-11300-23023-vlan-over-bond"
bondVlanPolicyTemplate := generateTemplateAbsolutePath("ocpbug-11300-23023.yaml")
bondVlanPolicy := bondvlanPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
descr: "test bond-vlans",
bondname: "bond12",
port1: "dummy1",
port1type: "dummy",
port2: "dummy2",
port2type: "dummy",
vlanifname: "bond12.101",
vlanid: 101,
ipaddrv4: ipAddr1V4,
ipaddrv6: ipAddr1V6,
state: "up",
template: bondVlanPolicyTemplate,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
var ifacesAdded []string
ifacesAdded = append(ifacesAdded, bondVlanPolicy.vlanifname, bondVlanPolicy.bondname, bondVlanPolicy.port1, bondVlanPolicy.port2)
var deferCmd string
for _, ifaceAdded := range ifacesAdded {
if strings.Contains(ifaces, ifaceAdded) {
deferCmd = deferCmd + " nmcli con delete " + ifaceAdded + ";"
}
}
if deferCmd != "" {
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", deferCmd)
}
}()
configErr1 := bondVlanPolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
exutil.By("2.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyName
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
exutil.By("2.4 Verify the vlan interface ip addresses are shown correctly")
ipCmd := "ip address show " + bondVlanPolicy.vlanifname
ifaceInfo1, ifaceErr1 := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceInfo1, ipAddr1V4)).Should(o.BeTrue())
o.Expect(strings.Contains(ifaceInfo1, ipAddr1V6)).Should(o.BeTrue())
e2e.Logf("SUCCESS - vlan interface ip addresses are shown on the node")
exutil.By("3. edit nncp")
exutil.By("3.1 update ip address")
patchContent := `[{"op": "replace", "path": "/spec/desiredState/interfaces", "value": [{"name": "` + bondVlanPolicy.vlanifname + `", "type": "vlan", "state": "up", "vlan":{"base-iface": "` + bondVlanPolicy.bondname + `", "id": ` + strconv.Itoa(bondVlanPolicy.vlanid) + `}, "ipv4":{"address":[{"ip": "` + ipAddr2V4 + `", "prefix-length": 24}], "enabled":true}, "ipv6":{"address":[{"ip": "` + ipAddr2V6 + `", "prefix-length": 96}], "enabled":true}}]}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("nncp", policyName, "--type=json", "-p", patchContent).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
exutil.By("3.3 Verify the status of enactments is updated")
nnceErr2 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
e2e.Logf("SUCCESS - status of enactments is updated")
exutil.By("3.4 Verify the vlan interface ip addresses are shown correctly")
ifaceInfo2, ifaceErr2 := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceInfo2, ipAddr2V4)).Should(o.BeTrue())
o.Expect(strings.Contains(ifaceInfo2, ipAddr2V6)).Should(o.BeTrue())
e2e.Logf("SUCCESS - vlan interface ip addresses are shown on the node")
exutil.By("4. Bring all bond ports link down, wait for the vlan become inactive")
downPortCmd := "ip link set " + bondVlanPolicy.port1 + " down; ip link set " + bondVlanPolicy.port2 + " down"
_, downPortErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", downPortCmd)
o.Expect(downPortErr).NotTo(o.HaveOccurred())
vlanErr1 := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
ifaceInfo, ifaceErr := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
if !strings.Contains(ifaceInfo, "inet") {
return true, nil
}
e2e.Logf("vlan still active and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(vlanInfo1, "Fail to inactive vlan")
exutil.By("5. Bring all bond ports link up again, vlan will reactive with the original ip addresses")
upPortCmd := "ip link set " + bondVlanPolicy.port1 + " up; ip link set " + bondVlanPolicy.port2 + " up"
_, upPortErr := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", upPortCmd)
o.Expect(upPortErr).NotTo(o.HaveOccurred())
vlanErr2 := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
ifaceInfo, ifaceErr := exutil.DebugNode(oc, nodeName, "bash", "-c", ipCmd)
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaceInfo, ipAddr2V4) && strings.Contains(ifaceInfo, ipAddr2V6) {
return true, nil
}
e2e.Logf("vlan still down and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(vlanInfo2, "Fail to reactive vlan with the original ip addresses")
})
| |||||
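For reference, the vlan-over-bond interface that the JSON patch in step 3.1 writes corresponds to a desiredState entry like the following; the addresses and prefix lengths are taken directly from the patch:

    interfaces:
    - name: bond12.101
      type: vlan
      state: up
      vlan:
        base-iface: bond12
        id: 101
      ipv4:
        enabled: true
        address:
        - ip: 192.0.2.252
          prefix-length: 24
      ipv6:
        enabled: true
        address:
        - ip: 2001:db8::1:2
          prefix-length: 96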
test case
|
openshift/openshift-tests-private
|
bed16e18-4f10-4875-a311-78ef5c4fc434
|
Author:meinli-High-76212-Validate Metrics collection for kubernetes-nmstate [Disruptive]
|
['"fmt"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:meinli-High-76212-Validate Metrics collection for kubernetes-nmstate [Disruptive]", func() {
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeName).NotTo(o.BeEmpty())
exutil.By("1. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Configure two NNCP for creating linux-bridge with hostname")
policyName := "br-test"
bridgePolicyTemplate1 := generateTemplateAbsolutePath("bridge-with-hostname-policy-template.yaml")
bridgePolicy := bridgehostnamePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: "linux-br0",
state: "up",
template: bridgePolicyTemplate1,
}
defer deleteNNCP(oc, policyName)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bridgePolicy.ifacename) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", bridgePolicy.ifacename)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr1 := bridgePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, fmt.Sprintf("%s policy applied failed", policyName))
exutil.By("3. check the metrics value with proper gauge increased")
featureNames := []string{"dhcpv4-custom-hostname"}
expectedValues := []int{1}
metricPod := getPodName(oc, opNamespace, "component=kubernetes-nmstate-metrics")
o.Expect(metricPod).ShouldNot(o.BeEmpty())
metricCmd := "curl http://127.0.0.1:8089/metrics | grep kubernetes_nmstate_features_applied"
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
// validate the metrics value increased to 2 after applying again
deleteNNCP(oc, policyName)
configErr2 := bridgePolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
nncpErr2 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, fmt.Sprintf("%s policy applied failed", policyName))
expectedValues = []int{2}
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
exutil.By("4. metrics value will decrease after update nncp with absent state")
patchCmd := `[{"op": "replace", "path": "/spec/desiredState/interfaces/0/state", "value": "absent" }]`
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("nncp", policyName, "--type=json", "-p", patchCmd).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("%s policy updated failed", policyName))
// check that the metrics value decreases by 1
expectedValues = []int{1}
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
})
| |||||
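The dhcpv4-custom-hostname feature the metric counts presumably comes from an interface fragment along these lines in the bridge-with-hostname template; the dhcp-custom-hostname value is an illustrative assumption:

    interfaces:
    - name: linux-br0
      type: linux-bridge
      state: up
      ipv4:
        enabled: true
        dhcp: true
        dhcp-custom-hostname: custom-host   # assumed value; this knob is what the metric tracks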
test case
|
openshift/openshift-tests-private
|
d7f57fe0-8c2a-4c0b-98b3-dca5fbf352be
|
Author:meinli-High-76372-Check NMstate Features Metrics Value collection [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:meinli-High-76372-Check NMstate Features Metrics Value collection [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/nmstate")
nmstateCRTemplate = filepath.Join(buildPruningBaseDir, "nmstate-cr-template.yaml")
dhcpHostnamePolicyTemplate = filepath.Join(buildPruningBaseDir, "dhcp-hostname-policy-template.yaml")
lldpPolicyTemplate = filepath.Join(buildPruningBaseDir, "lldp-policy-template.yaml")
ovnMappingPolicyTemplate = filepath.Join(buildPruningBaseDir, "ovn-mapping-policy-template.yaml")
ovsDBGlobalPolicyTemplate = filepath.Join(buildPruningBaseDir, "ovs-db-global-policy-template.yaml")
staticHostnamePolicyTemplate = filepath.Join(buildPruningBaseDir, "static-hostname-policy-template.yaml")
staticDNSPolicyTemplate = filepath.Join(buildPruningBaseDir, "global-dns-nncp-template.yaml")
dnsClearNncpTemplate = filepath.Join(buildPruningBaseDir, "global-dns-nncp-recover-template.yaml")
nodeSelectLabel = "kubernetes.io/hostname"
featureNames = []string{"dhcpv4-custom-hostname", "dhcpv6-custom-hostname", "lldp", "ovn-mapping", "ovs-db-global",
"static-hostname", "static-dns-name-server", "static-dns-search"}
expectedValues = []int{1, 1, 1, 1, 1, 1, 1, 1}
ipAddr string
)
nodeName, getNodeErr := exutil.GetFirstWorkerNode(oc)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
o.Expect(nodeName).NotTo(o.BeEmpty())
exutil.By("1. Create NMState CR")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("2. Configure NNCPs for NMstate Features")
exutil.By("2.1 Configure NNCP for creating DhcpCustomHostname NMstate Feature")
dhcpHostnamePolicy := bridgehostnamePolicyResource{
name: "dhcphostname-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
ifacename: "dummy_dhcp",
state: "up",
template: dhcpHostnamePolicyTemplate,
}
defer deleteNNCP(oc, dhcpHostnamePolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, dhcpHostnamePolicy.ifacename) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", dhcpHostnamePolicy.ifacename)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr1 := dhcpHostnamePolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
nncpErr1 := checkNNCPStatus(oc, dhcpHostnamePolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, fmt.Sprintf("%s policy applied failed", dhcpHostnamePolicy.name))
exutil.By("2.2 Configure NNCP for creating Lldp NMstate Feature")
lldpPolicy := bridgehostnamePolicyResource{
name: "lldp-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
ifacename: "dummy_lldp",
state: "up",
template: lldpPolicyTemplate,
}
defer deleteNNCP(oc, lldpPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, lldpPolicy.ifacename) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "delete", lldpPolicy.ifacename)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr2 := lldpPolicy.configNNCP(oc)
o.Expect(configErr2).NotTo(o.HaveOccurred())
nncpErr2 := checkNNCPStatus(oc, lldpPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, fmt.Sprintf("%s policy applied failed", lldpPolicy.name))
exutil.By("2.3 Configure NNCP for creating OvnMapping NMstate Feature")
ovnMappingPolicy := ovnMappingPolicyResource{
name: "ovnmapping-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
localnet1: "blue",
bridge1: "ovsbr1",
template: ovnMappingPolicyTemplate,
}
defer deleteNNCP(oc, ovnMappingPolicy.name)
defer func() {
ovnmapping, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "get", "Open_vSwitch", ".", "external_ids:ovn-bridge-mappings")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ovnmapping, ovnMappingPolicy.localnet1) {
// ovs-vsctl can only use "set" to reserve some fields
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "set", "Open_vSwitch", ".", "external_ids:ovn-bridge-mappings=\"physnet:br-ex\"")
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr3 := ovnMappingPolicy.configNNCP(oc)
o.Expect(configErr3).NotTo(o.HaveOccurred())
nncpErr3 := checkNNCPStatus(oc, ovnMappingPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr3, fmt.Sprintf("%s policy applied failed", ovnMappingPolicy.name))
exutil.By("2.4 Configure NNCP for creating OvsDBGlobal NMstate Feature")
ovsDBGlobalPolicy := ovsDBGlobalPolicyResource{
name: "ovsdbglobal-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
ovsconfig: "n-handler-threads",
ovsvalue: "2",
template: ovsDBGlobalPolicyTemplate,
}
defer deleteNNCP(oc, ovsDBGlobalPolicy.name)
defer func() {
ovsdb, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "get", "Open_vSwitch", ".", "other_config")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ovsdb, ovsDBGlobalPolicy.ovsconfig) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "ovs-vsctl", "remove", "Open_vSwitch", ".", "other_config", ovsDBGlobalPolicy.ovsconfig)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr4 := ovsDBGlobalPolicy.configNNCP(oc)
o.Expect(configErr4).NotTo(o.HaveOccurred())
nncpErr4 := checkNNCPStatus(oc, ovsDBGlobalPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr4, fmt.Sprintf("%s policy applied failed", ovsDBGlobalPolicy.name))
exutil.By("2.5 Configure NNCP for creating StaticHostname NMstate Feature")
staticHostnamePolicy := staticHostnamePolicyResource{
name: "statichostname-test",
nodelabel: nodeSelectLabel,
labelvalue: nodeName,
hostdomain: nodeName,
template: staticHostnamePolicyTemplate,
}
defer deleteNNCP(oc, staticHostnamePolicy.name)
defer func() {
hostname, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "hostnamectl")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if !strings.Contains(hostname, nodeName) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "hostnamectl", "set-hostname", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
configErr5 := staticHostnamePolicy.configNNCP(oc)
o.Expect(configErr5).NotTo(o.HaveOccurred())
nncpErr5 := checkNNCPStatus(oc, staticHostnamePolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr5, fmt.Sprintf("%s policy applied failed", staticHostnamePolicy.name))
exutil.By("2.6 Configure NNCP for creating StaticDNS NMstate Feature")
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
ipAddr = "2003::3"
}
if ipStackType == "ipv4single" {
ipAddr = "8.8.8.8"
}
dnsServerIP1 := getAvaliableNameServer(oc, nodeName)
staticDNSPolicy := staticDNSPolicyResource{
name: "staticdns-test",
nodeName: nodeName,
dnsdomain: "example.com",
serverip1: dnsServerIP1,
serverip2: ipAddr,
template: staticDNSPolicyTemplate,
}
defer deleteNNCP(oc, staticDNSPolicy.name)
defer func() {
// configure an NNCP with empty DNS servers to clear the configuration
nncpDns_clear := networkingRes{
name: "dns-" + getRandomString(),
namespace: opNamespace,
kind: "NodeNetworkConfigurationPolicy",
tempfile: dnsClearNncpTemplate,
}
nncpDns_clear.create(oc, "NAME="+nncpDns_clear.name, "NAMESPACE="+nncpDns_clear.namespace, "NODE="+nodeName)
nncpErr1 := checkNNCPStatus(oc, nncpDns_clear.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - policy is applied")
removeResource(oc, true, true, nncpDns_clear.kind, nncpDns_clear.name, "-n", nncpDns_clear.namespace)
}()
configErr6 := staticDNSPolicy.configNNCP(oc)
o.Expect(configErr6).NotTo(o.HaveOccurred())
nncpErr6 := checkNNCPStatus(oc, staticDNSPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr6, fmt.Sprintf("%s policy applied failed", staticDNSPolicy.name))
exutil.By("3. Check Metrics value for above NMstate Features")
metricPod := getPodName(oc, opNamespace, "component=kubernetes-nmstate-metrics")
o.Expect(metricPod).ShouldNot(o.BeEmpty())
metricCmd := "curl http://127.0.0.1:8089/metrics | grep kubernetes_nmstate_features_applied"
o.Eventually(func() bool {
metricOutput, err := exutil.RemoteShPodWithBash(oc, opNamespace, metricPod[0], metricCmd)
o.Expect(err).NotTo(o.HaveOccurred())
return extractMetricValue(metricOutput, featureNames, expectedValues)
}, 10*time.Second, 2*time.Second).Should(o.BeTrue(), "Metric does not match the expected value!!")
})
| |||||
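Two of the less common desiredState fragments exercised above, sketched with the values from the test (the exact repo templates may differ):

    # ovn-mapping-policy-template.yaml, presumably:
    desiredState:
      ovn:
        bridge-mappings:
        - localnet: blue
          bridge: ovsbr1
          state: present

    # ovs-db-global-policy-template.yaml, presumably:
    desiredState:
      ovs-db:
        other_config:
          n-handler-threads: "2"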
test case
|
openshift/openshift-tests-private
|
7789d8a2-6e77-4647-aab5-8c9070c8fa54
|
Author:qiowang-PreChkUpgrade-NonPreRelease-Medium-54077-Verify that the knmstate operator works as expected after the cluster upgrade [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-PreChkUpgrade-NonPreRelease-Medium-54077-Verify that the knmstate operator works as expected after the cluster upgrade [Disruptive]", func() {
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
exutil.By("1. install knmstate operator")
installNMstateOperator(oc)
exutil.By("2. Create NMState CR")
nmstateCRTemplate := generateTemplateAbsolutePath("nmstate-cr-template.yaml")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
exutil.By("3. Creating bond on node")
exutil.By("3.1 Configure NNCP for creating bond")
bondPolicy := bondPolicyResource{
name: policyNamePreUpgrade,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: bondInfName,
descr: "create bond",
port1: bondPort1,
port2: bondPort2,
state: "up",
template: bondPolicyTemplate,
}
configErr := configBond(oc, bondPolicy)
o.Expect(configErr).NotTo(o.HaveOccurred())
exutil.By("3.2 Verify the policy is applied")
nncpErr := checkNNCPStatus(oc, policyNamePreUpgrade, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy applied failed")
exutil.By("3.3 Verify the status of enactments is updated")
nnceName := nodeName + "." + policyNamePreUpgrade
nnceErr := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr, "status of enactments updated failed")
exutil.By("3.4 Verify the bond is up and active on the node")
ifaceList, ifaceErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceList, bondPolicy.ifacename)).Should(o.BeTrue())
exutil.By("3.5 Verify the created bond found in node network state")
ifaceState, nnsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+bondPolicy.ifacename+`")].state}`).Output()
o.Expect(nnsErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceState, "up")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
7ac027b3-a10d-4732-825d-aea9a597895a
|
Author:qiowang-PstChkUpgrade-NonPreRelease-Medium-54077-Verify that the knmstate operator works as expected after the cluster upgrade [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:qiowang-PstChkUpgrade-NonPreRelease-Medium-54077-Verify that the knmstate operator works as expected after the cluster upgrade [Disruptive]", func() {
nodeList, getNodeErr := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
nodeName := nodeList[0]
defer removeResource(oc, true, true, "nmstate", "nmstate", "-n", opNamespace)
defer deleteNNCP(oc, policyNamePreUpgrade)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, bondInfName) {
cmd := "nmcli con delete " + bondInfName + "; nmcli con delete " + bondPort1 + "; nmcli con delete " + bondPort2
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
}
}()
exutil.By("1. Check NMState CSV is upgraded")
majorVer, _, verErr := exutil.GetClusterVersion(oc)
o.Expect(verErr).NotTo(o.HaveOccurred())
e2e.Logf("ocp major version: %s", majorVer)
csvOutput, csvErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", opNamespace).Output()
o.Expect(csvErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(csvOutput, opName+"."+majorVer)).Should(o.BeTrue())
exutil.By("2. Check NMState CRs are running")
result, crErr := checkNmstateCR(oc, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "check nmstate cr failed")
o.Expect(result).To(o.BeTrue())
exutil.By("3. Check NNCP created before upgrade is still Available")
exutil.By("3.1 Verify the policy is Available")
nncpErr1 := checkNNCPStatus(oc, policyNamePreUpgrade, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
exutil.By("3.2 Verify the status of enactments is Available")
nnceName1 := nodeName + "." + policyNamePreUpgrade
nnceErr1 := checkNNCEStatus(oc, nnceName1, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "status of enactments updated failed")
exutil.By("3.3 Verify the bond is up and active on the node")
ifaceList1, ifaceErr1 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceList1, bondInfName)).Should(o.BeTrue())
exutil.By("3.4 Verify the created bond found in node network state")
ifaceState1, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+bondInfName+`")].state}`).Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceState1, "up")).Should(o.BeTrue())
exutil.By("4. Create new NNCP after upgrade")
exutil.By("4.1 Configure NNCP for creating vlan")
vlanPolicy := vlanPolicyResource{
name: policyNamePstUpgrade,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: vlanBaseInf + ".101",
descr: "create vlan",
baseiface: vlanBaseInf,
vlanid: 101,
state: "up",
template: vlanPolicyTemplate,
}
defer deleteNNCP(oc, policyNamePstUpgrade)
defer func() {
ifaces, deferErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
if strings.Contains(ifaces, vlanPolicy.ifacename) {
cmd := `nmcli con delete ` + vlanPolicy.ifacename + `; nmcli con delete ` + vlanPolicy.baseiface
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cmd)
}
}()
configErr1 := vlanPolicy.configNNCP(oc)
o.Expect(configErr1).NotTo(o.HaveOccurred())
exutil.By("4.2 Verify the policy is applied")
nncpErr2 := checkNNCPStatus(oc, policyNamePstUpgrade, "Available")
exutil.AssertWaitPollNoErr(nncpErr2, "policy applied failed")
exutil.By("4.3 Verify the status of enactments is updated")
nnceName2 := nodeName + "." + policyNamePstUpgrade
nnceErr2 := checkNNCEStatus(oc, nnceName2, "Available")
exutil.AssertWaitPollNoErr(nnceErr2, "status of enactments updated failed")
exutil.By("4.4 Verify the vlan is up and active on the node")
ifaceList2, ifaceErr2 := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
o.Expect(ifaceErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceList2, vlanPolicy.ifacename)).Should(o.BeTrue())
exutil.By("4.5 Verify the created vlan found in node network state")
ifaceState2, nnsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, `-ojsonpath={.status.currentState.interfaces[?(@.name=="`+vlanPolicy.ifacename+`")].state}`).Output()
o.Expect(nnsErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ifaceState2, "up")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
759a2e06-91ca-4924-a7a3-b239383bd2b7
|
Author:yingwang-NonPreRelease-Medium-75671-Verify global DNS via NMstate [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate.go
|
g.It("Author:yingwang-NonPreRelease-Medium-75671-Verify global DNS via NMstate [Disruptive]", func() {
var (
nmstateCRTemplate = generateTemplateAbsolutePath("nmstate-cr-template.yaml")
dnsNncpTemplate = generateTemplateAbsolutePath("global-dns-nncp-template.yaml")
dnsDomain = "testglobal.com"
ipAddr string
)
ipStackType := checkIPStackType(oc)
switch ipStackType {
case "ipv4single":
ipAddr = "8.8.8.8"
case "dualstack":
ipAddr = "2003::3"
case "ipv6single":
ipAddr = "2003::3"
default:
e2e.Logf("Get ipStackType as %s", ipStackType)
g.Skip("Skip for not supported IP stack type!! ")
}
g.By("1. Create NMState CR")
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
g.By("2. Create NNCP for Gloabal DNS")
g.By("2.1 create policy")
dnsServerIP1 := getAvaliableNameServer(oc, workers[0])
dnsServerIP2 := ipAddr
nncpDns := networkingRes{
name: "dns-" + getRandomString(),
namespace: opNamespace,
kind: "NodeNetworkConfigurationPolicy",
tempfile: dnsNncpTemplate,
}
defer func() {
removeResource(oc, true, true, nncpDns.kind, nncpDns.name, "-n", nncpDns.namespace)
//configure nncp with empty dns server to clear configuration
dnsClearNncpTemplate := generateTemplateAbsolutePath("global-dns-nncp-recover-template.yaml")
nncpDns_clear := networkingRes{
name: "dns-" + getRandomString(),
namespace: opNamespace,
kind: "NodeNetworkConfigurationPolicy",
tempfile: dnsClearNncpTemplate,
}
nncpDns_clear.create(oc, "NAME="+nncpDns_clear.name, "NAMESPACE="+nncpDns_clear.namespace, "NODE="+workers[0])
nncpErr1 := checkNNCPStatus(oc, nncpDns_clear.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy failed to be applied")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName_clear := workers[0] + "." + nncpDns_clear.name
nnceErr1 := checkNNCEStatus(oc, nnceName_clear, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "enactment status was not updated")
e2e.Logf("SUCCESS - status of enactments is updated")
removeResource(oc, true, true, nncpDns_clear.kind, nncpDns_clear.name, "-n", nncpDns_clear.namespace)
}()
nncpDns.create(oc, "NAME="+nncpDns.name, "NAMESPACE="+nncpDns.namespace, "NODE="+workers[0], "DNSDOMAIN="+dnsDomain,
"SERVERIP1="+dnsServerIP1, "SERVERIP2="+dnsServerIP2)
g.By("2.2 Verify the policy is applied")
nncpErr1 := checkNNCPStatus(oc, nncpDns.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy failed to be applied")
e2e.Logf("SUCCESS - policy is applied")
g.By("2.3 Verify the status of enactments is updated")
nnceName := workers[0] + "." + nncpDns.name
nnceErr1 := checkNNCEStatus(oc, nnceName, "Available")
exutil.AssertWaitPollNoErr(nnceErr1, "enactment status was not updated")
e2e.Logf("SUCCESS - status of enactments is updated")
g.By("2.4 Verify dns server record")
dnsServerIP := make([]string, 2)
dnsServerIP[0] = dnsServerIP1
dnsServerIP[1] = dnsServerIP2
checkDNSServer(oc, workers[0], dnsDomain, dnsServerIP)
})
| ||||||
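The ip-stack switch at the top of this test maps the detected stack type to the second test nameserver (the first comes from getAvaliableNameServer). A hedged, table-driven sketch of the same selection; pickTestDNSServerIP is a hypothetical refactor, and the IPs are the test values used above:

// pickTestDNSServerIP is a hypothetical refactor of the switch in the test
// above; ok=false means the stack type is unsupported and the caller should g.Skip.
func pickTestDNSServerIP(ipStackType string) (string, bool) {
	testIPs := map[string]string{
		"ipv4single": "8.8.8.8",
		"dualstack":  "2003::3",
		"ipv6single": "2003::3",
	}
	ip, ok := testIPs[ipStackType]
	return ip, ok
}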
file
|
openshift/openshift-tests-private
|
8d229323-d2b7-49c8-9122-7374c31956c8
|
nmstate_util
|
import (
"fmt"
"net"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
// Package networking NMState operator tests
package networking
import (
"fmt"
"net"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type nmstateCRResource struct {
name string
template string
}
type ifacePolicyResource struct {
name string
nodelabel string
labelvalue string
ifacename string
descr string
ifacetype string
state string
ipv6flag bool
template string
}
type bondPolicyResource struct {
name string
nodelabel string
labelvalue string
ifacename string
descr string
state string
port1 string
port2 string
ipaddrv4 string
template string
}
type vlanPolicyResource struct {
name string
nodelabel string
labelvalue string
ifacename string
descr string
state string
baseiface string
vlanid int
ipaddrv4 string
template string
}
type bridgevlanPolicyResource struct {
name string
nodelabel string
labelvalue string
ifacename string
descr string
state string
port string
template string
}
type bridgehostnamePolicyResource struct {
name string
nodelabel string
labelvalue string
ifacename string
state string
template string
}
type ovnMappingPolicyResource struct {
name string
nodelabel string
labelvalue string
localnet1 string
bridge1 string
template string
}
type ovsDBGlobalPolicyResource struct {
name string
nodelabel string
labelvalue string
ovsconfig string
ovsvalue string
template string
}
type staticHostnamePolicyResource struct {
name string
nodelabel string
labelvalue string
hostdomain string
template string
}
type staticDNSPolicyResource struct {
name string
namespace string
nodeName string
dnsdomain string
serverip1 string
serverip2 string
template string
}
type stIPRoutePolicyResource struct {
name string
nodelabel string
labelvalue string
ifacename string
descr string
state string
ipaddrv4 string
destaddrv4 string
nexthopaddrv4 string
ipaddrv6 string
destaddrv6 string
nexthopaddrv6 string
template string
}
type routePolicyResource struct {
name string
nodelabel string
labelvalue string
ifacename string
state string
destaddr string
nexthopaddr string
tableid int
template string
}
type ipsecHost2hostPolicyResource struct {
name string
nodelabel string
labelvalue string
tunnelname string
left string
leftcert string
right string
mode string
rightsubnet string
template string
}
type bondvlanPolicyResource struct {
name string
nodelabel string
labelvalue string
descr string
state string
bondname string
port1 string
port1type string
port2 string
port2type string
vlanifname string
vlanid int
ipaddrv4 string
ipaddrv6 string
template string
}
func generateTemplateAbsolutePath(fileName string) string {
testDataDir := exutil.FixturePath("testdata", "networking/nmstate")
return filepath.Join(testDataDir, fileName)
}
func createNMStateCR(oc *exutil.CLI, nmstatecr nmstateCRResource, namespace string) (bool, error) {
g.By("Creating NMState CR from template")
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nmstatecr.template, "-p", "NAME="+nmstatecr.name)
if err != nil {
e2e.Logf("Error creating NMState CR %v", err)
return false, err
}
result, err := checkNmstateCR(oc, namespace)
return result, err
}
func checkNmstateCR(oc *exutil.CLI, namespace string) (bool, error) {
err := waitForPodWithLabelReady(oc, namespace, "component=kubernetes-nmstate-handler")
if err != nil {
e2e.Logf("nmstate-handler Pods did not transition to ready state %v", err)
return false, err
}
err = waitForPodWithLabelReady(oc, namespace, "component=kubernetes-nmstate-webhook")
if err != nil {
e2e.Logf("nmstate-webhook pod did not transition to ready state %v", err)
return false, err
}
err = waitForPodWithLabelReady(oc, namespace, "app=nmstate-console-plugin")
if err != nil {
e2e.Logf("nmstate-console-plugin pod did not transition to ready state %v", err)
return false, err
}
err = waitForPodWithLabelReady(oc, namespace, "component=kubernetes-nmstate-metrics")
if err != nil {
e2e.Logf("nmstate-metrics pod did not transition to ready state %v", err)
return false, err
}
e2e.Logf("nmstate-handler, nmstate-webhook, nmstate-console-plugin and nmstate-metrics pods created successfully")
return true, nil
}
func deleteNMStateCR(oc *exutil.CLI, rs nmstateCRResource) {
e2e.Logf("delete %s CR %s", "nmstate", rs.name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("nmstate", rs.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func configIface(oc *exutil.CLI, ifacepolicy ifacePolicyResource) (bool, error) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ifacepolicy.template, "-p", "NAME="+ifacepolicy.name, "NODELABEL="+ifacepolicy.nodelabel, "LABELVALUE="+ifacepolicy.labelvalue, "IFACENAME="+ifacepolicy.ifacename, "DESCR="+ifacepolicy.descr, "IFACETYPE="+ifacepolicy.ifacetype, "STATE="+ifacepolicy.state, "IPV6FLAG="+strconv.FormatBool(ifacepolicy.ipv6flag))
if err != nil {
e2e.Failf("Error configure interface %v", err)
return false, err
}
return true, nil
}
func configBond(oc *exutil.CLI, bondpolicy bondPolicyResource) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bondpolicy.template, "-p", "NAME="+bondpolicy.name, "NODELABEL="+bondpolicy.nodelabel, "LABELVALUE="+bondpolicy.labelvalue, "IFACENAME="+bondpolicy.ifacename, "DESCR="+bondpolicy.descr, "STATE="+bondpolicy.state, "PORT1="+bondpolicy.port1, "PORT2="+bondpolicy.port2)
if err != nil {
e2e.Logf("Error configure bond %v", err)
return err
}
return nil
}
func configBondWithIP(oc *exutil.CLI, bondpolicy bondPolicyResource) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bondpolicy.template, "-p", "NAME="+bondpolicy.name, "NODELABEL="+bondpolicy.nodelabel, "LABELVALUE="+bondpolicy.labelvalue, "IFACENAME="+bondpolicy.ifacename, "DESCR="+bondpolicy.descr, "STATE="+bondpolicy.state, "PORT1="+bondpolicy.port1, "PORT2="+bondpolicy.port2, "IPADDRV4="+bondpolicy.ipaddrv4)
if err != nil {
e2e.Logf("Error configure bond %v", err)
return err
}
return nil
}
func (vpr *vlanPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vpr.template, "-p", "NAME="+vpr.name, "NODELABEL="+vpr.nodelabel, "LABELVALUE="+vpr.labelvalue, "IFACENAME="+vpr.ifacename, "DESCR="+vpr.descr, "STATE="+vpr.state, "BASEIFACE="+vpr.baseiface, "VLANID="+strconv.Itoa(vpr.vlanid))
if err != nil {
e2e.Logf("Error configure vlan %v", err)
return err
}
return nil
}
func (vpr *vlanPolicyResource) configNNCPWithIP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vpr.template, "-p", "NAME="+vpr.name, "NODELABEL="+vpr.nodelabel, "LABELVALUE="+vpr.labelvalue, "IFACENAME="+vpr.ifacename, "DESCR="+vpr.descr, "STATE="+vpr.state, "BASEIFACE="+vpr.baseiface, "VLANID="+strconv.Itoa(vpr.vlanid), "IPADDRV4="+vpr.ipaddrv4)
if err != nil {
e2e.Logf("Error configure vlan %v", err)
return err
}
return nil
}
func (bvpr *bridgevlanPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "IFACENAME="+bvpr.ifacename, "DESCR="+bvpr.descr, "STATE="+bvpr.state, "PORT="+bvpr.port)
if err != nil {
e2e.Logf("Error configure bridge %v", err)
return err
}
return nil
}
func (bvpr *bridgehostnamePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "IFACENAME="+bvpr.ifacename, "STATE="+bvpr.state)
if err != nil {
e2e.Logf("Error configure bridge %v", err)
return err
}
return nil
}
func (bvpr *ovnMappingPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue,
"LOCALNET1="+bvpr.localnet1, "BRIDGE1="+bvpr.bridge1)
if err != nil {
e2e.Logf("Error configure ovnmapping %v", err)
return err
}
return nil
}
func (bvpr *ovsDBGlobalPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue,
"OVSCONFIG="+bvpr.ovsconfig, "OVSVALUE="+bvpr.ovsvalue)
if err != nil {
e2e.Logf("Error configure ovsDBGlobal %v", err)
return err
}
return nil
}
func (bvpr *staticHostnamePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "HOSTDOMAIN="+bvpr.hostdomain)
if err != nil {
e2e.Logf("Error configure staticHostname %v", err)
return err
}
return nil
}
func (bvpr *staticDNSPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NAMESPACE="+bvpr.namespace, "NODE="+bvpr.nodeName,
"DNSDOMAIN="+bvpr.dnsdomain, "SERVERIP1="+bvpr.serverip1, "SERVERIP2="+bvpr.serverip2)
if err != nil {
e2e.Logf("Error configure staticDNS %v", err)
return err
}
return nil
}
func (stpr *stIPRoutePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", stpr.template, "-p", "NAME="+stpr.name, "NODELABEL="+stpr.nodelabel, "LABELVALUE="+stpr.labelvalue, "IFACENAME="+stpr.ifacename, "DESCR="+stpr.descr, "STATE="+stpr.state,
"IPADDRV4="+stpr.ipaddrv4, "DESTADDRV4="+stpr.destaddrv4, "NEXTHOPADDRV4="+stpr.nexthopaddrv4, "IPADDRV6="+stpr.ipaddrv6, "DESTADDRV6="+stpr.destaddrv6, "NEXTHOPADDRV6="+stpr.nexthopaddrv6)
if err != nil {
e2e.Logf("Error configure static ip and route %v", err)
return err
}
return nil
}
func (rpr *routePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", rpr.template, "-p", "NAME="+rpr.name, "NODELABEL="+rpr.nodelabel, "LABELVALUE="+rpr.labelvalue, "IFACENAME="+rpr.ifacename, "STATE="+rpr.state,
"DESTADDR="+rpr.destaddr, "NEXTHOPADDR="+rpr.nexthopaddr, "ID="+strconv.Itoa(rpr.tableid))
if err != nil {
e2e.Logf("Error configure route %v", err)
return err
}
return nil
}
func (bvpr *bondvlanPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "DESCR="+bvpr.descr, "STATE="+bvpr.state, "BONDNAME="+bvpr.bondname, "PORT1="+bvpr.port1, "PORT1TYPE="+bvpr.port1type, "PORT2="+bvpr.port2, "PORT2TYPE="+bvpr.port2type, "VLANIFNAME="+bvpr.vlanifname, "VLANID="+strconv.Itoa(bvpr.vlanid), "IPADDRV4="+bvpr.ipaddrv4, "IPADDRV6="+bvpr.ipaddrv6)
if err != nil {
e2e.Logf("Error configure vlan over bond %v", err)
return err
}
return nil
}
func checkNNCPStatus(oc *exutil.CLI, policyName string, expectedStatus string) error {
return wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
e2e.Logf("Checking status of nncp %s", policyName)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nncp", policyName).Output()
if err != nil {
e2e.Logf("Failed to get nncp status, error:%s. Trying again", err)
return false, nil
}
if !strings.Contains(output, expectedStatus) {
e2e.Logf("nncp status does not meet expectation:%s, error:%s, output:%s. Trying again", expectedStatus, err, output)
return false, nil
}
return true, nil
})
}
func checkNNCEStatus(oc *exutil.CLI, nnceName string, expectedStatus string) error {
return wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
e2e.Logf("Checking status of nnce %s", nnceName)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nnce", nnceName).Output()
if err != nil {
e2e.Logf("Failed to get nnce status, error:%s. Trying again", err)
return false, nil
}
if !strings.Contains(output, expectedStatus) {
e2e.Logf("nnce status does not meet expectation:%s, error:%s. Trying again", expectedStatus, err)
return false, nil
}
return true, nil
})
}
func deleteNNCP(oc *exutil.CLI, name string) {
e2e.Logf("delete nncp %s", name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("nncp", name, "--ignore-not-found=true").Execute()
if err != nil {
e2e.Logf("Failed to delete nncp %s, error:%s", name, err)
}
}
func getDefaultSubnetForSpecificSDNNode(oc *exutil.CLI, nodeName string) string {
var sub1 string
iface, _ := getDefaultInterface(oc)
getDefaultSubnetCmd := "/usr/sbin/ip -4 -brief a show " + iface
podName, getPodNameErr := exutil.GetPodName(oc, "openshift-sdn", "app=sdn", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
cmd := []string{"-n", "openshift-sdn", "-c", "sdn", podName, "--", "/bin/sh", "-c", getDefaultSubnetCmd}
subnet, getSubnetErr := oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
o.Expect(getSubnetErr).NotTo(o.HaveOccurred())
defSubnet := strings.Fields(subnet)[2]
e2e.Logf("Get the default subnet: %s", defSubnet)
_, ipNet, getCIDRErr := net.ParseCIDR(defSubnet)
o.Expect(getCIDRErr).NotTo(o.HaveOccurred())
e2e.Logf("ipnet: %v", ipNet)
sub1 = ipNet.String()
e2e.Logf("\n\n\n sub1 as -->%v<--\n\n\n", sub1)
return sub1
}
func isPlatformSuitableForNMState(oc *exutil.CLI) bool {
platform := checkPlatform(oc)
if !strings.Contains(platform, "baremetal") && !strings.Contains(platform, "none") && !strings.Contains(platform, "vsphere") && !strings.Contains(platform, "openstack") {
e2e.Logf("Skipping for unsupported platform, not baremetal/vsphere/openstack!")
return false
}
return true
}
func preCheckforRegistry(oc *exutil.CLI) {
output, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-ojsonpath='{.items[*].status.capabilities}'").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if !strings.Contains(output, "enabledCapabilities") {
g.Skip("Skip testing as enabledCapabilities not found")
}
catalogsource, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("catalogsource", "-n", "openshift-marketplace").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
if !strings.Contains(catalogsource, "qe-app-registry") {
g.Skip("Skip testing as qe-app-registry not found")
}
}
func createIPSECPolicy(oc *exutil.CLI, ipsecPolicy ipsecHost2hostPolicyResource) (bool, error) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipsecPolicy.template, "-p", "NAME="+ipsecPolicy.name, "NODELABEL="+ipsecPolicy.nodelabel, "LABELVALUE="+ipsecPolicy.labelvalue, "TUNELNAME="+ipsecPolicy.tunnelname, "LEFT="+ipsecPolicy.left, "LEFTCERT="+ipsecPolicy.leftcert, "RIGHT="+ipsecPolicy.right, "RIGHTSUBNET="+ipsecPolicy.rightsubnet, "MODE="+ipsecPolicy.mode)
if err != nil {
e2e.Failf("Error configure ipsec policy %v", err)
return false, err
}
return true, nil
}
func installNMstateOperator(oc *exutil.CLI) {
var (
opNamespace = "openshift-nmstate"
opName = "kubernetes-nmstate-operator"
)
e2e.Logf("Check catalogsource and install nmstate operator.")
namespaceTemplate := generateTemplateAbsolutePath("namespace-template.yaml")
operatorGroupTemplate := generateTemplateAbsolutePath("operatorgroup-template.yaml")
subscriptionTemplate := generateTemplateAbsolutePath("subscription-template.yaml")
sub := subscriptionResource{
name: "nmstate-operator-sub",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
operatorInstall(oc, sub, ns, og)
e2e.Logf("SUCCESS - NMState operator installed")
}
func createNMstateCR(oc *exutil.CLI, nmstateCR nmstateCRResource) {
e2e.Logf("Create NMState CR")
var (
opNamespace = "openshift-nmstate"
)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
}
func configIPSecNMSatePolicy(oc *exutil.CLI, policyName, leftIP, nodeName, tunnelname, rightIP, leftcert, mode string) {
e2e.Logf("Configure NNCP for IPSEC")
ipsecPolicyTemplate := generateTemplateAbsolutePath("ipsec-host2host-policy-template.yaml")
ipsecPolicy := ipsecHost2hostPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
tunnelname: tunnelname,
left: leftIP,
leftcert: leftcert,
right: rightIP,
mode: mode,
rightsubnet: rightIP + "/32",
template: ipsecPolicyTemplate,
}
result, configErr1 := createIPSECPolicy(oc, ipsecPolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
e2e.Logf("Wait ipsec policy applied.")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy failed to be applied")
e2e.Logf("SUCCESS - ipsec policy is applied")
}
func configIPSecNMSatePolicyHost2net(oc *exutil.CLI, policyName, leftIP, nodeName, tunnelname, rightIP, rightNetworkAddress, rightNetworkCidr, leftcert, mode string) {
e2e.Logf("Configure NNCP for IPSEC")
ipsecPolicyTemplate := generateTemplateAbsolutePath("ipsec-host2host-policy-template.yaml")
ipsecPolicy := ipsecHost2hostPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
tunnelname: tunnelname,
left: leftIP,
leftcert: leftcert,
right: rightIP,
mode: mode,
rightsubnet: rightNetworkAddress + rightNetworkCidr,
template: ipsecPolicyTemplate,
}
result, configErr1 := createIPSECPolicy(oc, ipsecPolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
e2e.Logf("Wait ipsec policy applied.")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy failed to be applied")
e2e.Logf("SUCCESS - ipsec policy is applied")
}
func removeIPSecConfig(oc *exutil.CLI, policyName, ifname, nodeName string) {
policyTemplate := generateTemplateAbsolutePath("iface-policy-template.yaml")
ipsecPolicy := ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifname,
descr: "disable ipsec tunnel",
ifacetype: "ipsec",
state: "absent",
template: policyTemplate,
}
result, configErr := configIface(oc, ipsecPolicy)
o.Expect(configErr).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
nncpErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy failed to be applied")
e2e.Logf("SUCCESS - policy is applied")
deleteNNCP(oc, policyName)
}
func verifyIPSecTunnelUp(oc *exutil.CLI, nodeName, src, dst, mode string) {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/32 dir out ; ip xfrm policy get src %s/32 dst %s/32 dir in ", src, dst, dst, src)
ipXfrmPolicy, ipsecErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmd)
o.Expect(ipsecErr).NotTo(o.HaveOccurred())
o.Expect(ipXfrmPolicy).Should(o.ContainSubstring(mode))
}
func verifyIPSecTunnelDown(oc *exutil.CLI, nodeName, src, dst, mode string) {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/32 dir out ; ip xfrm policy get src %s/32 dst %s/32 dir in ", src, dst, dst, src)
_, ipsecErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmd)
o.Expect(ipsecErr).To(o.HaveOccurred())
}
// host2net tunnel will have network address in either src or dst from right side
func verifyIPSecTunnelUphost2netTunnel(oc *exutil.CLI, nodeName, src, dst, mode string) {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/22 dir out ; ip xfrm policy get src %s/22 dst %s/32 dir in ", src, dst, dst, src)
ipXfrmPolicy, ipsecErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmd)
o.Expect(ipsecErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ipXfrmPolicy, mode)).Should(o.BeTrue())
}
// check dns server
func checkDNSServer(oc *exutil.CLI, nodeName string, dnsDomain string, dnsServerIP []string) {
cmd1 := "cat /etc/resolv.conf"
cmd2 := "cat /var/run/NetworkManager/resolv.conf"
cmd3 := `cat /var/lib/NetworkManager/NetworkManager-intern.conf`
resOuput1, dnsErr1 := exutil.DebugNodeRetryWithOptionsAndChroot(oc, nodeName, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", cmd1)
o.Expect(dnsErr1).NotTo(o.HaveOccurred())
resOuput2, dnsErr2 := exutil.DebugNodeRetryWithOptionsAndChroot(oc, nodeName, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", cmd2)
o.Expect(dnsErr2).NotTo(o.HaveOccurred())
resOuput3, dnsErr3 := exutil.DebugNodeRetryWithOptionsAndChroot(oc, nodeName, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", cmd3)
o.Expect(dnsErr3).NotTo(o.HaveOccurred())
e2e.Logf("check resolv.conf results are %v \n , %v, and \n %v \n", resOuput1, resOuput2, resOuput3)
resOuput4, dnsErr4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-o=jsonpath={.status.currentState.dns-resolver.running}").Output()
o.Expect(dnsErr4).NotTo(o.HaveOccurred())
e2e.Logf("The nns running dns status of node %v is %v", nodeName, resOuput4)
// check domain name
o.Expect(strings.Contains(resOuput1, dnsDomain)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput2, dnsDomain)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput3, dnsDomain)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput4, dnsDomain)).Should(o.BeTrue())
//check nameservers
for _, serverIP := range dnsServerIP {
matchServerIP := "nameserver " + serverIP
o.Expect(strings.Contains(resOuput1, matchServerIP)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput2, matchServerIP)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput3, serverIP)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput4, serverIP)).Should(o.BeTrue())
}
}
func getAvaliableNameServer(oc *exutil.CLI, nodeName string) string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-o=jsonpath={.status.currentState.dns-resolver.running.server[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
return output
}
func extractMetricValue(metrics string, featureNames []string, expectValues []int) bool {
for i, featureName := range featureNames {
re := regexp.MustCompile(fmt.Sprintf(`kubernetes_nmstate_features_applied{name="%s"} (\d+)`, regexp.QuoteMeta(featureName)))
match := re.FindStringSubmatch(metrics)
if len(match) < 2 {
e2e.Logf("Metric not found for name: %s", featureName)
return false
}
value, err := strconv.Atoi(match[1])
if err != nil {
e2e.Logf("Failed to convert value to int for name: %s, error: %v", featureName, err)
return false
}
if value != expectValues[i] {
e2e.Logf("%s Metric does not match the expected value of %d", featureName, expectValues[i])
return false
}
}
return true
}
|
package networking
| ||||
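Taken together, the helpers in this file follow one flow: install the operator, create the NMState CR, apply an NNCP, wait for the NNCP and its NNCE to become Available, then clean up. A minimal sketch of that composition, assuming an initialized *exutil.CLI, the helpers above, and a bridge policy template under testdata/networking/nmstate (the template file name below is an assumption):

// exampleNMStateFlow sketches the typical helper composition; it is not an
// existing test, just an illustration of how the utilities fit together.
func exampleNMStateFlow(oc *exutil.CLI, nodeName string) {
	// Operator and CR setup (skips when no usable catalog source exists).
	installNMstateOperator(oc)
	nmstateCR := nmstateCRResource{
		name:     "nmstate",
		template: generateTemplateAbsolutePath("nmstate-cr-template.yaml"),
	}
	defer deleteNMStateCR(oc, nmstateCR)
	createNMstateCR(oc, nmstateCR)
	// Apply a policy on one node and wait for NNCP/NNCE to become Available.
	policy := bridgehostnamePolicyResource{
		name:       "br-test",
		nodelabel:  "kubernetes.io/hostname",
		labelvalue: nodeName,
		ifacename:  "br-test",
		state:      "up",
		template:   generateTemplateAbsolutePath("bridge-hostname-policy-template.yaml"), // assumed file name
	}
	defer deleteNNCP(oc, policy.name)
	o.Expect(policy.configNNCP(oc)).NotTo(o.HaveOccurred())
	exutil.AssertWaitPollNoErr(checkNNCPStatus(oc, policy.name, "Available"), "policy failed to be applied")
	exutil.AssertWaitPollNoErr(checkNNCEStatus(oc, nodeName+"."+policy.name, "Available"), "enactment status was not updated")
}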
function
|
openshift/openshift-tests-private
|
31cc6809-b85d-44a2-9a47-954701762708
|
generateTemplateAbsolutePath
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func generateTemplateAbsolutePath(fileName string) string {
testDataDir := exutil.FixturePath("testdata", "networking/nmstate")
return filepath.Join(testDataDir, fileName)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
2c567a92-eeb8-4853-acff-b25b3a58cf13
|
createNMStateCR
|
['nmstateCRResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func createNMStateCR(oc *exutil.CLI, nmstatecr nmstateCRResource, namespace string) (bool, error) {
g.By("Creating NMState CR from template")
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", nmstatecr.template, "-p", "NAME="+nmstatecr.name)
if err != nil {
e2e.Logf("Error creating NMState CR %v", err)
return false, err
}
result, err := checkNmstateCR(oc, namespace)
return result, err
}
|
networking
| ||||
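A short usage sketch for this helper, assumed to run inside a test body with an initialized oc, the openshift-nmstate namespace, and a template resolved via generateTemplateAbsolutePath; cleanup is deferred with deleteNMStateCR as the tests above do:

cr := nmstateCRResource{
	name:     "nmstate",
	template: generateTemplateAbsolutePath("nmstate-cr-template.yaml"),
}
defer deleteNMStateCR(oc, cr)
// Returns true only after all nmstate component pods report ready.
ok, err := createNMStateCR(oc, cr, "openshift-nmstate")
exutil.AssertWaitPollNoErr(err, "create nmstate cr failed")
o.Expect(ok).To(o.BeTrue())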
function
|
openshift/openshift-tests-private
|
a7e32ca2-4b6e-49ae-a42c-64f7ac791ea7
|
checkNmstateCR
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func checkNmstateCR(oc *exutil.CLI, namespace string) (bool, error) {
err := waitForPodWithLabelReady(oc, namespace, "component=kubernetes-nmstate-handler")
if err != nil {
e2e.Logf("nmstate-handler Pods did not transition to ready state %v", err)
return false, err
}
err = waitForPodWithLabelReady(oc, namespace, "component=kubernetes-nmstate-webhook")
if err != nil {
e2e.Logf("nmstate-webhook pod did not transition to ready state %v", err)
return false, err
}
err = waitForPodWithLabelReady(oc, namespace, "app=nmstate-console-plugin")
if err != nil {
e2e.Logf("nmstate-console-plugin pod did not transition to ready state %v", err)
return false, err
}
err = waitForPodWithLabelReady(oc, namespace, "component=kubernetes-nmstate-metrics")
if err != nil {
e2e.Logf("nmstate-metrics pod did not transition to ready state %v", err)
return false, err
}
e2e.Logf("nmstate-handler, nmstate-webhook, nmstate-console-plugin and nmstate-metrics pods created successfully")
return true, nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
ff1e7092-0020-4a7d-a6b8-338aeb5f603a
|
deleteNMStateCR
|
['nmstateCRResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func deleteNMStateCR(oc *exutil.CLI, rs nmstateCRResource) {
e2e.Logf("delete %s CR %s", "nmstate", rs.name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("nmstate", rs.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c7acd554-245c-458a-a675-e148fbfd8970
|
configIface
|
['"strconv"']
|
['ifacePolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func configIface(oc *exutil.CLI, ifacepolicy ifacePolicyResource) (bool, error) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ifacepolicy.template, "-p", "NAME="+ifacepolicy.name, "NODELABEL="+ifacepolicy.nodelabel, "LABELVALUE="+ifacepolicy.labelvalue, "IFACENAME="+ifacepolicy.ifacename, "DESCR="+ifacepolicy.descr, "IFACETYPE="+ifacepolicy.ifacetype, "STATE="+ifacepolicy.state, "IPV6FLAG="+strconv.FormatBool(ifacepolicy.ipv6flag))
if err != nil {
e2e.Failf("Error configure interface %v", err)
return false, err
}
return true, nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
45cbf0d4-54fd-4be8-bdb2-76e0248ec2ca
|
configBond
|
['bondPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func configBond(oc *exutil.CLI, bondpolicy bondPolicyResource) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bondpolicy.template, "-p", "NAME="+bondpolicy.name, "NODELABEL="+bondpolicy.nodelabel, "LABELVALUE="+bondpolicy.labelvalue, "IFACENAME="+bondpolicy.ifacename, "DESCR="+bondpolicy.descr, "STATE="+bondpolicy.state, "PORT1="+bondpolicy.port1, "PORT2="+bondpolicy.port2)
if err != nil {
e2e.Logf("Error configure bond %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
20793485-913e-407a-b2b2-3999ce5f5bd0
|
configBondWithIP
|
['bondPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func configBondWithIP(oc *exutil.CLI, bondpolicy bondPolicyResource) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bondpolicy.template, "-p", "NAME="+bondpolicy.name, "NODELABEL="+bondpolicy.nodelabel, "LABELVALUE="+bondpolicy.labelvalue, "IFACENAME="+bondpolicy.ifacename, "DESCR="+bondpolicy.descr, "STATE="+bondpolicy.state, "PORT1="+bondpolicy.port1, "PORT2="+bondpolicy.port2, "IPADDRV4="+bondpolicy.ipaddrv4)
if err != nil {
e2e.Logf("Error configure bond %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
19a5b0c4-1777-46d9-8bee-65d805c6c0f7
|
configNNCP
|
['"strconv"']
|
['vlanPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (vpr *vlanPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vpr.template, "-p", "NAME="+vpr.name, "NODELABEL="+vpr.nodelabel, "LABELVALUE="+vpr.labelvalue, "IFACENAME="+vpr.ifacename, "DESCR="+vpr.descr, "STATE="+vpr.state, "BASEIFACE="+vpr.baseiface, "VLANID="+strconv.Itoa(vpr.vlanid))
if err != nil {
e2e.Logf("Error configure vlan %v", err)
return err
}
return nil
}
|
networking
| |||
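A sketch of how this method is driven from a test, mirroring the post-upgrade VLAN case earlier in this document; the base interface, policy, and template names are placeholders:

vlan := vlanPolicyResource{
	name:       "vlan101-policy", // placeholder policy name
	nodelabel:  "kubernetes.io/hostname",
	labelvalue: nodeName,
	ifacename:  "ens3.101", // placeholder base interface + VLAN id
	descr:      "create vlan",
	baseiface:  "ens3",
	vlanid:     101,
	state:      "up",
	template:   generateTemplateAbsolutePath("vlan-policy-template.yaml"), // assumed file name
}
defer deleteNNCP(oc, vlan.name)
o.Expect(vlan.configNNCP(oc)).NotTo(o.HaveOccurred())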
function
|
openshift/openshift-tests-private
|
0c8d80d3-c962-4829-b2da-b335ab40b6e2
|
configNNCPWithIP
|
['"strconv"']
|
['vlanPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (vpr *vlanPolicyResource) configNNCPWithIP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vpr.template, "-p", "NAME="+vpr.name, "NODELABEL="+vpr.nodelabel, "LABELVALUE="+vpr.labelvalue, "IFACENAME="+vpr.ifacename, "DESCR="+vpr.descr, "STATE="+vpr.state, "BASEIFACE="+vpr.baseiface, "VLANID="+strconv.Itoa(vpr.vlanid), "IPADDRV4="+vpr.ipaddrv4)
if err != nil {
e2e.Logf("Error configure vlan %v", err)
return err
}
return nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
ded61ad8-203c-49a8-aeb9-955831f6d3be
|
configNNCP
|
['bridgevlanPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (bvpr *bridgevlanPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "IFACENAME="+bvpr.ifacename, "DESCR="+bvpr.descr, "STATE="+bvpr.state, "PORT="+bvpr.port)
if err != nil {
e2e.Logf("Error configure bridge %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d052d1ce-ca9d-425e-babb-780130ef4378
|
configNNCP
|
['bridgehostnamePolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (bvpr *bridgehostnamePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "IFACENAME="+bvpr.ifacename, "STATE="+bvpr.state)
if err != nil {
e2e.Logf("Error configure bridge %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
1e3644aa-0013-48d0-8387-b3872377c9d8
|
configNNCP
|
['ovnMappingPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (bvpr *ovnMappingPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue,
"LOCALNET1="+bvpr.localnet1, "BRIDGE1="+bvpr.bridge1)
if err != nil {
e2e.Logf("Error configure ovnmapping %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
65a7df7d-81a8-46f0-8ad3-2a1b51ccb658
|
configNNCP
|
['ovsDBGlobalPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (bvpr *ovsDBGlobalPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue,
"OVSCONFIG="+bvpr.ovsconfig, "OVSVALUE="+bvpr.ovsvalue)
if err != nil {
e2e.Logf("Error configure ovsDBGlobal %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
53a0b0e0-48fe-4901-9a4e-aba69d288482
|
configNNCP
|
['staticHostnamePolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (bvpr *staticHostnamePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "HOSTDOMAIN="+bvpr.hostdomain)
if err != nil {
e2e.Logf("Error configure staticHostname %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
01861efc-1093-4182-ac93-ec0998a8be2b
|
configNNCP
|
['staticDNSPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (bvpr *staticDNSPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NAMESPACE="+bvpr.namespace, "NODE="+bvpr.nodeName,
"DNSDOMAIN="+bvpr.dnsdomain, "SERVERIP1="+bvpr.serverip1, "SERVERIP2="+bvpr.serverip2)
if err != nil {
e2e.Logf("Error configure staticDNS %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9c42cacc-68a3-4558-80bf-03f14c5b7787
|
configNNCP
|
['stIPRoutePolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (stpr *stIPRoutePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", stpr.template, "-p", "NAME="+stpr.name, "NODELABEL="+stpr.nodelabel, "LABELVALUE="+stpr.labelvalue, "IFACENAME="+stpr.ifacename, "DESCR="+stpr.descr, "STATE="+stpr.state,
"IPADDRV4="+stpr.ipaddrv4, "DESTADDRV4="+stpr.destaddrv4, "NEXTHOPADDRV4="+stpr.nexthopaddrv4, "IPADDRV6="+stpr.ipaddrv6, "DESTADDRV6="+stpr.destaddrv6, "NEXTHOPADDRV6="+stpr.nexthopaddrv6)
if err != nil {
e2e.Logf("Error configure static ip and route %v", err)
return err
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
560fee70-a5f4-4e67-bcaa-dd67ee024d95
|
configNNCP
|
['"strconv"']
|
['routePolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (rpr *routePolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", rpr.template, "-p", "NAME="+rpr.name, "NODELABEL="+rpr.nodelabel, "LABELVALUE="+rpr.labelvalue, "IFACENAME="+rpr.ifacename, "STATE="+rpr.state,
"DESTADDR="+rpr.destaddr, "NEXTHOPADDR="+rpr.nexthopaddr, "ID="+strconv.Itoa(rpr.tableid))
if err != nil {
e2e.Logf("Error configure route %v", err)
return err
}
return nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
f7b7d0a7-c49d-47fb-9f51-243f3a4fce7d
|
configNNCP
|
['"strconv"']
|
['bondvlanPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func (bvpr *bondvlanPolicyResource) configNNCP(oc *exutil.CLI) error {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bvpr.template, "-p", "NAME="+bvpr.name, "NODELABEL="+bvpr.nodelabel, "LABELVALUE="+bvpr.labelvalue, "DESCR="+bvpr.descr, "STATE="+bvpr.state, "BONDNAME="+bvpr.bondname, "PORT1="+bvpr.port1, "PORT1TYPE="+bvpr.port1type, "PORT2="+bvpr.port2, "PORT2TYPE="+bvpr.port2type, "VLANIFNAME="+bvpr.vlanifname, "VLANID="+strconv.Itoa(bvpr.vlanid), "IPADDRV4="+bvpr.ipaddrv4, "IPADDRV6="+bvpr.ipaddrv6)
if err != nil {
e2e.Logf("Error configure vlan over bond %v", err)
return err
}
return nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
3cabb097-1064-4f3f-9449-f4753afbab78
|
checkNNCPStatus
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func checkNNCPStatus(oc *exutil.CLI, policyName string, expectedStatus string) error {
return wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
e2e.Logf("Checking status of nncp %s", policyName)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nncp", policyName).Output()
if err != nil {
e2e.Logf("Failed to get nncp status, error:%s. Trying again", err)
return false, nil
}
if !strings.Contains(output, expectedStatus) {
e2e.Logf("nncp status does not meet expectation:%s, error:%s, output:%s. Trying again", expectedStatus, err, output)
return false, nil
}
return true, nil
})
}
|
networking
| ||||
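Recent k8s.io/apimachinery releases deprecate wait.Poll in favor of context-aware variants. A hedged sketch of the same NNCP check on top of wait.PollUntilContextTimeout; whether the vendored apimachinery here exposes that function depends on the module version, and the sketch also needs a "context" import, so treat it as an assumption rather than a drop-in change:

// checkNNCPStatusCtx is a hypothetical context-aware variant of checkNNCPStatus.
func checkNNCPStatusCtx(oc *exutil.CLI, policyName, expectedStatus string) error {
	return wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 3*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nncp", policyName).Output()
			if err != nil {
				return false, nil // transient; retry until the timeout
			}
			return strings.Contains(output, expectedStatus), nil
		})
}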
function
|
openshift/openshift-tests-private
|
4ec97363-4720-4ca5-ac10-32601af02c6b
|
checkNNCEStatus
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func checkNNCEStatus(oc *exutil.CLI, nnceName string, expectedStatus string) error {
return wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
e2e.Logf("Checking status of nnce %s", nnceName)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nnce", nnceName).Output()
if err != nil {
e2e.Logf("Failed to get nnce status, error:%s. Trying again", err)
return false, nil
}
if !strings.Contains(output, expectedStatus) {
e2e.Logf("nnce status does not meet expectation:%s, error:%s. Trying again", expectedStatus, err)
return false, nil
}
return true, nil
})
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d10de333-fc3b-44cb-b5a1-841d09a992b7
|
deleteNNCP
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func deleteNNCP(oc *exutil.CLI, name string) {
e2e.Logf("delete nncp %s", name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("nncp", name, "--ignore-not-found=true").Execute()
if err != nil {
e2e.Logf("Failed to delete nncp %s, error:%s", name, err)
}
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
2f6b55c3-5cb5-4133-b5cc-2dc3da208167
|
getDefaultSubnetForSpecificSDNNode
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func getDefaultSubnetForSpecificSDNNode(oc *exutil.CLI, nodeName string) string {
var sub1 string
iface, _ := getDefaultInterface(oc)
getDefaultSubnetCmd := "/usr/sbin/ip -4 -brief a show " + iface
podName, getPodNameErr := exutil.GetPodName(oc, "openshift-sdn", "app=sdn", nodeName)
o.Expect(getPodNameErr).NotTo(o.HaveOccurred())
cmd := []string{"-n", "openshift-sdn", "-c", "sdn", podName, "--", "/bin/sh", "-c", getDefaultSubnetCmd}
subnet, getSubnetErr := oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
o.Expect(getSubnetErr).NotTo(o.HaveOccurred())
defSubnet := strings.Fields(subnet)[2]
e2e.Logf("Get the default subnet: %s", defSubnet)
_, ipNet, getCIDRErr := net.ParseCIDR(defSubnet)
o.Expect(getCIDRErr).NotTo(o.HaveOccurred())
e2e.Logf("ipnet: %v", ipNet)
sub1 = ipNet.String()
e2e.Logf("\n\n\n sub1 as -->%v<--\n\n\n", sub1)
return sub1
}
|
networking
| ||||
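The subnet derivation above relies on net.ParseCIDR normalizing a host address in CIDR notation to its enclosing network. A tiny standalone illustration of that stdlib behavior:

package main

import (
	"fmt"
	"net"
)

func main() {
	// ParseCIDR returns the host IP and the enclosing network;
	// ipNet.String() is the normalized subnet the helper above returns.
	_, ipNet, err := net.ParseCIDR("192.168.1.37/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ipNet.String()) // 192.168.1.0/24
}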
function
|
openshift/openshift-tests-private
|
218c9f09-f80e-4025-82d5-02e3142b0ef2
|
isPlatformSuitableForNMState
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func isPlatformSuitableForNMState(oc *exutil.CLI) bool {
platform := checkPlatform(oc)
if !strings.Contains(platform, "baremetal") && !strings.Contains(platform, "none") && !strings.Contains(platform, "vsphere") && !strings.Contains(platform, "openstack") {
e2e.Logf("Skipping for unsupported platform, not baremetal/vsphere/openstack!")
return false
}
return true
}
|
networking
| ||||
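Typical use is as a guard at the top of a test body, skipping early on unsupported platforms:

if !isPlatformSuitableForNMState(oc) {
	g.Skip("Skipping NMState tests on this platform")
}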
function
|
openshift/openshift-tests-private
|
51952b40-e033-4d71-8efc-407db6c434aa
|
preCheckforRegistry
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func preCheckforRegistry(oc *exutil.CLI) {
output, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-ojsonpath='{.items[*].status.capabilities}'").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if !strings.Contains(output, "enabledCapabilities") {
g.Skip("Skip testing as enabledCapabilities not found")
}
catalogsource, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("catalogsource", "-n", "openshift-marketplace").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
if !strings.Contains(catalogsource, "qe-app-registry") {
g.Skip("Skip testing as qe-app-registry not found")
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
f049489e-390a-40fc-b2a9-9f990448445d
|
createIPSECPolicy
|
['ipsecHost2hostPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func createIPSECPolicy(oc *exutil.CLI, ipsecPolicy ipsecHost2hostPolicyResource) (bool, error) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipsecPolicy.template, "-p", "NAME="+ipsecPolicy.name, "NODELABEL="+ipsecPolicy.nodelabel, "LABELVALUE="+ipsecPolicy.labelvalue, "TUNELNAME="+ipsecPolicy.tunnelname, "LEFT="+ipsecPolicy.left, "LEFTCERT="+ipsecPolicy.leftcert, "RIGHT="+ipsecPolicy.right, "RIGHTSUBNET="+ipsecPolicy.rightsubnet, "MODE="+ipsecPolicy.mode)
if err != nil {
e2e.Failf("Error configure ipsec policy %v", err)
return false, err
}
return true, nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4b454e13-0967-4f7b-9861-89381ae4f1e4
|
installNMstateOperator
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func installNMstateOperator(oc *exutil.CLI) {
var (
opNamespace = "openshift-nmstate"
opName = "kubernetes-nmstate-operator"
)
e2e.Logf("Check catalogsource and install nmstate operator.")
namespaceTemplate := generateTemplateAbsolutePath("namespace-template.yaml")
operatorGroupTemplate := generateTemplateAbsolutePath("operatorgroup-template.yaml")
subscriptionTemplate := generateTemplateAbsolutePath("subscription-template.yaml")
sub := subscriptionResource{
name: "nmstate-operator-sub",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
operatorInstall(oc, sub, ns, og)
e2e.Logf("SUCCESS - NMState operator installed")
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
0148dc9d-02c5-4247-bde8-d2e866f87a10
|
createNMstateCR
|
['nmstateCRResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func createNMstateCR(oc *exutil.CLI, nmstateCR nmstateCRResource) {
e2e.Logf("Create NMState CR")
var (
opNamespace = "openshift-nmstate"
)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7442f97b-bfd5-4046-af29-b268f3113e35
|
configIPSecNMSatePolicy
|
['ipsecHost2hostPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func configIPSecNMSatePolicy(oc *exutil.CLI, policyName, leftIP, nodeName, tunnelname, rightIP, leftcert, mode string) {
e2e.Logf("Configure NNCP for IPSEC")
ipsecPolicyTemplate := generateTemplateAbsolutePath("ipsec-host2host-policy-template.yaml")
ipsecPolicy := ipsecHost2hostPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
tunnelname: tunnelname,
left: leftIP,
leftcert: leftcert,
right: rightIP,
mode: mode,
rightsubnet: rightIP + "/32",
template: ipsecPolicyTemplate,
}
result, configErr1 := createIPSECPolicy(oc, ipsecPolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
e2e.Logf("Wait ipsec policy applied.")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy failed to be applied")
e2e.Logf("SUCCESS - ipsec policy is applied")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c1f98efa-2157-4777-8260-3c81ae25cf44
|
configIPSecNMSatePolicyHost2net
|
['ipsecHost2hostPolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func configIPSecNMSatePolicyHost2net(oc *exutil.CLI, policyName, leftIP, nodeName, tunnelname, rightIP, rightNetworkAddress, rightNetworkCidr, leftcert, mode string) {
e2e.Logf("Configure NNCP for IPSEC")
ipsecPolicyTemplate := generateTemplateAbsolutePath("ipsec-host2host-policy-template.yaml")
ipsecPolicy := ipsecHost2hostPolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
tunnelname: tunnelname,
left: leftIP,
leftcert: leftcert,
right: rightIP,
mode: mode,
rightsubnet: rightNetworkAddress + rightNetworkCidr,
template: ipsecPolicyTemplate,
}
result, configErr1 := createIPSECPolicy(oc, ipsecPolicy)
o.Expect(configErr1).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
e2e.Logf("Wait ipsec policy applied.")
nncpErr1 := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy failed to be applied")
e2e.Logf("SUCCESS - ipsec policy is applied")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4751e0f8-e2b0-454d-8c91-1ff81dc7d93b
|
removeIPSecConfig
|
['ifacePolicyResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func removeIPSecConfig(oc *exutil.CLI, policyName, ifname, nodeName string) {
policyTemplate := generateTemplateAbsolutePath("iface-policy-template.yaml")
ipsecPolicy := ifacePolicyResource{
name: policyName,
nodelabel: "kubernetes.io/hostname",
labelvalue: nodeName,
ifacename: ifname,
descr: "disable ipsec tunnel",
ifacetype: "ipsec",
state: "absent",
template: policyTemplate,
}
result, configErr := configIface(oc, ipsecPolicy)
o.Expect(configErr).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
nncpErr := checkNNCPStatus(oc, policyName, "Available")
exutil.AssertWaitPollNoErr(nncpErr, "policy failed to be applied")
e2e.Logf("SUCCESS - policy is applied")
deleteNNCP(oc, policyName)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9d4c38d5-2b8b-4c02-9b17-e4f3dad57a19
|
verifyIPSecTunnelUp
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func verifyIPSecTunnelUp(oc *exutil.CLI, nodeName, src, dst, mode string) {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/32 dir out ; ip xfrm policy get src %s/32 dst %s/32 dir in ", src, dst, dst, src)
ipXfrmPolicy, ipsecErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmd)
o.Expect(ipsecErr).NotTo(o.HaveOccurred())
o.Expect(ipXfrmPolicy).Should(o.ContainSubstring(mode))
}
|
networking
| ||||
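A usage sketch with placeholder endpoints; the node name and IPs are hypothetical, and "transport" is shown only as an example mode string expected to appear in the ip xfrm policy output:

// Assert the host-to-host xfrm policies exist in both directions on the node.
verifyIPSecTunnelUp(oc, "worker-0", "10.0.0.10", "10.0.0.20", "transport")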
function
|
openshift/openshift-tests-private
|
a01bcf92-d31f-4f56-aa5c-3fc1a440c3e9
|
verifyIPSecTunnelDown
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func verifyIPSecTunnelDown(oc *exutil.CLI, nodeName, src, dst, mode string) {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/32 dir out ; ip xfrm policy get src %s/32 dst %s/32 dir in ", src, dst, dst, src)
_, ipsecErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmd)
o.Expect(ipsecErr).To(o.HaveOccurred())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7c96d7a7-bb57-4aeb-8ad2-5fcf6b19878f
|
verifyIPSecTunnelUphost2netTunnel
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func verifyIPSecTunnelUphost2netTunnel(oc *exutil.CLI, nodeName, src, dst, mode string) {
cmd := fmt.Sprintf("ip xfrm policy get src %s/32 dst %s/22 dir out ; ip xfrm policy get src %s/22 dst %s/32 dir in ", src, dst, dst, src)
ipXfrmPolicy, ipsecErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmd)
o.Expect(ipsecErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(ipXfrmPolicy, mode)).Should(o.BeTrue())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
3512314d-2f7d-4478-9d4c-6054d649392e
|
checkDNSServer
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func checkDNSServer(oc *exutil.CLI, nodeName string, dnsDomain string, dnsServerIP []string) {
cmd1 := "cat /etc/resolv.conf"
cmd2 := "cat /var/run/NetworkManager/resolv.conf"
cmd3 := `cat /var/lib/NetworkManager/NetworkManager-intern.conf`
resOuput1, dnsErr1 := exutil.DebugNodeRetryWithOptionsAndChroot(oc, nodeName, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", cmd1)
o.Expect(dnsErr1).NotTo(o.HaveOccurred())
resOuput2, dnsErr2 := exutil.DebugNodeRetryWithOptionsAndChroot(oc, nodeName, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", cmd2)
o.Expect(dnsErr2).NotTo(o.HaveOccurred())
resOuput3, dnsErr3 := exutil.DebugNodeRetryWithOptionsAndChroot(oc, nodeName, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", cmd3)
o.Expect(dnsErr3).NotTo(o.HaveOccurred())
e2e.Logf("check resolv.conf results are %v \n , %v, and \n %v \n", resOuput1, resOuput2, resOuput3)
resOuput4, dnsErr4 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-o=jsonpath={.status.currentState.dns-resolver.running}").Output()
o.Expect(dnsErr4).NotTo(o.HaveOccurred())
e2e.Logf("The nns running dns status of node %v is %v", nodeName, resOuput4)
// check domain name
o.Expect(strings.Contains(resOuput1, dnsDomain)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput2, dnsDomain)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput3, dnsDomain)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput4, dnsDomain)).Should(o.BeTrue())
//check nameservers
for _, serverIP := range dnsServerIP {
matchServerIP := "nameserver " + serverIP
o.Expect(strings.Contains(resOuput1, matchServerIP)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput2, matchServerIP)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput3, serverIP)).Should(o.BeTrue())
o.Expect(strings.Contains(resOuput4, serverIP)).Should(o.BeTrue())
}
}
|
networking
| ||||
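A sketch pairing this check with getAvaliableNameServer, as the global-DNS test above does; the domain and second server here are test placeholders:

dnsServers := []string{
	getAvaliableNameServer(oc, nodeName), // first running nameserver from NNS
	"8.8.8.8",                            // placeholder second server
}
checkDNSServer(oc, nodeName, "testglobal.com", dnsServers)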
function
|
openshift/openshift-tests-private
|
93bfc7ae-0843-4a7c-8891-f54138372985
|
getAvaliableNameServer
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func getAvaliableNameServer(oc *exutil.CLI, nodeName string) string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", nodeName, "-o=jsonpath={.status.currentState.dns-resolver.running.server[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
return output
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
78d604f9-52b8-4c34-9046-8d013cf599c1
|
extractMetricValue
|
['"fmt"', '"regexp"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/nmstate_util.go
|
func extractMetricValue(metrics string, featureNames []string, expectValues []int) bool {
for i, featureName := range featureNames {
re := regexp.MustCompile(fmt.Sprintf(`kubernetes_nmstate_features_applied{name="%s"} (\d+)`, regexp.QuoteMeta(featureName)))
match := re.FindStringSubmatch(metrics)
if len(match) < 2 {
e2e.Logf("Metric not found for name: %s", featureName)
return false
}
value, err := strconv.Atoi(match[1])
if err != nil {
e2e.Logf("Failed to convert value to int for name: %s, error: %v", featureName, err)
return false
}
if value != expectValues[i] {
e2e.Logf("%s Metric does not match the expected value of %d", featureName, expectValues[i])
return false
}
}
return true
}
|
networking
| ||||
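A worked example against a fabricated metrics snippet in the Prometheus exposition format the regex expects; the feature names are placeholders, not necessarily real nmstate feature labels, and fmt is assumed to be imported in the caller:

metrics := `kubernetes_nmstate_features_applied{name="dhcpv4-enabled"} 1
kubernetes_nmstate_features_applied{name="static-dns"} 2`
// Matches only when every named feature reports its paired expected value.
ok := extractMetricValue(metrics, []string{"dhcpv4-enabled", "static-dns"}, []int{1, 2})
fmt.Println(ok) // true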
test
|
openshift/openshift-tests-private
|
3e3e5055-ad23-484d-93e4-fbdfc7ffdeee
|
on_prem
|
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/on_prem.go
|
package networking
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN on-prem", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-cno", exutil.KubeConfigPath())
)
//author: [email protected]
g.It("Author:zzhao-Medium-77042-Add annotation in the on-prem namespace static pods for workload partitioning", func() {
// Skip this case for un-supported platform
g.By("Check platforms")
platformtype := exutil.CheckPlatform(oc)
nsForPlatforms := map[string]string{
"baremetal": "openshift-kni-infra",
"vsphere": "openshift-vsphere-infra",
"nutanix": "openshift-nutanix-infra",
}
ns := nsForPlatforms[platformtype]
if ns == "" {
g.Skip("Skip for non-supported platform")
}
appLabel := strings.Replace(ns, "openshift-", "", -1)
lbappLabel := appLabel + "-api-lb"
dnsappLabel := appLabel + "-coredns"
kaappLabel := appLabel + "-vrrp"
allLabels := []string{lbappLabel, dnsappLabel, kaappLabel}
exutil.By("check all pods annotation")
for _, label := range allLabels {
podNames, getPodErr := oc.WithoutNamespace().AsAdmin().Run("get").Args("po", "-n", ns, "-l=app="+label, `-ojsonpath={.items[?(@.status.phase=="Running")].metadata.name}`).Output()
o.Expect(getPodErr).NotTo(o.HaveOccurred())
if podNames == "" {
g.Skip("no related pods are running, so it's maybe use ELB, skip this testing")
}
podName := strings.Fields(podNames)
// Check if the workload partitioning annotation is added
podAnnotation, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("po", "-n", ns, podName[0], `-ojsonpath={.metadata.annotations}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podAnnotation).To(o.ContainSubstring(`"target.workload.openshift.io/management":"{\"effect\": \"PreferredDuringScheduling\"}"`))
}
})
//author: [email protected]
g.It("Author:qiowang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-49841-Medium-50215-IPI on vSphere configures keepalived in unicast mode for API/INGRESS by default [Disruptive]", func() {
platform := exutil.CheckPlatform(oc)
if !strings.Contains(platform, "vsphere") {
g.Skip("Test case should be run on vSphere, skip for other platforms!!")
}
apiVIPs := GetVIPOnCluster(oc, platform, "apiVIP")
ingressVIPs := GetVIPOnCluster(oc, platform, "ingressVIP")
ipStackType := checkIPStackType(oc)
if len(apiVIPs) == 0 || len(ingressVIPs) == 0 {
g.Skip("Found none AIP/INGRESS VIP on the cluster, skip the testing!!")
}
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
masterNodes, getMasterNodeErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMasterNodeErr).NotTo(o.HaveOccurred())
var (
vipNode string
newVIPNode string
vipTypes = []string{"apiVIP", "ingressVIP"}
vips = [][]string{apiVIPs, ingressVIPs}
vipNodeSets = [][]string{masterNodes, nodes}
cmds = []string{"cat /etc/keepalived/monitor.conf", "cat /etc/keepalived/keepalived.conf"}
expResults = []string{"mode: unicast", "unicast_src_ip"}
)
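// For each config file, its output must contain the matching expResults entry:
// the monitor config declares "mode: unicast" and keepalived.conf sets a
// unicast_src_ip when keepalived runs in unicast mode.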
for i, vipType := range vipTypes {
exutil.By("1. Get the node which holds the " + vipType)
e2e.Logf("The %s is: %s", vipType, vips[i])
vipNode = FindVIPNode(oc, vips[i][0])
o.Expect(vipNode).NotTo(o.Equal(""))
vipNodeIP1, vipNodeIP2 := getNodeIP(oc, vipNode)
e2e.Logf("%s is on node %s, the node's IP address is: %s, %s", vipType, vipNode, vipNodeIP1, vipNodeIP2)
exutil.By("2. Check the keepalived monitor file and config file on the " + vipType + " node")
e2e.Logf("Check on the %s node %s", vipType, vipNode)
for j, cmd := range cmds {
datas, err := exutil.DebugNodeWithChroot(oc, vipNode, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(datas, expResults[j])).Should(o.BeTrue())
}
exutil.By("3. Capture vrrp advertisement packets on the " + vipType + " node")
tcpdumpCmd := "timeout 10s tcpdump -nn -i any proto 112"
runCmd, cmdOutput, _, err := oc.WithoutNamespace().AsAdmin().Run("debug").Args("-n", "default", "node/"+vipNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer runCmd.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
runCmd.Wait()
for _, node := range vipNodeSets[i] {
if node != vipNode {
nodeIP1, nodeIP2 := getNodeIP(oc, node)
if ipStackType == "dualstack" {
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP1+" > "+nodeIP1+": VRRPv3, Advertisement")).Should(o.BeTrue())
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP2+" > "+nodeIP2+": VRRPv2, Advertisement")).Should(o.BeTrue())
} else if ipStackType == "ipv6single" {
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP2+" > "+nodeIP2+": VRRPv3, Advertisement")).Should(o.BeTrue())
} else {
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP2+" > "+nodeIP2+": VRRPv2, Advertisement")).Should(o.BeTrue())
}
}
}
exutil.By("4. Reboot the " + vipType + " node, check there will be new node holds the " + vipType)
defer checkNodeStatus(oc, vipNode, "Ready")
rebootNode(oc, vipNode)
checkNodeStatus(oc, vipNode, "NotReady")
checkNodeStatus(oc, vipNode, "Ready")
newVIPNode = FindVIPNode(oc, vips[i][0])
o.Expect(newVIPNode).NotTo(o.Equal(""))
e2e.Logf("%s is on node %s", vipType, newVIPNode)
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
64dc15ca-965b-4235-a902-75abc0eb018d
|
Author:zzhao-Medium-77042-Add annotation in the on-prem namespace static pods for workload partitioning
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/on_prem.go
|
g.It("Author:zzhao-Medium-77042-Add annotation in the on-prem namespace static pods for workload partitioning", func() {
// Skip this case for unsupported platforms
g.By("Check platforms")
platformtype := exutil.CheckPlatform(oc)
nsForPlatforms := map[string]string{
"baremetal": "openshift-kni-infra",
"vsphere": "openshift-vsphere-infra",
"nutanix": "openshift-nutanix-infra",
}
ns := nsForPlatforms[platformtype]
if ns == "" {
g.Skip("Skip for non-supported platform")
}
appLabel := strings.Replace(ns, "openshift-", "", -1)
lbappLabel := appLabel + "-api-lb"
dnsappLabel := appLabel + "-coredns"
kaappLabel := appLabel + "-vrrp"
allLabels := []string{lbappLabel, dnsappLabel, kaappLabel}
exutil.By("check all pods annotation")
for _, label := range allLabels {
podNames, getPodErr := oc.WithoutNamespace().AsAdmin().Run("get").Args("po", "-n", ns, "-l=app="+label, `-ojsonpath={.items[?(@.status.phase=="Running")].metadata.name}`).Output()
o.Expect(getPodErr).NotTo(o.HaveOccurred())
if podNames == "" {
g.Skip("no related pods are running, so it's maybe use ELB, skip this testing")
}
podName := strings.Fields(podNames)
// Check if the workload partitioning annotation is added
podAnnotation, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("po", "-n", ns, podName[0], `-ojsonpath={.metadata.annotations}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podAnnotation).To(o.ContainSubstring(`"target.workload.openshift.io/management":"{\"effect\": \"PreferredDuringScheduling\"}"`))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
af187c29-a88b-47a3-9361-8723604346f9
|
Author:qiowang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-49841-Medium-50215-IPI on vSphere configures keepalived in unicast mode for API/INGRESS by default [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/on_prem.go
|
g.It("Author:qiowang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-49841-Medium-50215-IPI on vSphere configures keepalived in unicast mode for API/INGRESS by default [Disruptive]", func() {
platform := exutil.CheckPlatform(oc)
if !strings.Contains(platform, "vsphere") {
g.Skip("Test case should be run on vSphere, skip for other platforms!!")
}
apiVIPs := GetVIPOnCluster(oc, platform, "apiVIP")
ingressVIPs := GetVIPOnCluster(oc, platform, "ingressVIP")
ipStackType := checkIPStackType(oc)
if len(apiVIPs) == 0 || len(ingressVIPs) == 0 {
g.Skip("Found none AIP/INGRESS VIP on the cluster, skip the testing!!")
}
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
masterNodes, getMasterNodeErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMasterNodeErr).NotTo(o.HaveOccurred())
var (
vipNode string
newVIPNode string
vipTypes = []string{"apiVIP", "ingressVIP"}
vips = [][]string{apiVIPs, ingressVIPs}
vipNodeSets = [][]string{masterNodes, nodes}
cmds = []string{"cat /etc/keepalived/monitor.conf", "cat /etc/keepalived/keepalived.conf"}
expResults = []string{"mode: unicast", "unicast_src_ip"}
)
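// For each config file, its output must contain the matching expResults entry:
// the monitor config declares "mode: unicast" and keepalived.conf sets a
// unicast_src_ip when keepalived runs in unicast mode.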
for i, vipType := range vipTypes {
exutil.By("1. Get the node which holds the " + vipType)
e2e.Logf("The %s is: %s", vipType, vips[i])
vipNode = FindVIPNode(oc, vips[i][0])
o.Expect(vipNode).NotTo(o.Equal(""))
vipNodeIP1, vipNodeIP2 := getNodeIP(oc, vipNode)
e2e.Logf("%s is on node %s, the node's IP address is: %s, %s", vipType, vipNode, vipNodeIP1, vipNodeIP2)
exutil.By("2. Check the keepalived monitor file and config file on the " + vipType + " node")
e2e.Logf("Check on the %s node %s", vipType, vipNode)
for j, cmd := range cmds {
datas, err := exutil.DebugNodeWithChroot(oc, vipNode, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(datas, expResults[j])).Should(o.BeTrue())
}
exutil.By("3. Capture vrrp advertisement packets on the " + vipType + " node")
tcpdumpCmd := "timeout 10s tcpdump -nn -i any proto 112"
runCmd, cmdOutput, _, err := oc.WithoutNamespace().AsAdmin().Run("debug").Args("-n", "default", "node/"+vipNode, "--", "bash", "-c", tcpdumpCmd).Background()
defer runCmd.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
runCmd.Wait()
for _, node := range vipNodeSets[i] {
if node != vipNode {
nodeIP1, nodeIP2 := getNodeIP(oc, node)
if ipStackType == "dualstack" {
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP1+" > "+nodeIP1+": VRRPv3, Advertisement")).Should(o.BeTrue())
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP2+" > "+nodeIP2+": VRRPv2, Advertisement")).Should(o.BeTrue())
} else if ipStackType == "ipv6single" {
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP2+" > "+nodeIP2+": VRRPv3, Advertisement")).Should(o.BeTrue())
} else {
o.Expect(strings.Contains(cmdOutput.String(), vipNodeIP2+" > "+nodeIP2+": VRRPv2, Advertisement")).Should(o.BeTrue())
}
}
}
exutil.By("4. Reboot the " + vipType + " node, check there will be new node holds the " + vipType)
defer checkNodeStatus(oc, vipNode, "Ready")
rebootNode(oc, vipNode)
checkNodeStatus(oc, vipNode, "NotReady")
checkNodeStatus(oc, vipNode, "Ready")
newVIPNode = FindVIPNode(oc, vips[i][0])
o.Expect(newVIPNode).NotTo(o.Equal(""))
e2e.Logf("%s is on node %s", vipType, newVIPNode)
}
})
| |||||
test
|
openshift/openshift-tests-private
|
d1de2bf0-f285-4878-8d58-1701035cb70a
|
ovn_misc
|
import (
"context"
"fmt"
"net"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
package networking
import (
"context"
"fmt"
"net"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN misc", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-ovnkubernetes", exutil.KubeConfigPath())
g.BeforeEach(func() {
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
// author: [email protected]
g.It("Author:anusaxen-Medium-49216-ovnkube-node logs should not print api token in logs. ", func() {
g.By("it's for bug 2009857")
workerNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ovnkubePod, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", workerNode)
o.Expect(err).NotTo(o.HaveOccurred())
podlogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(ovnkubePod, "-n", "openshift-ovn-kubernetes", "-c", "ovnkube-controller").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podlogs).NotTo(o.ContainSubstring("kube-api-token"))
g.By("ovnkube-node logs doesn't contain api-token")
})
//author: [email protected]
g.It("NonHyperShiftHOST-Author:zzhao-Medium-54742- Completed pod ip can be released.[Flaky]", func() {
g.By("it's for bug 2091157,Check the ovnkube-master logs to see if completed pod already release ip")
result := findLogFromPod(oc, "Releasing IPs for Completed pod", "openshift-ovn-kubernetes", "app=ovnkube-node", "ovnkube-controller")
o.Expect(result).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:anusaxen-NonHyperShiftHOST-NonPreRelease-High-55144-[FdpOvnOvs] Switching OVN gateway modes should not delete custom routes created on node logical routers.[Disruptive] ", func() {
exutil.By("it's for bug 2042516")
var desiredMode string
// Determine the cluster's original gateway mode so it can be restored after the test
origMode := getOVNGatewayMode(oc)
if origMode == "local" {
desiredMode = "shared"
} else {
desiredMode = "local"
}
e2e.Logf("Cluster is currently on gateway mode %s", origMode)
e2e.Logf("Desired mode is %s", desiredMode)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one schedulable node")
}
exutil.By("Add a logical route on a node")
nodeLogicalRouterName := "GR_" + nodeList.Items[0].Name
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
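// ovn-nbctl lr-route-add/lr-route-del manage static routes on the node's
// gateway logical router (GR_<node>); the 192.168.122.0/24 route added here is
// a dummy used only to verify persistence across gateway-mode switches.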
lrRouteListDelCmd := "ovn-nbctl lr-route-del " + nodeLogicalRouterName + " 192.168.122.0/24 192.168.122.4"
lrRouteListAddCmd := "ovn-nbctl lr-route-add " + nodeLogicalRouterName + " 192.168.122.0/24 192.168.122.4"
defer exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
_, lrlErr1 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListAddCmd)
o.Expect(lrlErr1).NotTo(o.HaveOccurred())
defer switchOVNGatewayMode(oc, origMode)
switchOVNGatewayMode(oc, desiredMode)
exutil.By("List the logical route on a node after gateway mode switch")
lrRouteListCmd := "ovn-nbctl lr-route-list " + nodeLogicalRouterName
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
defer exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
lRlOutput, lrlErr2 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListCmd)
o.Expect(lrlErr2).NotTo(o.HaveOccurred())
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.0/24"))
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.4"))
// Revert the cluster to its original gateway mode and delete the test route
switchOVNGatewayMode(oc, origMode)
exutil.By("List the logical route on a node after gateway mode revert")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
defer exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
lRlOutput, lrlErr3 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListCmd)
o.Expect(lrlErr3).NotTo(o.HaveOccurred())
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.0/24"))
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.4"))
exutil.By("Delete the logical route on a node after gateway mode revert")
_, lrlErr4 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
o.Expect(lrlErr4).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("NonHyperShiftHOST-Author:jechen-Medium-61312-Unsupported scenarios in expanding cluster networks should be denied. [Disruptive]", func() {
ipStackType := checkIPStackType(oc)
if ipStackType != "ipv4single" {
g.Skip("The feature is currently supported on IPv4 cluster only, skip for other IP stack type for now")
}
origNetworkCIDR, orighostPrefix := getClusterNetworkInfo(oc)
origNetAddress := strings.Split(origNetworkCIDR, "/")[0]
origNetMaskVal, _ := strconv.Atoi(strings.Split(origNetworkCIDR, "/")[1])
origHostPrefixVal, _ := strconv.Atoi(orighostPrefix)
e2e.Logf("Original netAddress:%v, netMask:%v, hostPrefix: %v", origNetAddress, origNetMaskVal, origHostPrefixVal)
g.By("1. Verify that decreasing IP space by larger CIDR mask is not allowed")
newCIDR := origNetAddress + "/" + strconv.Itoa(origNetMaskVal+1)
e2e.Logf("Attempt to change to newCIDR: %v", newCIDR)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
restorePatchValue := "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + origNetworkCIDR + "\", \"hostPrefix\":" + orighostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
defer patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
patchValue := "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + newCIDR + "\", \"hostPrefix\":" + orighostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [reducing IP range with a larger CIDR mask for clusterNetwork CIDR is unsupported]`))
// restore to original valid config before next step
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [reducing IP range with a larger CIDR mask for clusterNetwork CIDR is unsupported]`))
g.By("2. Verify that changing hostPrefix is not allowed")
newHostPrefix := strconv.Itoa(origHostPrefixVal + 1)
e2e.Logf("Attempt to change to newHostPrefix: %v", newHostPrefix)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
patchValue = "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + origNetworkCIDR + "\", \"hostPrefix\":" + newHostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
// restore to original valid config before next step
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
newHostPrefix = strconv.Itoa(origHostPrefixVal - 1)
e2e.Logf("Attempt to change to newHostPrefix: %v", newHostPrefix)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
patchValue = "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + origNetworkCIDR + "\", \"hostPrefix\":" + newHostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
// restore to original valid config before next step
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
g.By("3. Verify that changing network IP is not allowed")
subAddress := strings.Split(origNetAddress, ".")
subAddressB, _ := strconv.Atoi(subAddress[1])
newSubAddressB := strconv.Itoa(subAddressB + 1)
newNetAddress := subAddress[0] + "." + newSubAddressB + "." + subAddress[2] + "." + subAddress[3]
newCIDR = newNetAddress + "/" + strconv.Itoa(origNetMaskVal)
e2e.Logf("Attempt to change to newCIDR: %v", newCIDR)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
patchValue = "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + newCIDR + "\", \"hostPrefix\":" + orighostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [modifying IP network value for clusterNetwork CIDR is unsupported]`))
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [modifying IP network value for clusterNetwork CIDR is unsupported]`))
})
//author: [email protected]
//bug: https://issues.redhat.com/browse/OCPBUGS-2827
g.It("NonHyperShiftHOST-ConnectedOnly-ROSA-OSD_CCS-Author:zzhao-Medium-64297- check nodeport service with large mtu.[Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
hostPortServiceFile = filepath.Join(buildPruningBaseDir, "ocpbug-2827/hostport.yaml")
mtuTestFile = filepath.Join(buildPruningBaseDir, "ocpbug-2827/mtutest.yaml")
ns1 = "openshift-kube-apiserver"
)
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "aws")
if !acceptedPlatform {
g.Skip("Test cases should be run on AWS cluster with ovn network plugin, skip for other platforms or other network plugin!!")
}
g.By("create nodeport service in namespace")
defer removeResource(oc, true, true, "-f", hostPortServiceFile, "-n", ns1)
createResourceFromFile(oc, ns1, hostPortServiceFile)
g.By("create mtutest pod")
defer removeResource(oc, true, true, "-f", mtuTestFile, "-n", ns1)
createResourceFromFile(oc, ns1, mtuTestFile)
err := waitForPodWithLabelReady(oc, ns1, "app=mtu-tester")
exutil.AssertWaitPollNoErr(err, "this pod with label app=mtu-tester not ready")
mtuTestPod := getPodName(oc, ns1, "app=mtu-tester")
g.By("get one nodeip")
PodNodeName, nodeErr := exutil.GetPodNodeName(oc, ns1, mtuTestPod[0])
o.Expect(nodeErr).NotTo(o.HaveOccurred())
nodeIp := getNodeIPv4(oc, ns1, PodNodeName)
output, err := e2eoutput.RunHostCmd(ns1, mtuTestPod[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(nodeIp, "31251")+"?mtu=8849 2>/dev/null | cut -b-10")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "Terminated")).To(o.BeFalse())
output, err = e2eoutput.RunHostCmd(ns1, mtuTestPod[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(nodeIp, "31251")+"?mtu=8850 2>/dev/null | cut -b-10")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "Terminated")).To(o.BeFalse())
})
// author: [email protected]
g.It("Author:anusaxen-High-64151-check node healthz port is enabled for ovnk in CNO for GCP", func() {
e2e.Logf("It is for OCPBUGS-7158")
platform := checkPlatform(oc)
if !strings.Contains(platform, "gcp") {
g.Skip("Skip for un-expected platform,not GCP!")
}
g.By("Expect healtz-bind-address to be present in ovnkube-config config map")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-ovn-kubernetes", "ovnkube-config", "-ojson").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "0.0.0.0:10256")).To(o.BeTrue())
g.By("Make sure healtz-bind-address is reachable via nodes")
worker_node, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
output, err = exutil.DebugNode(oc, worker_node, "bash", "-c", "curl -v http://0.0.0.0:10256/healthz")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("HTTP/1.1 200 OK"))
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:jechen-High-68418-Same name pod can be recreated on new node and still work on OVN cluster. [Disruptive]", func() {
// This is for customer bug: https://issues.redhat.com/browse/OCPBUGS-18681
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
kubeletKillerPodTemplate := filepath.Join(buildPruningBaseDir, "kubelet-killer-pod-template.yaml")
exutil.By("1. Create a new machineset, get the new node created\n")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-68418"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
e2e.Logf("Get nodeName: %v", nodeName)
exutil.By("2. Create kubelet-killer pod on the node\n")
kkPod := kubeletKillerPod{
name: "kubelet-killer-68418",
namespace: "openshift-machine-api",
nodename: nodeName,
template: kubeletKillerPodTemplate,
}
kkPod.createKubeletKillerPodOnNode(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "kubelet-killer-68418", "-n", kkPod.namespace, "--ignore-not-found=true").Execute()
// After the kubelet-killer pod is created, it kills the kubelet of the node it resides on; the pod quickly transitions into Pending phase and stays there once its node becomes NotReady
podStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", kkPod.name, "-n", kkPod.namespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("kkPod status:%v", podStatus)
o.Expect(regexp.MatchString("Pending", podStatus)).Should(o.BeTrue())
// node is expected to be in NotReady state after kubelet killer pod kills its kubelet
checkNodeStatus(oc, nodeName, "NotReady")
exutil.By("3. Delete the node and its machineset, and delete the kubelet-killer pod\n")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("machines.machine.openshift.io", machineName[0], "-n", "openshift-machine-api").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// Verify the machineset is deleted
ms.DeleteMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 0, machinesetName)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "kubelet-killer-68418", "-n", kkPod.namespace, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Recreate the machineset, get the newer node created\n")
ms2 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer ms2.DeleteMachineSet(oc)
ms2.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
machineName = clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
newNodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
exutil.By("5. Recreate kubelet-killer pod with same pod name on the newer node\n")
kkPod2 := kubeletKillerPod{
name: "kubelet-killer-68418",
namespace: "openshift-machine-api",
nodename: newNodeName,
template: kubeletKillerPodTemplate,
}
kkPod2.createKubeletKillerPodOnNode(oc)
// After kubelet-killer pod2 is created, it kills the kubelet of the node it resides on; the pod quickly transitions into Pending phase and stays there once its node becomes NotReady
podStatus, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", kkPod2.name, "-n", kkPod2.namespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("kkPod2 status:%v", podStatus)
o.Expect(regexp.MatchString("Pending", podStatus)).Should(o.BeTrue())
// Verify kubelet-killer pod was able to be recreated and does it job of killing the node
checkNodeStatus(oc, newNodeName, "NotReady")
exutil.By("6. Verify ErrorAddingLogicalPort or FailedCreateSandBox events are not generated when pod is recreated\n")
podDescribe, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", kkPod2.name, "-n", kkPod2.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(regexp.MatchString("ErrorAddingLogicalPort", podDescribe)).Should(o.BeFalse())
o.Expect(regexp.MatchString("FailedCreatedPodSandBox", podDescribe)).Should(o.BeFalse())
exutil.By("7. Cleanup after test: delete the node and its machineset, then delete the kubelet-killer pod\n")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("machines.machine.openshift.io", machineName[0], "-n", "openshift-machine-api").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// Verify the machineset is deleted
ms.DeleteMachineSet(oc)
// 960s total wait.poll time may not be enough for some types of clusters; add some sleep time before WaitForMachinesRunning
time.Sleep(180 * time.Second)
clusterinfra.WaitForMachinesRunning(oc, 0, machinesetName)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "kubelet-killer-68418", "-n", kkPod.namespace, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
// https://issues.redhat.com/browse/OCPBUGS-4825
g.It("Author:asood-Medium-66047-[FdpOvnOvs] Verify allocated IP address of the pod on a specific node with completed status when delete is released in OVN DB", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
completedPodNodeTemplate = filepath.Join(buildPruningBaseDir, "completed-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items)).NotTo(o.BeEquivalentTo(0))
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create pods with completed status")
for i := 0; i < 50; i++ {
podns := pingPodResourceNode{
name: "completed-pod-" + strconv.Itoa(i),
namespace: ns,
nodename: nodeList.Items[0].Name,
template: completedPodNodeTemplate,
}
podns.createPingPodNode(oc)
}
exutil.By("Count all the pods with completed status")
allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=completed-pod")
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(len(allPods)).To(o.BeEquivalentTo(50))
// Allow the last pod IP to be released before checking NB DB
time.Sleep(10 * time.Second)
exutil.By("Verify there are no IP in NB DB for the completed pods")
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
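// Count NB DB entries (pod logical switch ports) that still reference the
// namespace; the completed pods should have had their IPs released, so the
// expected count is 0.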
getCmd := fmt.Sprintf("ovn-nbctl show | grep '%s' | wc -l", ns)
getCount, getCmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", getCmd)
o.Expect(getCmdErr).NotTo(o.HaveOccurred())
o.Expect(strconv.Atoi(getCount)).To(o.BeEquivalentTo(0))
exutil.By("Delete all the pods with completed status")
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "-l", "name=completed-pod", "-n", ns).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
})
g.It("Author:qiowang-Medium-69761-Check apbexternalroute status when all zones reported success", func() {
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
exutil.By("1. Create Admin Policy Based External route object")
ns := oc.Namespace()
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-69761",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("2. Check status of apbexternalroute object")
checkErr := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
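// Each zone (node) reports its own status line of the form
// "<node>: configured external gateway IPs: <ip1>,<ip2>".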
for _, node := range nodes {
o.Expect(messages).Should(o.ContainSubstring(node + ": configured external gateway IPs: " + apbExternalRoute.ip1 + "," + apbExternalRoute.ip2))
}
})
g.It("Author:qiowang-Medium-69762-Check egressfirewall status when all zones reported success", func() {
ipStackType := checkIPStackType(oc)
var egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2021::/96"
} else if ipStackType == "ipv6single" {
egressFWCIDR1 = "2021::/96"
egressFWCIDR2 = "2022::/96"
} else {
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2.1.2.0/24"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Create egressfirewall object")
ns := oc.Namespace()
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("2. Check status of egressfirewall object")
checkErr := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
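// On success each node (zone) reports "<node>: EgressFirewall Rules applied".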
for _, node := range nodes {
o.Expect(messages).Should(o.ContainSubstring(node + ": EgressFirewall Rules applied"))
}
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:huirwang-High-69198-Oversized UDP packet handling. [Disruptive]", func() {
//It is for customer bug https://issues.redhat.com/browse/OCPBUGS-23334
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
)
// This case needs an external host, will run it on rdu1 cluster only.
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1 cluster, skip for other envrionment!!!")
}
exutil.By("Switch to local gate way mode.")
defer switchOVNGatewayMode(oc, "shared")
switchOVNGatewayMode(oc, "local")
ns1 := oc.Namespace()
workers := excludeSriovNodes(oc)
o.Expect(len(workers) > 0).Should(o.BeTrue())
exutil.By("create a hello pod in first namespace")
pod1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns1,
nodename: workers[0],
template: pingPodTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Label one worker node as egress node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[0], "k8s.ovn.org/egress-assignable")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[0], "k8s.ovn.org/egress-assignable", "true")
exutil.By("Create egressIP object")
freeIPs := findFreeIPs(oc, workers[0], 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip1 := egressIPResource1{
name: "egressip-69198",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer removeResource(oc, true, true, "egressip", egressip1.name)
egressip1.createEgressIPObject1(oc)
verifyExpectedEIPNumInEIPObject(oc, egressip1.name, 1)
exutil.By("Add matched label to test namespace")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Start iperf3 on external host")
iperfServerCmd := "nohup iperf3 -s &"
externalHost := "10.8.1.181"
defer func() {
err = sshRunCmd(exteranlHost, "root", "pkill iperf3 &")
o.Expect(err).NotTo(o.HaveOccurred())
}()
go func() {
err = sshRunCmd(exteranlHost, "root", iperfServerCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}()
// iperf3 starts in the background; wait to make sure the server is up.
time.Sleep(10 * time.Second)
exutil.By("Start iperf3 client on test pod and send udp traffic")
iperfClientCmd := "iperf3 -u -n 1647 -l 1647 -c 192.168.111.1 -R -d -i 10"
res, err := exutil.RemoteShPodWithBash(oc, ns1, pod1.name, iperfClientCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(res, "iperf Done")).Should(o.BeTrue(), fmt.Sprintf("The client sent large packet to server failed with message: %s", res))
o.Expect(strings.Contains(res, "iperf3: error - control socket has closed unexpectedly")).ShouldNot(o.BeTrue(), fmt.Sprintf("The client sokcet was closed unexpectedly with error :%s", res))
exutil.By("Remove matched label to test namespace")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Again--start iperf3 client on test pod and send udp traffic")
res, err = exutil.RemoteShPodWithBash(oc, ns1, pod1.name, iperfClientCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(res, "iperf Done")).Should(o.BeTrue(), fmt.Sprintf("The client sent large packet to server failed with message: %s", res))
o.Expect(strings.Contains(res, "iperf3: error - control socket has closed unexpectedly")).ShouldNot(o.BeTrue(), fmt.Sprintf("The client sokcet was closed unexpectedly with error :%s", res))
})
g.It("Author:qiowang-Medium-69875-Check apbexternalroute status when there is zone reported failure [Disruptive]", func() {
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
workerNode, getWorkerErr := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(getWorkerErr).NotTo(o.HaveOccurred())
exutil.By("1. Create pod on one worker node")
ns := oc.Namespace()
pod := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: workerNode,
template: pingPodNodeTemplate,
}
defer pod.deletePingPodNode(oc)
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("2. Remove node annotation k8s.ovn.org/l3-gateway-config")
annotation, getAnnotationErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node/"+workerNode, "-o", "jsonpath='{.metadata.annotations.k8s\\.ovn\\.org/l3-gateway-config}'").Output()
o.Expect(getAnnotationErr).NotTo(o.HaveOccurred())
defer exutil.AddAnnotationsToSpecificResource(oc, "node/"+workerNode, "", "k8s.ovn.org/l3-gateway-config="+strings.Trim(annotation, "'"))
exutil.RemoveAnnotationFromSpecificResource(oc, "node/"+workerNode, "", "k8s.ovn.org/l3-gateway-config")
exutil.By("3. Create Admin Policy Based External route object")
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-69875",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("4. Check status of apbexternalroute object")
checkErr := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Fail")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("apbexternalroute %s doesn't show Fail in time", apbExternalRoute.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
if node == workerNode {
o.Expect(messages).Should(o.ContainSubstring(node + ": " + node + " failed to apply policy"))
} else {
o.Expect(messages).Should(o.ContainSubstring(node + ": configured external gateway IPs: " + apbExternalRoute.ip1 + "," + apbExternalRoute.ip2))
}
}
})
g.It("Author:qiowang-Medium-69873-Medium-69874-Check apbexternalroute/egressfirewall status when no failure reported and not all zones reported success [Disruptive]", func() {
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodes) < 2 {
g.Skip("Not enough nodes for the test, need at least 2 linux nodes, skip the case!!")
}
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2, egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2021::/96"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
egressFWCIDR1 = "2021::/96"
egressFWCIDR2 = "2022::/96"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2.1.2.0/24"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Reboot one worker node, wait it becomes NotReady")
workerNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer checkNodeStatus(oc, workerNode, "Ready")
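// Simulate a temporary node outage: take the default bridge (br-ex) down for
// 120s so the node loses connectivity and goes NotReady, then bring it back.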
defaultInt := "br-ex"
fileContent := fmt.Sprintf("ifconfig %s down; sleep 120; ifconfig %s up;", defaultInt, defaultInt)
createFileCmd := `echo -e "` + fileContent + `" > /tmp/test.sh`
_, err1 := exutil.DebugNodeWithChroot(oc, workerNode, "bash", "-c", createFileCmd)
o.Expect(err1).NotTo(o.HaveOccurred())
delFileCmd := "rm -rf /tmp/test.sh"
defer exutil.DebugNodeWithChroot(oc, workerNode, "bash", "-c", delFileCmd)
chmodCmd := "chmod +x /tmp/test.sh"
_, err2 := exutil.DebugNodeWithChroot(oc, workerNode, "bash", "-c", chmodCmd)
o.Expect(err2).NotTo(o.HaveOccurred())
testCmd := "/tmp/test.sh"
runCmd, _, _, runCmdErr := oc.AsAdmin().Run("debug").Args("node/"+workerNode, "--to-namespace", "default", "--", "chroot", "/host", "bash", "-c", testCmd).Background()
defer runCmd.Process.Kill()
o.Expect(runCmdErr).NotTo(o.HaveOccurred())
checkNodeStatus(oc, workerNode, "NotReady")
exutil.By("2. Create Admin Policy Based External route object with static gateway when the worker node in NotReady status")
ns := oc.Namespace()
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-69873",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("3. Create egressfirewall object with allow rule when the worker node in NotReady status")
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("4. Check status of apbexternalroute/egressfirewall object")
apbExtRouteSta, apbExtRouteStaErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.status}`).Output()
o.Expect(apbExtRouteStaErr).NotTo(o.HaveOccurred())
o.Expect(apbExtRouteSta).Should(o.BeEmpty())
apbExtRouteMsgs, apbExtRouteMsgsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr).NotTo(o.HaveOccurred())
egressFWStatus, egressFWStatusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.status}`).Output()
o.Expect(egressFWStatusErr).NotTo(o.HaveOccurred())
o.Expect(egressFWStatus).Should(o.BeEmpty())
egressFWMsgs, egressFWMsgsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
if node == workerNode {
o.Expect(strings.Contains(apbExtRouteMsgs, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).ShouldNot(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs, node+": EgressFirewall Rules applied")).ShouldNot(o.BeTrue())
} else {
o.Expect(strings.Contains(apbExtRouteMsgs, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs, node+": EgressFirewall Rules applied")).Should(o.BeTrue())
}
}
exutil.By("5. Wait for the rebooted worker node back")
checkNodeStatus(oc, workerNode, "Ready")
exutil.By("6. Check status of apbexternalroute/egressfirewall object after the rebooted worker node back")
apbExtRouteCheckErr := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs2, apbExtRouteMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr2).NotTo(o.HaveOccurred())
egressFWCheckErr := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs2, egressFWMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr2).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(strings.Contains(apbExtRouteMsgs2, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs2, node+": EgressFirewall Rules applied")).Should(o.BeTrue())
}
})
g.It("Author:qiowang-Medium-69876-Check egressfirewall status when there is zone reported failure", func() {
ipStackType := checkIPStackType(oc)
var egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
egressFWCIDR1 = "1.1.1.1"
egressFWCIDR2 = "2011::11"
} else if ipStackType == "ipv6single" {
egressFWCIDR1 = "2011::11"
egressFWCIDR2 = "2012::11"
} else {
egressFWCIDR1 = "1.1.1.1"
egressFWCIDR2 = "2.1.1.1"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Create egressfirewall object which missing CIDR prefix")
ns := oc.Namespace()
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("2. Check status of egressfirewall object")
checkErr := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules not correctly applied")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("EgressFirewall Rule %s doesn't show failure in time", egressFW.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(strings.Contains(messages, node+": EgressFirewall Rules not correctly applied")).Should(o.BeTrue())
}
})
g.It("NonHyperShiftHOST-NonPreRelease-Author:qiowang-Medium-70011-Medium-70012-Check apbexternalroute/egressfirewall status when machine added/removed [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2, egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2021::/96"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
egressFWCIDR1 = "2021::/96"
egressFWCIDR2 = "2022::/96"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2.1.2.0/24"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Create Admin Policy Based External route object with static gateway")
ns := oc.Namespace()
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-70011",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("2. Create egressfirewall object with allow rule")
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("3. Check status of apbexternalroute/egressfirewall object")
apbExtRouteCheckErr1 := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr1, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs1, apbExtRouteMsgsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr1).NotTo(o.HaveOccurred())
egressFWCheckErr1 := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr1, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs1, egressFWMsgsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr1).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(strings.Contains(apbExtRouteMsgs1, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs1, node+": EgressFirewall Rules applied")).Should(o.BeTrue())
}
exutil.By("4. Add machine")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-70011"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
newNode := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
e2e.Logf("New node is:%s", newNode)
exutil.By("5. Check status of apbexternalroute/egressfirewall object when new machine added")
apbExtRouteCheckErr2 := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr2, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs2, apbExtRouteMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(apbExtRouteMsgs2, newNode+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
egressFWCheckErr2 := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr2, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs2, egressFWMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(egressFWMsgs2, newNode+": EgressFirewall Rules applied")).Should(o.BeTrue())
exutil.By("6. Remove machine")
ms.DeleteMachineSet(oc)
clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
exutil.By("7. Check status of apbexternalroute/egressfirewall object after machine removed")
apbExtRouteCheckErr3 := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr3, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs3, apbExtRouteMsgsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr3).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(apbExtRouteMsgs3, newNode+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).ShouldNot(o.BeTrue())
egressFWCheckErr3 := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr3, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs3, egressFWMsgsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr3).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(egressFWMsgs3, newNode+": EgressFirewall Rules applied")).ShouldNot(o.BeTrue())
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:jechen-High-72028-Join switch IP and management port IP for newly added node should be synced correctly into NBDB, pod on new node can communicate with old pod on old node. [Disruptive]", func() {
// This is for customer bug: https://issues.redhat.com/browse/OCPBUGS-28724
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allowFromAllNSNetworkPolicyFile := filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-all-namespaces.yaml")
clusterinfra.SkipConditionally(oc)
exutil.By("1. Get an existing schedulable node\n")
currentNodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
oldNode := currentNodeList.Items[0].Name
exutil.By("2. Obtain the namespace\n")
ns1 := oc.Namespace()
exutil.By("3.Create a network policy in the namespace\n")
createResourceFromFile(oc, ns1, allowFromAllNSNetworkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-all-namespaces"))
exutil.By("4. Create a test pod on the namespace on the existing node\n")
podOnOldNode := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: oldNode,
template: pingPodNodeTemplate,
}
podOnOldNode.createPingPodNode(oc)
waitPodReady(oc, podOnOldNode.namespace, podOnOldNode.name)
exutil.By("5. Create a new machineset, get the new node created\n")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-72028"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
newNodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
e2e.Logf("Get new node name: %s", newNodeName)
exutil.By("6. Create second namespace,create another test pod in it on the new node\n")
oc.SetupProject()
ns2 := oc.Namespace()
podOnNewNode := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: newNodeName,
template: pingPodNodeTemplate,
}
podOnNewNode.createPingPodNode(oc)
waitPodReady(oc, podOnNewNode.namespace, podOnNewNode.name)
exutil.By("7. Get management IP(s) and join switch IP(s) for the new node\n")
ipStack := checkIPStackType(oc)
var nodeOVNK8sMgmtIPv4, nodeOVNK8sMgmtIPv6 string
if ipStack == "dualstack" || ipStack == "ipv6single" {
nodeOVNK8sMgmtIPv6 = getOVNK8sNodeMgmtIPv6(oc, newNodeName)
}
if ipStack == "dualstack" || ipStack == "ipv4single" {
nodeOVNK8sMgmtIPv4 = getOVNK8sNodeMgmtIPv4(oc, newNodeName)
}
e2e.Logf("\n ipStack type: %s, nodeOVNK8sMgmtIPv4: %s, nodeOVNK8sMgmtIPv6: ---->%s<---- \n", ipStack, nodeOVNK8sMgmtIPv4, nodeOVNK8sMgmtIPv6)
joinSwitchIPv4, joinSwitchIPv6 := getJoinSwitchIPofNode(oc, newNodeName)
e2e.Logf("\n Got joinSwitchIPv4: %v, joinSwitchIPv6: %v\n", joinSwitchIPv4, joinSwitchIPv6)
exutil.By("8. Check host network adresses in each node's northdb, it should include join switch IP and management IP of newly added node\n")
allNodeList, nodeErr := exutil.GetAllNodes(oc)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
o.Expect(len(allNodeList)).NotTo(o.BeEquivalentTo(0))
for _, eachNodeName := range allNodeList {
ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", eachNodeName)
o.Expect(podErr).NotTo(o.HaveOccurred())
o.Expect(ovnKubePod).ShouldNot(o.Equal(""))
if ipStack == "dualstack" || ipStack == "ipv4single" {
externalIDv4 := "external_ids:\\\"k8s.ovn.org/id\\\"=\\\"default-network-controller:Namespace:openshift-host-network:v4\\\""
hostNetworkIPsv4 := getHostNetworkIPsinNBDB(oc, eachNodeName, externalIDv4)
e2e.Logf("\n Got hostNetworkIPsv4 for node %s : %v\n", eachNodeName, hostNetworkIPsv4)
o.Expect(contains(hostNetworkIPsv4, nodeOVNK8sMgmtIPv4)).Should(o.BeTrue(), fmt.Sprintf("New node's mgmt IPv4 is not updated to node %s in NBDB!", eachNodeName))
o.Expect(unorderedContains(hostNetworkIPsv4, joinSwitchIPv4)).Should(o.BeTrue(), fmt.Sprintf("New node's join switch IPv4 is not updated to node %s in NBDB!", eachNodeName))
}
if ipStack == "dualstack" || ipStack == "ipv6single" {
externalIDv6 := "external_ids:\\\"k8s.ovn.org/id\\\"=\\\"default-network-controller:Namespace:openshift-host-network:v6\\\""
hostNetworkIPsv6 := getHostNetworkIPsinNBDB(oc, eachNodeName, externalIDv6)
e2e.Logf("\n Got hostNetworkIPsv6 for node %s : %v\n", eachNodeName, hostNetworkIPsv6)
o.Expect(contains(hostNetworkIPsv6, nodeOVNK8sMgmtIPv6)).Should(o.BeTrue(), fmt.Sprintf("New node's mgmt IPv6 is not updated to node %s in NBDB!", eachNodeName))
o.Expect(unorderedContains(hostNetworkIPsv6, joinSwitchIPv6)).Should(o.BeTrue(), fmt.Sprintf("New node's join switch IPv6 is not updated to node %s in NBDB!", eachNodeName))
}
}
exutil.By("9. Verify that new pod on new node can communicate with old pod on old node \n")
CurlPod2PodPass(oc, podOnOldNode.namespace, podOnOldNode.name, podOnNewNode.namespace, podOnNewNode.name)
CurlPod2PodPass(oc, podOnNewNode.namespace, podOnNewNode.name, podOnOldNode.namespace, podOnOldNode.name)
})
g.It("Author:qiowang-Medium-68920-kubernetes service route is recoverable if it's cleared [Disruptive]", func() {
e2e.Logf("It is for OCPBUGS-1715")
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get service subnets")
svcSubnetStr, getSubnetsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("network.operator", "cluster", `-ojsonpath={.spec.serviceNetwork}`).Output()
o.Expect(getSubnetsErr).NotTo(o.HaveOccurred())
svcSubnets := strings.Split(strings.Trim(svcSubnetStr, "[]"), ",")
for _, svcSubnet := range svcSubnets {
svcSubnet := strings.Trim(svcSubnet, `"`)
var verFlag string
if strings.Count(svcSubnet, ":") >= 2 {
verFlag = "-6"
} else if strings.Count(svcSubnet, ".") >= 2 {
verFlag = "-4"
}
exutil.By("Delete service route on one of the worker node")
origSvcRouteStr, getRouteErr := exutil.DebugNode(oc, nodeName, "ip", verFlag, "route", "show", svcSubnet)
e2e.Logf("original service route is: -- %s --", origSvcRouteStr)
o.Expect(getRouteErr).NotTo(o.HaveOccurred())
re := regexp.MustCompile(svcSubnet + ".*\n")
origSvcRouteLine := re.FindAllString(origSvcRouteStr, -1)[0]
origSvcRoute := strings.Trim(origSvcRouteLine, "\n")
defer func() {
svcRoute1, deferErr := exutil.DebugNode(oc, nodeName, "ip", verFlag, "route", "show", svcSubnet)
o.Expect(deferErr).NotTo(o.HaveOccurred())
if !strings.Contains(svcRoute1, origSvcRoute) {
addCmd := "ip " + verFlag + " route add " + origSvcRoute
exutil.DebugNode(oc, nodeName, "bash", "-c", addCmd)
}
}()
delCmd := "ip " + verFlag + " route del " + origSvcRoute
_, delRouteErr := exutil.DebugNode(oc, nodeName, "bash", "-c", delCmd)
o.Expect(delRouteErr).NotTo(o.HaveOccurred())
exutil.By("Check the service route is restored")
routeOutput := wait.Poll(15*time.Second, 300*time.Second, func() (bool, error) {
svcRoute, getRouteErr1 := exutil.DebugNode(oc, nodeName, "ip", verFlag, "route", "show", svcSubnet)
o.Expect(getRouteErr1).NotTo(o.HaveOccurred())
if strings.Contains(svcRoute, origSvcRoute) {
return true, nil
}
e2e.Logf("Route is not restored and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(routeOutput, fmt.Sprintf("Fail to restore route and the error is:%s", routeOutput))
exutil.By("Check the log for restore the service route")
ovnkubePod, getPodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(getPodErr).NotTo(o.HaveOccurred())
filter := "'Route Manager:.*Dst: " + svcSubnet + "' | tail -1"
podLogs, getLogErr := checkLogMessageInPod(oc, "openshift-ovn-kubernetes", "ovnkube-controller", ovnkubePod, filter)
o.Expect(getLogErr).NotTo(o.HaveOccurred())
o.Expect(podLogs).To(o.ContainSubstring("netlink route addition event"))
}
})
// author: [email protected]
//bug: https://issues.redhat.com/browse/OCPBUGS-11266
g.It("Author:anusaxen-Medium-66884-Larger packet size than Cluster MTU should not cause packet drops", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
platform := checkPlatform(oc)
if !strings.Contains(platform, "aws") {
g.Skip("Test requires AWS, skip for other platforms!")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("create a hello pod1 in namespace")
pod1ns := pingPodResourceNode{
name: "hello-pod1",
namespace: oc.Namespace(),
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns.createPingPodNode(oc)
waitPodReady(oc, pod1ns.namespace, pod1ns.name)
exutil.By("create a hello-pod2 in namespace")
pod2ns := pingPodResourceNode{
name: "hello-pod2",
namespace: oc.Namespace(),
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns.createPingPodNode(oc)
waitPodReady(oc, pod2ns.namespace, pod2ns.name)
exutil.By("Get IP of the hello-pod2")
helloPod2IP := getPodIPv4(oc, oc.Namespace(), "hello-pod2")
//Cluster network MTU on AWS is 8901 and the negotiated MSS is 8849, which accommodates the TCP and IP headers etc. We will use an MSS of 9000 in this test
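// Header math sketch (assuming IPv4 TCP with the 12-byte timestamp option): 8901 MTU - 20 IP - 20 TCP - 12 options = 8849 bytes of MSS.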
iperfClientCmd := "iperf3 -c " + helloPod2IP + " -p 60001 -b 30M -N -V -M 9000|grep -i -A 5 'Test Complete' | grep -i -A 1 'Retr' | awk '{ print $9 }' | tail -1"
iperfServerCmd := "nohup iperf3 -s -p 60001&"
cmdBackground, _, _, errBackground := oc.Run("exec").Args("-n", pod2ns.namespace, pod2ns.name, "--", "/bin/sh", "-c", iperfServerCmd).Background()
defer cmdBackground.Process.Kill()
o.Expect(errBackground).NotTo(o.HaveOccurred())
retr_count, err := oc.Run("exec").Args("-n", pod1ns.namespace, pod1ns.name, "--", "/bin/sh", "-c", iperfClientCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("Total Retr count is \n %s", retr_count))
retr_count_int, err := strconv.Atoi(retr_count)
o.Expect(err).NotTo(o.HaveOccurred())
//iperf runs 10 iterations at 30Mbps, so we expect a Retr count of at most 1 per iteration, i.e. no more than 10 in total
o.Expect(retr_count_int < 11).To(o.BeTrue())
})
//author: [email protected]
g.It("Author:anusaxen-High-73205-High-72817-Make sure internalJoinSubnet and internalTransitSwitchSubnet is configurable post install as a Day 2 operation [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("create a hello pod1 in namespace")
pod1ns := pingPodResourceNode{
name: "hello-pod1",
namespace: oc.Namespace(),
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns.createPingPodNode(oc)
waitPodReady(oc, oc.Namespace(), pod1ns.name)
exutil.By("create a hello-pod2 in namespace")
pod2ns := pingPodResourceNode{
name: "hello-pod2",
namespace: oc.Namespace(),
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns.createPingPodNode(oc)
waitPodReady(oc, oc.Namespace(), pod2ns.name)
g.By("Create a test service backing up both the above pods")
svc := genericServiceResource{
servicename: "test-service-73205",
namespace: oc.Namespace(),
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "",
template: genericServiceTemplate,
}
if ipStackType == "ipv4single" {
svc.ipFamilyPolicy = "SingleStack"
} else {
svc.ipFamilyPolicy = "PreferDualStack"
}
svc.createServiceFromParams(oc)
//custom patches to test, depending on the cluster addressing type
customPatchIPv4 := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"100.99.0.0/16\",\"internalTransitSwitchSubnet\": \"100.69.0.0/16\"}}}}}"
customPatchIPv6 := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv6\":{\"internalJoinSubnet\": \"ab98::/64\",\"internalTransitSwitchSubnet\": \"ab97::/64\"}}}}}"
customPatchDualstack := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"100.99.0.0/16\",\"internalTransitSwitchSubnet\": \"100.69.0.0/16\"},\"ipv6\": {\"internalJoinSubnet\": \"ab98::/64\",\"internalTransitSwitchSubnet\": \"ab97::/64\"}}}}}"
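// Note: the replacement join/transit subnets above are chosen so they don't overlap the cluster, service, or existing internal networks, which these fields require.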
//gather the original cluster values so we can restore them in the deferred cleanup once the test is done
currentinternalJoinSubnetIPv4Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv4.internalJoinSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentinternalTransitSwSubnetIPv4Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv4.internalTransitSwitchSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentinternalJoinSubnetIPv6Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv6.internalJoinSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentinternalTransitSwSubnetIPv6Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv6.internalTransitSwitchSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//if any value is empty on the existing cluster, the cluster came up with the following default values assigned by OVNK
if (currentinternalJoinSubnetIPv4Value == "") || (currentinternalJoinSubnetIPv6Value == "") {
currentinternalJoinSubnetIPv4Value = "100.64.0.0/16"
currentinternalTransitSwSubnetIPv4Value = "100.88.0.0/16"
currentinternalJoinSubnetIPv6Value = "fd98::/64"
currentinternalTransitSwSubnetIPv6Value = "fd97::/64"
}
//vars to patch cluster back to original state
patchIPv4original := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv4Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv4Value + "\"}}}}}"
patchIPv6original := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv6\":{\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv6Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv6Value + "\"}}}}}"
patchDualstackoriginal := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv4Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv4Value + "\"},\"ipv6\": {\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv6Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv6Value + "\"}}}}}"
if ipStackType == "ipv4single" {
defer func() {
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", patchIPv4original)
err := checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
}()
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", customPatchIPv4)
} else if ipStackType == "ipv6single" {
defer func() {
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", patchIPv6original)
err := checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
}()
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", customPatchIPv6)
} else {
defer func() {
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", patchDualstackoriginal)
err := checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
}()
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", customPatchDualstack)
}
err = checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube never trigger or rolled out successfully post oc patch"))
//check usual svc and pod connectivities post migration which also ensures disruption doesn't last post successful rollout
CurlPod2PodPass(oc, oc.Namespace(), pod1ns.name, oc.Namespace(), pod2ns.name)
CurlPod2SvcPass(oc, oc.Namespace(), oc.Namespace(), pod1ns.name, "test-service-73205")
})
// author: [email protected]
g.It("Author:jechen-ConnectedOnly-High-74589-Pod-to-external TCP connectivity using port in range of snat port.", func() {
// For customer bug https://issues.redhat.com/browse/OCPBUGS-32202
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
testPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
url := "www.example.com"
ipStackType := checkIPStackType(oc)
if checkDisconnect(oc) || ipStackType == "ipv6single" {
g.Skip("Skip the test on disconnected cluster or singlev6 cluster.")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough node available, need at least one node for the test, skip the case!!")
}
exutil.By("1. create a namespace, create nodeport service on one node")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("2. Create a hello pod in ns")
pod1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: testPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("3. Create a nodePort type service fronting the above pod")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "NodePort",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
if ipStackType == "dualstack" {
svc.ipFamilyPolicy = "PreferDualStack"
} else {
svc.ipFamilyPolicy = "SingleStack"
}
defer removeResource(oc, true, true, "service", svc.servicename, "-n", svc.namespace)
svc.createServiceFromParams(oc)
exutil.By("4. Get NodePort at which service listens.")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. From external, curl NodePort service with its port to make sure NodePort service works")
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
exutil.By("6. Create another test pod on another node, from the test pod to curl local port of external url, verify the connection can succeed\n")
pod2 := pingPodResourceNode{
name: "testpod",
namespace: ns,
nodename: nodeList.Items[1].Name,
template: testPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns, pod2.name)
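// 32012 is deliberately taken from the default nodePort range (30000-32767), which per the case title overlaps the port range used for SNAT.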
cmd := fmt.Sprintf("curl --local-port 32012 -v -I -L http://%s", url)
expectedString := fmt.Sprintf(`^* Connected to %s \(([\d\.]+)\) port 80 `, url)
re := regexp.MustCompile(expectedString)
connectErr := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
_, execCmdOutput, err := e2eoutput.RunHostCmdWithFullOutput(ns, pod2.name, cmd)
if err != nil {
e2e.Logf("Getting err :%v, trying again...", err)
return false, nil
}
if !re.MatchString(execCmdOutput) {
e2e.Logf("Did not get expected output, trying again...")
e2e.Logf("\n execCmdOutput is %v\n", execCmdOutput)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(connectErr, fmt.Sprintf("Connection to %s did not succeed!", url))
})
// author: [email protected]
g.It("Author:huirwang-High-75613-Should be able to access applications when client ephemeral port is 22623 or 22624", func() {
// https://issues.redhat.com/browse/OCPBUGS-37541
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
g.By("Get new namespace")
ns1 := oc.Namespace()
g.By("Create test pods")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("Should be able to access applications when client ephemeral port is 22623 or 22624")
testPodName := getPodName(oc, ns1, "name=test-pods")
pod1Name := testPodName[0]
localPort := []string{"22623", "22624"}
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
pod2IP1, pod2IP2 := getPodIP(oc, ns1, testPodName[1])
for i := 0; i < 2; i++ {
curlCmd := fmt.Sprintf("curl --connect-timeout 5 -s %s --local-port %s", net.JoinHostPort(pod2IP1, "8080"), localPort[i])
_, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name, curlCmd, 60*time.Second, 120*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
curlCmd = fmt.Sprintf("curl --connect-timeout 5 -s %s --local-port %s", net.JoinHostPort(pod2IP2, "8080"), localPort[i])
// Need to wait 1 minute for the local binding port to be released
_, err = e2eoutput.RunHostCmdWithRetries(ns1, pod1Name, curlCmd, 60*time.Second, 120*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
pod2IP1, _ := getPodIP(oc, ns1, testPodName[1])
for i := 0; i < 2; i++ {
curlCmd := fmt.Sprintf("curl --connect-timeout 5 -s %s --local-port %s", net.JoinHostPort(pod2IP1, "8080"), localPort[i])
_, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name, curlCmd, 60*time.Second, 120*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
})
// author: [email protected]
g.It("Author:huirwang-High-75758-Bad certificate should not cause ovn pods crash. [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-36195
exutil.By("Get one worker node.")
node1, err := exutil.GetFirstCoreOsWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if len(node1) < 1 {
g.Skip("Skip the test as no enough worker nodes.")
}
exutil.By("Get the ovnkube-node pod on specific node.")
ovnPod, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", node1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ovnPod).ShouldNot(o.BeEmpty())
exutil.By("Create bad ovnkube-node-certs certificate")
cmd := `cd /var/lib/ovn-ic/etc/ovnkube-node-certs && ls | grep '^ovnkube-client-.*\.pem$' | grep -v 'ovnkube-client-current.pem' | xargs -I {} sh -c 'echo "" > {}'`
_, err = exutil.DebugNodeWithChroot(oc, node1, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Restart ovnkube-node pod on specific node.")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnPod, "-n", "openshift-ovn-kubernetes", "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait ovnkube-node pod to be running")
ovnPod, err = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", node1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ovnPod).ShouldNot(o.BeEmpty())
exutil.AssertPodToBeReady(oc, ovnPod, "openshift-ovn-kubernetes")
})
// author: [email protected]
g.It("Author:meinli-Medium-45146-Pod should be healthy when gw IP is single stack on dual stack cluster", func() {
// https://bugzilla.redhat.com/show_bug.cgi?id=1986708
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
ipStackType := checkIPStackType(oc)
if ipStackType != "dualstack" {
g.Skip("This case is only validate in DualStack cluster, skip it!!!")
}
exutil.By("1. Get namespace")
ns := oc.Namespace()
exutil.By("2. Create a pod in ns namespace")
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod.name, "-n", pod.namespace).Execute()
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("3. Patch annotation for hello-pod")
annotationsCmd := fmt.Sprintf(`{ "metadata":{
"annotations": {
"k8s.ovn.org/routing-namespaces": "%s",
"k8s.ovn.org/routing-network": "foo",
"k8s.v1.cni.cncf.io/network-status": "[{\"name\":\"foo\",\"interface\":\"net1\",\"ips\":[\"172.19.0.5\"],\"mac\":\"01:23:45:67:89:10\"}]"
}
}
}`, ns)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("pod", pod.name, "-n", ns, "-p", annotationsCmd, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Verify pod is healthy and running")
waitPodReady(oc, ns, pod.name)
})
// author: [email protected]
g.It("Author:meinli-NonPreRelease-Medium-34674-Ensure ovnkube-master nbdb and sbdb exit properly. [Disruptive]", func() {
exutil.By("1. Enable ovnkube-master pod debug log by ovn-appctl")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).NotTo(o.BeEmpty())
MasterNodeName, err := exutil.GetPodNodeName(oc, "openshift-ovn-kubernetes", ovnMasterPodName)
o.Expect(err).NotTo(o.HaveOccurred())
ctls := []string{"ovnnb_db.ctl", "ovnsb_db.ctl"}
for _, ctl := range ctls {
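// vlog/set console:jsonrpc:dbg raises the jsonrpc module to debug level on the console log target, so the NB/SB DBs start logging their RPC traffic.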
dbgCmd := fmt.Sprintf("ovn-appctl -t /var/run/ovn/%s vlog/set console:jsonrpc:dbg", ctl)
_, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, dbgCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2. Check ovnkube-master pod debug log enabled successfully and make hard-link(ln) to preserve log")
LogsPath := "/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-*"
var wg sync.WaitGroup
Database := []string{"nbdb", "sbdb"}
for _, db := range Database {
wg.Add(1)
go func(db string) { // pass db by value to avoid the loop-variable capture pitfall
defer g.GinkgoRecover()
defer wg.Done()
logPath := filepath.Join(LogsPath, db, "*.log")
checkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 20*time.Second, false, func(cxt context.Context) (bool, error) {
resultOutput, err := exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("tail -10 %s", logPath))
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(resultOutput, "jsonrpc") {
e2e.Logf("ovnkube-pod debug log has been successfully enabled!!!")
// select the most recent file to do hard-link
_, lnErr := exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("ln -v $(ls -1t %s | head -n 1) /var/log/%s.log", logPath, db))
o.Expect(lnErr).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("%v,Waiting for ovnkube-master pod debug log enable, try again ...,", err)
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Enable ovnkube-master pod debug log timeout.")
}(db)
}
wg.Wait()
exutil.By("3. delete the ovnkube-master pod and check log process should be exited")
defer checkOVNKState(oc)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnMasterPodName, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for _, db := range Database {
wg.Add(1)
go func(db string) { // pass db by value to avoid the loop-variable capture pitfall
defer g.GinkgoRecover()
defer wg.Done()
defer exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("rm -f /var/log/%s.log", db))
checkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 20*time.Second, false, func(cxt context.Context) (bool, error) {
output, err := exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("tail -10 /var/log/%s.log", db))
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, fmt.Sprintf("Exiting ovn%s_db", strings.Split(db, "db")[0])) {
e2e.Logf(fmt.Sprintf("ovnkube-master pod %s exit properly!!!", db))
return true, nil
}
e2e.Logf("%v,Waiting for ovnkube-master pod log sync up, try again ...,", err)
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Check ovnkube-master pod %s debug log timeout.", db))
}(db)
}
wg.Wait()
})
// author: [email protected]
g.It("Author: meinli-Medium-72506-Traffic with dst ip from service CIDR that doesn't match existing svc ip+port should be dropped", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
exutil.By("1. Get namespace and worker node")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires one node, but the cluster han't one")
}
workerNode := nodeList.Items[0].Name
ns := oc.Namespace()
exutil.By("2. create a service")
createResourceFromFile(oc, ns, testSvcFile)
ServiceOutput, serviceErr := oc.WithoutNamespace().Run("get").Args("service", "-n", ns).Output()
o.Expect(serviceErr).NotTo(o.HaveOccurred())
o.Expect(ServiceOutput).To(o.ContainSubstring("test-service"))
exutil.By("3. Curl clusterIP svc from node")
svcIP1, svcIP2 := getSvcIP(oc, ns, "test-service")
if svcIP2 != "" {
svc4URL := net.JoinHostPort(svcIP2, "27018")
output, _ := exutil.DebugNode(oc, workerNode, "curl", svc4URL, "--connect-timeout", "5")
o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("Failed")))
}
svcURL := net.JoinHostPort(svcIP1, "27018")
output, _ := exutil.DebugNode(oc, workerNode, "curl", svcURL, "--connect-timeout", "5")
o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("Failed")))
exutil.By("4. Validate the drop packets counter is increasing from svc network")
ovnkubePodName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", workerNode)
o.Expect(err).NotTo(o.HaveOccurred())
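// Per this case, the priority=105 flows on br-ex carry the drop action for service-CIDR traffic that matches no existing service ip+port.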
cmd := "ovs-ofctl dump-flows br-ex | grep -i 'priority=105'"
output, err = e2eoutput.RunHostCmd("openshift-ovn-kubernetes", ovnkubePodName, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
r := regexp.MustCompile(`n_packets=(\d+).*?actions=drop`)
matches := r.FindAllStringSubmatch(output, -1)
// only check a single drop action counter to make sure the result won't be influenced by other cases
o.Expect(len(matches)).ShouldNot(o.Equal(0))
o.Expect(len(matches[0])).To(o.Equal(2))
o.Expect(strconv.Atoi(matches[0][1])).To(o.BeNumerically(">", 0))
exutil.By("5. Validate no packet are seen on br-ex from src")
if svcIP2 != "" {
output, err := e2eoutput.RunHostCmd("openshift-ovn-kubernetes", ovnkubePodName, fmt.Sprintf("ovs-ofctl dump-flows br-ex | grep -i 'src=%s'", svcIP2))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.BeEmpty())
}
output, err = e2eoutput.RunHostCmd("openshift-ovn-kubernetes", ovnkubePodName, fmt.Sprintf("ovs-ofctl dump-flows br-ex | grep -i 'src=%s'", svcIP1))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.BeEmpty())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
30cf3c55-1741-4a9e-a018-5b898e3cae9f
|
Author:anusaxen-Medium-49216-ovnkube-node logs should not print api token in logs.
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:anusaxen-Medium-49216-ovnkube-node logs should not print api token in logs. ", func() {
g.By("it's for bug 2009857")
workerNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ovnkubePod, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", workerNode)
o.Expect(err).NotTo(o.HaveOccurred())
podlogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(ovnkubePod, "-n", "openshift-ovn-kubernetes", "-c", "ovnkube-controller").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podlogs).NotTo(o.ContainSubstring("kube-api-token"))
g.By("ovnkube-node logs doesn't contain api-token")
})
| |||||
test case
|
openshift/openshift-tests-private
|
9ad650fd-9e9e-4d5e-b8a5-59f2a0c576dd
|
NonHyperShiftHOST-Author:zzhao-Medium-54742- Completed pod ip can be released.[Flaky]
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("NonHyperShiftHOST-Author:zzhao-Medium-54742- Completed pod ip can be released.[Flaky]", func() {
g.By("it's for bug 2091157,Check the ovnkube-master logs to see if completed pod already release ip")
result := findLogFromPod(oc, "Releasing IPs for Completed pod", "openshift-ovn-kubernetes", "app=ovnkube-node", "ovnkube-controller")
o.Expect(result).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
594787b4-ca70-44c2-9514-1fd9ab8c5bcd
|
Author:anusaxen-NonHyperShiftHOST-NonPreRelease-High-55144-[FdpOvnOvs] Switching OVN gateway modes should not delete custom routes created on node logical routers.[Disruptive]
|
['"context"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:anusaxen-NonHyperShiftHOST-NonPreRelease-High-55144-[FdpOvnOvs] Switching OVN gateway modes should not delete custom routes created on node logical routers.[Disruptive] ", func() {
exutil.By("it's for bug 2042516")
var desiredMode string
//find out the original gateway mode of the cluster so we can revert to it post test
origMode := getOVNGatewayMode(oc)
if origMode == "local" {
desiredMode = "shared"
} else {
desiredMode = "local"
}
e2e.Logf("Cluster is currently on gateway mode %s", origMode)
e2e.Logf("Desired mode is %s", desiredMode)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires at least one schedulable node")
}
exutil.By("Add a logical route on a node")
nodeLogicalRouterName := "GR_" + nodeList.Items[0].Name
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
lrRouteListDelCmd := "ovn-nbctl lr-route-del " + nodeLogicalRouterName + " 192.168.122.0/24 192.168.122.4"
lrRouteListAddCmd := "ovn-nbctl lr-route-add " + nodeLogicalRouterName + " 192.168.122.0/24 192.168.122.4"
defer exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
_, lrlErr1 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListAddCmd)
o.Expect(lrlErr1).NotTo(o.HaveOccurred())
defer switchOVNGatewayMode(oc, origMode)
switchOVNGatewayMode(oc, desiredMode)
exutil.By("List the logical route on a node after gateway mode switch")
lrRouteListCmd := "ovn-nbctl lr-route-list " + nodeLogicalRouterName
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
defer exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
lRlOutput, lrlErr2 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListCmd)
o.Expect(lrlErr2).NotTo(o.HaveOccurred())
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.0/24"))
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.4"))
//revert the cluster back to its original gateway mode and delete the fake route
switchOVNGatewayMode(oc, origMode)
exutil.By("List the logical route on a node after gateway mode revert")
ovnKNodePod, ovnkNodePodErr = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
defer exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
lRlOutput, lrlErr3 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListCmd) // capture the fresh output so the assertions below don't re-check the pre-revert result
o.Expect(lrlErr3).NotTo(o.HaveOccurred())
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.0/24"))
o.Expect(lRlOutput).To(o.ContainSubstring("192.168.122.4"))
exutil.By("Delete the logical route on a node after gateway mode revert")
//lrRouteListDelCmd = "ovn-nbctl lr-route-del " + nodeLogicalRouterName + " 192.168.122.0/24 192.168.122.4"
_, lrlErr4 := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnKNodePod, lrRouteListDelCmd)
o.Expect(lrlErr4).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
78c23066-af1a-4f3e-bc06-1539fdaac439
|
NonHyperShiftHOST-Author:jechen-Medium-61312-Unsupported scenarios in expanding cluster networks should be denied. [Disruptive]
|
['"strconv"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("NonHyperShiftHOST-Author:jechen-Medium-61312-Unsupported scenarios in expanding cluster networks should be denied. [Disruptive]", func() {
ipStackType := checkIPStackType(oc)
if ipStackType != "ipv4single" {
g.Skip("The feature is currently supported on IPv4 cluster only, skip for other IP stack type for now")
}
origNetworkCIDR, orighostPrefix := getClusterNetworkInfo(oc)
origNetAddress := strings.Split(origNetworkCIDR, "/")[0]
origNetMaskVal, _ := strconv.Atoi(strings.Split(origNetworkCIDR, "/")[1])
origHostPrefixVal, _ := strconv.Atoi(orighostPrefix)
e2e.Logf("Original netAddress:%v, netMask:%v, hostPrefix: %v", origNetAddress, origNetMaskVal, origHostPrefixVal)
g.By("1. Verify that decreasing IP space by larger CIDR mask is not allowed")
newCIDR := origNetAddress + "/" + strconv.Itoa(origNetMaskVal+1)
e2e.Logf("Attempt to change to newCIDR: %v", newCIDR)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
restorePatchValue := "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + origNetworkCIDR + "\", \"hostPrefix\":" + orighostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
defer patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
patchValue := "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + newCIDR + "\", \"hostPrefix\":" + orighostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [reducing IP range with a larger CIDR mask for clusterNetwork CIDR is unsupported]`))
// restore to original valid config before next step
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [reducing IP range with a larger CIDR mask for clusterNetwork CIDR is unsupported]`))
g.By("2. Verify that changing hostPrefix is not allowed")
newHostPrefix := strconv.Itoa(origHostPrefixVal + 1)
e2e.Logf("Attempt to change to newHostPrefix: %v", newHostPrefix)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
patchValue = "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + origNetworkCIDR + "\", \"hostPrefix\":" + newHostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
// restore to original valid config before next step
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
newHostPrefix = strconv.Itoa(origHostPrefixVal - 1)
e2e.Logf("Attempt to change to newHostPrefix: %v", newHostPrefix)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
patchValue = "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + origNetworkCIDR + "\", \"hostPrefix\":" + newHostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
// restore to original valid config before next step
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [modifying a clusterNetwork's hostPrefix value is unsupported]`))
g.By("3. Verify that changing network IP is not allowed")
subAddress := strings.Split(origNetAddress, ".")
subAddressB, _ := strconv.Atoi(subAddress[1])
newSubAddressB := strconv.Itoa(subAddressB + 1)
newNetAddress := subAddress[0] + "." + newSubAddressB + "." + subAddress[2] + "." + subAddress[3]
newCIDR = newNetAddress + "/" + strconv.Itoa(origNetMaskVal)
e2e.Logf("Attempt to change to newCIDR: %v", newCIDR)
// patch command will be executed even though invalid config is supplied, so still call patchResourceAsAdmin function
patchValue = "{\"spec\":{\"clusterNetwork\":[{\"cidr\":\"" + newCIDR + "\", \"hostPrefix\":" + orighostPrefix + "}],\"networkType\":\"OVNKubernetes\"}}"
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", patchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).Should(o.ContainSubstring(`invalid configuration: [modifying IP network value for clusterNetwork CIDR is unsupported]`))
patchResourceAsAdmin(oc, "Network.config.openshift.io/cluster", restorePatchValue)
o.Eventually(func() string {
return getCNOStatusCondition(oc)
}, 60*time.Second, 3*time.Second).ShouldNot(o.ContainSubstring(`invalid configuration: [modifying IP network value for clusterNetwork CIDR is unsupported]`))
})
| |||||
test case
|
openshift/openshift-tests-private
|
a0dd520f-17b5-4826-acbc-c7bdbcf7ce7b
|
NonHyperShiftHOST-ConnectedOnly-ROSA-OSD_CCS-Author:zzhao-Medium-64297- check nodeport service with large mtu.[Serial]
|
['"net"', '"path/filepath"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("NonHyperShiftHOST-ConnectedOnly-ROSA-OSD_CCS-Author:zzhao-Medium-64297- check nodeport service with large mtu.[Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
hostPortServiceFile = filepath.Join(buildPruningBaseDir, "ocpbug-2827/hostport.yaml")
mtuTestFile = filepath.Join(buildPruningBaseDir, "ocpbug-2827/mtutest.yaml")
ns1 = "openshift-kube-apiserver"
)
platform := exutil.CheckPlatform(oc)
acceptedPlatform := strings.Contains(platform, "aws")
if !acceptedPlatform {
g.Skip("Test cases should be run on AWS cluster with ovn network plugin, skip for other platforms or other network plugin!!")
}
g.By("create nodeport service in namespace")
defer removeResource(oc, true, true, "-f", hostPortServiceFile, "-n", ns1)
createResourceFromFile(oc, ns1, hostPortServiceFile)
g.By("create mtutest pod")
defer removeResource(oc, true, true, "-f", mtuTestFile, "-n", ns1)
createResourceFromFile(oc, ns1, mtuTestFile)
err := waitForPodWithLabelReady(oc, ns1, "app=mtu-tester")
exutil.AssertWaitPollNoErr(err, "this pod with label app=mtu-tester not ready")
mtuTestPod := getPodName(oc, ns1, "app=mtu-tester")
g.By("get one nodeip")
PodNodeName, nodeErr := exutil.GetPodNodeName(oc, ns1, mtuTestPod[0])
o.Expect(nodeErr).NotTo(o.HaveOccurred())
nodeIp := getNodeIPv4(oc, ns1, PodNodeName)
output, err := e2eoutput.RunHostCmd(ns1, mtuTestPod[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(nodeIp, "31251")+"?mtu=8849 2>/dev/null | cut -b-10")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "Terminated")).To(o.BeFalse())
output, err = e2eoutput.RunHostCmd(ns1, mtuTestPod[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(nodeIp, "31251")+"?mtu=8850 2>/dev/null | cut -b-10")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "Terminated")).To(o.BeFalse())
})
| |||||
test case
|
openshift/openshift-tests-private
|
93b90a76-c63a-4a22-aae4-b594b893a578
|
Author:anusaxen-High-64151-check node healthz port is enabled for ovnk in CNO for GCP
|
['"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:anusaxen-High-64151-check node healthz port is enabled for ovnk in CNO for GCP", func() {
e2e.Logf("It is for OCPBUGS-7158")
platform := checkPlatform(oc)
if !strings.Contains(platform, "gcp") {
g.Skip("Skip for un-expected platform,not GCP!")
}
g.By("Expect healtz-bind-address to be present in ovnkube-config config map")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-ovn-kubernetes", "ovnkube-config", "-ojson").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "0.0.0.0:10256")).To(o.BeTrue())
g.By("Make sure healtz-bind-address is reachable via nodes")
worker_node, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
output, err = exutil.DebugNode(oc, worker_node, "bash", "-c", "curl -v http://0.0.0.0:10256/healthz")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("HTTP/1.1 200 OK"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
814f9d04-4008-4059-b27a-c0ad52aa4dfb
|
Longduration-NonPreRelease-Author:jechen-High-68418-Same name pod can be recreated on new node and still work on OVN cluster. [Disruptive]
|
['"path/filepath"', '"regexp"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Longduration-NonPreRelease-Author:jechen-High-68418-Same name pod can be recreated on new node and still work on OVN cluster. [Disruptive]", func() {
// This is for customer bug: https://issues.redhat.com/browse/OCPBUGS-18681
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
kubeletKillerPodTemplate := filepath.Join(buildPruningBaseDir, "kubelet-killer-pod-template.yaml")
exutil.By("1. Create a new machineset, get the new node created\n")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-68418"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
e2e.Logf("Get nodeName: %v", nodeName)
exutil.By("2. Create kubelet-killer pod on the node\n")
kkPod := kubeletKillerPod{
name: "kubelet-killer-68418",
namespace: "openshift-machine-api",
nodename: nodeName,
template: kubeletKillerPodTemplate,
}
kkPod.createKubeletKillerPodOnNode(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "kubelet-killer-68418", "-n", kkPod.namespace, "--ignore-not-found=true").Execute()
// After the kubelet-killer pod is created, it kills the node it resides on; the pod quickly transitions into the Pending phase and stays there once its node becomes NotReady
podStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", kkPod.name, "-n", kkPod.namespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("kkPod status:%v", podStatus)
o.Expect(regexp.MatchString("Pending", podStatus)).Should(o.BeTrue())
// node is expected to be in NotReady state after kubelet killer pod kills its kubelet
checkNodeStatus(oc, nodeName, "NotReady")
exutil.By("3. Delete the node and its machineset, and delete the kubelet-killer pod\n")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("machines.machine.openshift.io", machineName[0], "-n", "openshift-machine-api").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// Verify the machineset is deleted
ms.DeleteMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 0, machinesetName)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "kubelet-killer-68418", "-n", kkPod.namespace, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Recreate the machineset, get the newer node created\n")
ms2 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer ms2.DeleteMachineSet(oc)
ms2.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
machineName = clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
newNodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
exutil.By("5. Recreate kubelet-killer pod with same pod name on the newer node\n")
kkPod2 := kubeletKillerPod{
name: "kubelet-killer-68418",
namespace: "openshift-machine-api",
nodename: newNodeName,
template: kubeletKillerPodTemplate,
}
kkPod2.createKubeletKillerPodOnNode(oc)
// After kubelet-killer pod2 is created, it kills the node it resides on; the pod quickly transitions into the Pending phase and stays there once its node becomes NotReady
podStatus, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", kkPod2.name, "-n", kkPod2.namespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("kkPod2 status:%v", podStatus)
o.Expect(regexp.MatchString("Pending", podStatus)).Should(o.BeTrue())
// Verify the kubelet-killer pod was able to be recreated and does its job of killing the node
checkNodeStatus(oc, newNodeName, "NotReady")
exutil.By("6. Verify ErrorAddingLogicalPort or FailedCreateSandBox events are not generated when pod is recreated\n")
podDescribe, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", kkPod2.name, "-n", kkPod2.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(regexp.MatchString("ErrorAddingLogicalPort", podDescribe)).Should(o.BeFalse())
o.Expect(regexp.MatchString("FailedCreatedPodSandBox", podDescribe)).Should(o.BeFalse())
exutil.By("7. Cleanup after test: delete the node and its machineset, then delete the kubelet-killer pod\n")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("machines.machine.openshift.io", machineName[0], "-n", "openshift-machine-api").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// Verify the machineset is deleted
ms.DeleteMachineSet(oc)
// 960s of total wait.Poll time may not be enough for some types of clusters, so add some sleep time before WaitForMachinesRunning
time.Sleep(180 * time.Second)
clusterinfra.WaitForMachinesRunning(oc, 0, machinesetName)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "kubelet-killer-68418", "-n", kkPod.namespace, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
1a00d519-d33a-44c7-8a70-cb758185e0dc
|
Author:asood-Medium-66047-[FdpOvnOvs] Verify allocated IP address of the pod on a specific node with completed status when delete is released in OVN DB
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:asood-Medium-66047-[FdpOvnOvs] Verify allocated IP address of the pod on a specific node with completed status when delete is released in OVN DB", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
completedPodNodeTemplate = filepath.Join(buildPruningBaseDir, "completed-pod-specific-node-template.yaml")
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items)).NotTo(o.BeEquivalentTo(0))
exutil.By("Get namespace")
ns := oc.Namespace()
exutil.By("Create pods with completed status")
for i := 0; i < 50; i++ {
podns := pingPodResourceNode{
name: "completed-pod-" + strconv.Itoa(i),
namespace: ns,
nodename: nodeList.Items[0].Name,
template: completedPodNodeTemplate,
}
podns.createPingPodNode(oc)
}
exutil.By("Count all the pods with completed status")
allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, ns, "name=completed-pod")
o.Expect(getPodErr).NotTo(o.HaveOccurred())
o.Expect(len(allPods)).To(o.BeEquivalentTo(50))
// Allow the last pod IP to be released before checking NB DB
time.Sleep(10 * time.Second)
exutil.By("Verify there are no IP in NB DB for the completed pods")
ovnKNodePod, ovnkNodePodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeList.Items[0].Name)
o.Expect(ovnkNodePodErr).NotTo(o.HaveOccurred())
o.Expect(ovnKNodePod).ShouldNot(o.Equal(""))
getCmd := fmt.Sprintf("ovn-nbctl show | grep '%s' | wc -l", ns)
getCount, getCmdErr := exutil.RemoteShPodWithBashSpecifyContainer(oc, "openshift-ovn-kubernetes", ovnKNodePod, "ovnkube-controller", getCmd)
o.Expect(getCmdErr).NotTo(o.HaveOccurred())
o.Expect(strconv.Atoi(getCount)).To(o.BeEquivalentTo(0))
exutil.By("Delete all the pods with completed status")
_, delPodErr := oc.AsAdmin().Run("delete").Args("pod", "-l", "name=completed-pod", "-n", ns).Output()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
b83911c5-1bf1-4347-8219-1c53bf975cc6
|
Author:qiowang-Medium-69761-Check apbexternalroute status when all zones reported success
|
['"fmt"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:qiowang-Medium-69761-Check apbexternalroute status when all zones reported success", func() {
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
exutil.By("1. Create Admin Policy Based External route object")
ns := oc.Namespace()
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-69761",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("2. Check status of apbexternalroute object")
checkErr := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(messages).Should(o.ContainSubstring(node + ": configured external gateway IPs: " + apbExternalRoute.ip1 + "," + apbExternalRoute.ip2))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
6c9a9ee4-6875-4f52-9dc7-ba2782073aae
|
Author:qiowang-Medium-69762-Check egressfirewall status when all zones reported success
|
['"fmt"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:qiowang-Medium-69762-Check egressfirewall status when all zones reported success", func() {
ipStackType := checkIPStackType(oc)
var egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2021::/96"
} else if ipStackType == "ipv6single" {
egressFWCIDR1 = "2021::/96"
egressFWCIDR2 = "2022::/96"
} else {
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2.1.2.0/24"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Create egressfirewall object")
ns := oc.Namespace()
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("2. Check status of egressfirewall object")
checkErr := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(messages).Should(o.ContainSubstring(node + ": EgressFirewall Rules applied"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
fe2fd9f8-5366-402e-bb0b-702e856e0984
|
NonHyperShiftHOST-Longduration-NonPreRelease-Author:huirwang-High-69198-Oversized UDP packet handling. [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:huirwang-High-69198-Oversized UDP packet handling. [Disruptive]", func() {
//It is for customer bug https://issues.redhat.com/browse/OCPBUGS-23334
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
egressIPTemplate = filepath.Join(buildPruningBaseDir, "egressip-config1-template.yaml")
)
// This case needs an external host, will run it on rdu1 cluster only.
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1 cluster, skip for other envrionment!!!")
}
exutil.By("Switch to local gate way mode.")
defer switchOVNGatewayMode(oc, "shared")
switchOVNGatewayMode(oc, "local")
ns1 := oc.Namespace()
workers := excludeSriovNodes(oc)
o.Expect(len(workers) > 0).Should(o.BeTrue())
exutil.By("create a hello pod in first namespace")
pod1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns1,
nodename: workers[0],
template: pingPodTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("Label one worker node as egress node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[0], "k8s.ovn.org/egress-assignable")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[0], "k8s.ovn.org/egress-assignable", "true")
exutil.By("Create egressIP object")
freeIPs := findFreeIPs(oc, workers[0], 2)
o.Expect(len(freeIPs)).Should(o.Equal(2))
egressip1 := egressIPResource1{
name: "egressip-69198",
template: egressIPTemplate,
egressIP1: freeIPs[0],
egressIP2: freeIPs[1],
}
defer removeResource(oc, true, true, "egressip", egressip1.name)
egressip1.createEgressIPObject1(oc)
verifyExpectedEIPNumInEIPObject(oc, egressip1.name, 1)
exutil.By("Add matched label to test namespace")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name=test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Start iperf3 on external host")
iperfServerCmd := "nohup iperf3 -s &"
externalHost := "10.8.1.181"
defer func() {
cleanupErr := sshRunCmd(externalHost, "root", "pkill iperf3 &")
o.Expect(cleanupErr).NotTo(o.HaveOccurred())
}()
go func() {
defer g.GinkgoRecover()
serverErr := sshRunCmd(externalHost, "root", iperfServerCmd)
o.Expect(serverErr).NotTo(o.HaveOccurred())
}()
// iperf3 would start in parallel, adding wait time to ensure iperf3 started.
time.Sleep(10 * time.Second)
exutil.By("Start iperf3 client on test pod and send udp traffic")
iperfClientCmd := "iperf3 -u -n 1647 -l 1647 -c 192.168.111.1 -R -d -i 10"
res, err := exutil.RemoteShPodWithBash(oc, ns1, pod1.name, iperfClientCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(res, "iperf Done")).Should(o.BeTrue(), fmt.Sprintf("The client sent large packet to server failed with message: %s", res))
o.Expect(strings.Contains(res, "iperf3: error - control socket has closed unexpectedly")).ShouldNot(o.BeTrue(), fmt.Sprintf("The client sokcet was closed unexpectedly with error :%s", res))
exutil.By("Remove matched label to test namespace")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "name-").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Again--start iperf3 client on test pod and send udp traffic")
res, err = exutil.RemoteShPodWithBash(oc, ns1, pod1.name, iperfClientCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(res, "iperf Done")).Should(o.BeTrue(), fmt.Sprintf("The client sent large packet to server failed with message: %s", res))
o.Expect(strings.Contains(res, "iperf3: error - control socket has closed unexpectedly")).ShouldNot(o.BeTrue(), fmt.Sprintf("The client sokcet was closed unexpectedly with error :%s", res))
})
| |||||
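The oversized-UDP check above boils down to two output markers: "iperf Done" for a completed transfer, and the control-socket error for a failure. A self-contained sketch of that verification, assuming iperf3 is installed locally and reusing the same flags; the server address 192.168.111.1 is taken from the test and is illustrative only.

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

// runIperfUDPCheck sends 1647-byte UDP datagrams, which exceed the pod MTU
// on a typical OVN-Kubernetes cluster and therefore get fragmented, then
// checks the same two output markers the test asserts on.
func runIperfUDPCheck(server string) error {
    // iperf3's exit code is ignored on purpose; the output markers decide.
    out, _ := exec.Command("iperf3", "-u", "-n", "1647", "-l", "1647",
        "-c", server, "-R", "-d", "-i", "10").CombinedOutput()
    res := string(out)
    if !strings.Contains(res, "iperf Done") {
        return fmt.Errorf("transfer did not complete: %s", res)
    }
    if strings.Contains(res, "control socket has closed unexpectedly") {
        return fmt.Errorf("control socket closed unexpectedly: %s", res)
    }
    return nil
}

func main() {
    if err := runIperfUDPCheck("192.168.111.1"); err != nil {
        fmt.Println(err)
    }
}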
test case
|
openshift/openshift-tests-private
|
4ab02b48-ddf1-42fa-b325-94a8142b6cdb
|
Author:qiowang-Medium-69875-Check apbexternalroute status when there is zone reported failure [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:qiowang-Medium-69875-Check apbexternalroute status when there is zone reported failure [Disruptive]", func() {
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
workerNode, getWorkerErr := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(getWorkerErr).NotTo(o.HaveOccurred())
exutil.By("1. Create pod on one worker node")
ns := oc.Namespace()
pod := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: workerNode,
template: pingPodNodeTemplate,
}
defer pod.deletePingPodNode(oc)
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("2. Remove node annotation k8s.ovn.org/l3-gateway-config")
annotation, getAnnotationErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node/"+workerNode, "-o", "jsonpath='{.metadata.annotations.k8s\\.ovn\\.org/l3-gateway-config}'").Output()
o.Expect(getAnnotationErr).NotTo(o.HaveOccurred())
defer exutil.AddAnnotationsToSpecificResource(oc, "node/"+workerNode, "", "k8s.ovn.org/l3-gateway-config="+strings.Trim(annotation, "'"))
exutil.RemoveAnnotationFromSpecificResource(oc, "node/"+workerNode, "", "k8s.ovn.org/l3-gateway-config")
exutil.By("3. Create Admin Policy Based External route object")
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-69875",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("4. Check status of apbexternalroute object")
checkErr := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Fail")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("apbexternalroute %s doesn't show Fail in time", apbExternalRoute.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
if node == workerNode {
o.Expect(messages).Should(o.ContainSubstring(node + ": " + node + " failed to apply policy"))
} else {
o.Expect(messages).Should(o.ContainSubstring(node + ": configured external gateway IPs: " + apbExternalRoute.ip1 + "," + apbExternalRoute.ip2))
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
14750926-8267-488d-acd2-aa53bc4ffcad
|
Author:qiowang-Medium-69873-Medium-69874-Check apbexternalroute/egressfirewall status when no failure reported and not all zones reported success [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:qiowang-Medium-69873-Medium-69874-Check apbexternalroute/egressfirewall status when no failure reported and not all zones reported success [Disruptive]", func() {
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
if len(nodes) < 2 {
g.Skip("Not enough nodes for the test, need at least 2 linux nodes, skip the case!!")
}
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2, egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2021::/96"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
egressFWCIDR1 = "2021::/96"
egressFWCIDR2 = "2022::/96"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2.1.2.0/24"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Reboot one worker node, wait it becomes NotReady")
workerNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer checkNodeStatus(oc, workerNode, "Ready")
defaultInt := "br-ex"
fileContent := fmt.Sprintf("ifconfig %s down; sleep 120; ifconfig %s up;", defaultInt, defaultInt)
createFileCmd := `echo -e "` + fileContent + `" > /tmp/test.sh`
_, err1 := exutil.DebugNodeWithChroot(oc, workerNode, "bash", "-c", createFileCmd)
o.Expect(err1).NotTo(o.HaveOccurred())
delFileCmd := "rm -rf /tmp/test.sh"
defer exutil.DebugNodeWithChroot(oc, workerNode, "bash", "-c", delFileCmd)
chmodCmd := "chmod +x /tmp/test.sh"
_, err2 := exutil.DebugNodeWithChroot(oc, workerNode, "bash", "-c", chmodCmd)
o.Expect(err2).NotTo(o.HaveOccurred())
testCmd := "/tmp/test.sh"
runCmd, _, _, runCmdErr := oc.AsAdmin().Run("debug").Args("node/"+workerNode, "--to-namespace", "default", "--", "chroot", "/host", "bash", "-c", testCmd).Background()
defer runCmd.Process.Kill()
o.Expect(runCmdErr).NotTo(o.HaveOccurred())
checkNodeStatus(oc, workerNode, "NotReady")
exutil.By("2. Create Admin Policy Based External route object with static gateway when the worker node in NotReady status")
ns := oc.Namespace()
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-69873",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("3. Create egressfirewall object with allow rule when the worker node in NotReady status")
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("4. Check status of apbexternalroute/egressfirewall object")
apbExtRouteSta, apbExtRouteStaErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.status}`).Output()
o.Expect(apbExtRouteStaErr).NotTo(o.HaveOccurred())
o.Expect(apbExtRouteSta).Should(o.BeEmpty())
apbExtRouteMsgs, apbExtRouteMsgsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr).NotTo(o.HaveOccurred())
egressFWStatus, egressFWStatusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.status}`).Output()
o.Expect(egressFWStatusErr).NotTo(o.HaveOccurred())
o.Expect(egressFWStatus).Should(o.BeEmpty())
egressFWMsgs, egressFWMsgsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
if node == workerNode {
o.Expect(strings.Contains(apbExtRouteMsgs, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).ShouldNot(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs, node+": EgressFirewall Rules applied")).ShouldNot(o.BeTrue())
} else {
o.Expect(strings.Contains(apbExtRouteMsgs, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs, node+": EgressFirewall Rules applied")).Should(o.BeTrue())
}
}
exutil.By("5. Wait for the rebooted worker node back")
checkNodeStatus(oc, workerNode, "Ready")
exutil.By("6. Check status of apbexternalroute/egressfirewall object after the rebooted worker node back")
apbExtRouteCheckErr := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs2, apbExtRouteMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr2).NotTo(o.HaveOccurred())
egressFWCheckErr := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs2, egressFWMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr2).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(strings.Contains(apbExtRouteMsgs2, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs2, node+": EgressFirewall Rules applied")).Should(o.BeTrue())
}
})
| |||||
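checkNodeStatus is another repo helper not shown in this dump. A standalone sketch of what an equivalent could look like, assuming it polls the node's Ready condition through the oc CLI; during the br-ex outage staged above, the API may transiently fail, so errors are retried rather than aborting the poll.

package main

import (
    "fmt"
    "os/exec"
    "strings"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// waitForNodeReadyState polls the node's Ready condition until it matches
// the wanted state (true for Ready, false for NotReady).
func waitForNodeReadyState(node string, wantReady bool) error {
    jsonpath := `-ojsonpath={.status.conditions[?(@.type=="Ready")].status}`
    return wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
        out, err := exec.Command("oc", "get", "node", node, jsonpath).Output()
        if err != nil {
            return false, nil // node may be briefly unreachable
        }
        isReady := strings.TrimSpace(string(out)) == "True"
        return isReady == wantReady, nil
    })
}

func main() {
    if err := waitForNodeReadyState("worker-0", false); err != nil {
        fmt.Println("node never became NotReady:", err)
    }
}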
test case
|
openshift/openshift-tests-private
|
80faf7c1-c606-4cf9-ac70-e66a05f903e4
|
Author:qiowang-Medium-69876-Check egressfirewall status when there is zone reported failure
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:qiowang-Medium-69876-Check egressfirewall status when there is zone reported failure", func() {
ipStackType := checkIPStackType(oc)
var egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
egressFWCIDR1 = "1.1.1.1"
egressFWCIDR2 = "2011::11"
} else if ipStackType == "ipv6single" {
egressFWCIDR1 = "2011::11"
egressFWCIDR2 = "2012::11"
} else {
egressFWCIDR1 = "1.1.1.1"
egressFWCIDR2 = "2.1.1.1"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Create egressfirewall object which missing CIDR prefix")
ns := oc.Namespace()
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("2. Check status of egressfirewall object")
checkErr := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules not correctly applied")
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("EgressFirewall Rule %s doesn't show failure in time", egressFW.name))
messages, messagesErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(messagesErr).NotTo(o.HaveOccurred())
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(strings.Contains(messages, node+": EgressFirewall Rules not correctly applied")).Should(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
89798c1a-e355-49f0-a6d9-697acbe5ab13
|
NonHyperShiftHOST-NonPreRelease-Author:qiowang-Medium-70011-Medium-70012-Check apbexternalroute/egressfirewall status when machine added/removed [Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Author:qiowang-Medium-70011-Medium-70012-Check apbexternalroute/egressfirewall status when machine added/removed [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
nodes, getNodeErr := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
ipStackType := checkIPStackType(oc)
var externalGWIP1, externalGWIP2, egressFWCIDR1, egressFWCIDR2 string
if ipStackType == "dualstack" {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "2011::11"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2021::/96"
} else if ipStackType == "ipv6single" {
externalGWIP1 = "2011::11"
externalGWIP2 = "2011::12"
egressFWCIDR1 = "2021::/96"
egressFWCIDR2 = "2022::/96"
} else {
externalGWIP1 = "1.1.1.1"
externalGWIP2 = "1.1.1.2"
egressFWCIDR1 = "2.1.1.0/24"
egressFWCIDR2 = "2.1.2.0/24"
}
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
apbExternalRouteTemplate := filepath.Join(buildPruningBaseDir, "apbexternalroute-static-template.yaml")
egressFWTemplate := filepath.Join(buildPruningBaseDir, "egressfirewall5-template.yaml")
exutil.By("1. Create Admin Policy Based External route object with static gateway")
ns := oc.Namespace()
apbExternalRoute := apbStaticExternalRoute{
name: "externalgw-70011",
labelkey: "kubernetes.io/metadata.name",
labelvalue: ns,
ip1: externalGWIP1,
ip2: externalGWIP2,
bfd: false,
template: apbExternalRouteTemplate,
}
defer apbExternalRoute.deleteAPBExternalRoute(oc)
apbExternalRoute.createAPBExternalRoute(oc)
exutil.By("2. Create egressfirewall object with allow rule")
egressFW := egressFirewall5{
name: "default",
namespace: ns,
ruletype1: "Allow",
rulename1: "cidrSelector",
rulevalue1: egressFWCIDR1,
protocol1: "TCP",
portnumber1: 80,
ruletype2: "Allow",
rulename2: "cidrSelector",
rulevalue2: egressFWCIDR2,
protocol2: "TCP",
portnumber2: 80,
template: egressFWTemplate,
}
defer removeResource(oc, true, true, "egressfirewall", egressFW.name, "-n", egressFW.namespace)
egressFW.createEgressFW5Object(oc)
exutil.By("3. Check status of apbexternalroute/egressfirewall object")
apbExtRouteCheckErr1 := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr1, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs1, apbExtRouteMsgsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr1).NotTo(o.HaveOccurred())
egressFWCheckErr1 := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr1, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs1, egressFWMsgsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr1).NotTo(o.HaveOccurred())
for _, node := range nodes {
o.Expect(strings.Contains(apbExtRouteMsgs1, node+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
o.Expect(strings.Contains(egressFWMsgs1, node+": EgressFirewall Rules applied")).Should(o.BeTrue())
}
exutil.By("4. Add machine")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-70011"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
newNode := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
e2e.Logf("New node is:%s", newNode)
exutil.By("5. Check status of apbexternalroute/egressfirewall object when new machine added")
apbExtRouteCheckErr2 := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr2, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs2, apbExtRouteMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(apbExtRouteMsgs2, newNode+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).Should(o.BeTrue())
egressFWCheckErr2 := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr2, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs2, egressFWMsgsErr2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(egressFWMsgs2, newNode+": EgressFirewall Rules applied")).Should(o.BeTrue())
exutil.By("6. Remove machine")
ms.DeleteMachineSet(oc)
clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
exutil.By("7. Check status of apbexternalroute/egressfirewall object after machine removed")
apbExtRouteCheckErr3 := checkAPBExternalRouteStatus(oc, apbExternalRoute.name, "Success")
exutil.AssertWaitPollNoErr(apbExtRouteCheckErr3, fmt.Sprintf("apbexternalroute %s doesn't succeed in time", apbExternalRoute.name))
apbExtRouteMsgs3, apbExtRouteMsgsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("apbexternalroute", apbExternalRoute.name, `-ojsonpath={.status.messages}`).Output()
o.Expect(apbExtRouteMsgsErr3).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(apbExtRouteMsgs3, newNode+": configured external gateway IPs: "+apbExternalRoute.ip1+","+apbExternalRoute.ip2)).ShouldNot(o.BeTrue())
egressFWCheckErr3 := checkEgressFWStatus(oc, egressFW.name, ns, "EgressFirewall Rules applied")
exutil.AssertWaitPollNoErr(egressFWCheckErr3, fmt.Sprintf("EgressFirewall Rule %s doesn't apply in time", egressFW.name))
egressFWMsgs3, egressFWMsgsErr3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressfirewall", egressFW.name, "-n", egressFW.namespace, `-ojsonpath={.status.messages}`).Output()
o.Expect(egressFWMsgsErr3).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(egressFWMsgs3, newNode+": EgressFirewall Rules applied")).ShouldNot(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
f1e1af65-92d4-4e26-9999-82d619c16c48
|
Longduration-NonPreRelease-Author:jechen-High-72028-Join switch IP and management port IP for newly added node should be synced correctly into NBDB, pod on new node can communicate with old pod on old node. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Longduration-NonPreRelease-Author:jechen-High-72028-Join switch IP and management port IP for newly added node should be synced correctly into NBDB, pod on new node can communicate with old pod on old node. [Disruptive]", func() {
// This is for customer bug: https://issues.redhat.com/browse/OCPBUGS-28724
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
allowFromAllNSNetworkPolicyFile := filepath.Join(buildPruningBaseDir, "networkpolicy/allow-from-all-namespaces.yaml")
clusterinfra.SkipConditionally(oc)
exutil.By("1. Get an existing schedulable node\n")
currentNodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
oldNode := currentNodeList.Items[0].Name
exutil.By("2. Obtain the namespace\n")
ns1 := oc.Namespace()
exutil.By("3.Create a network policy in the namespace\n")
createResourceFromFile(oc, ns1, allowFromAllNSNetworkPolicyFile)
output, err := oc.Run("get").Args("networkpolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allow-from-all-namespaces"))
exutil.By("4. Create a test pod on the namespace on the existing node\n")
podOnOldNode := pingPodResourceNode{
name: "hello-pod1",
namespace: ns1,
nodename: oldNode,
template: pingPodNodeTemplate,
}
podOnOldNode.createPingPodNode(oc)
waitPodReady(oc, podOnOldNode.namespace, podOnOldNode.name)
exutil.By("5. Create a new machineset, get the new node created\n")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-72028"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
newNodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
e2e.Logf("Get new node name: %s", newNodeName)
exutil.By("6. Create second namespace,create another test pod in it on the new node\n")
oc.SetupProject()
ns2 := oc.Namespace()
podOnNewNode := pingPodResourceNode{
name: "hello-pod2",
namespace: ns2,
nodename: newNodeName,
template: pingPodNodeTemplate,
}
podOnNewNode.createPingPodNode(oc)
waitPodReady(oc, podOnNewNode.namespace, podOnNewNode.name)
exutil.By("7. Get management IP(s) and join switch IP(s) for the new node\n")
ipStack := checkIPStackType(oc)
var nodeOVNK8sMgmtIPv4, nodeOVNK8sMgmtIPv6 string
if ipStack == "dualstack" || ipStack == "ipv6single" {
nodeOVNK8sMgmtIPv6 = getOVNK8sNodeMgmtIPv6(oc, newNodeName)
}
if ipStack == "dualstack" || ipStack == "ipv4single" {
nodeOVNK8sMgmtIPv4 = getOVNK8sNodeMgmtIPv4(oc, newNodeName)
}
e2e.Logf("\n ipStack type: %s, nodeOVNK8sMgmtIPv4: %s, nodeOVNK8sMgmtIPv6: ---->%s<---- \n", ipStack, nodeOVNK8sMgmtIPv4, nodeOVNK8sMgmtIPv6)
joinSwitchIPv4, joinSwitchIPv6 := getJoinSwitchIPofNode(oc, newNodeName)
e2e.Logf("\n Got joinSwitchIPv4: %v, joinSwitchIPv6: %v\n", joinSwitchIPv4, joinSwitchIPv6)
exutil.By("8. Check host network adresses in each node's northdb, it should include join switch IP and management IP of newly added node\n")
allNodeList, nodeErr := exutil.GetAllNodes(oc)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
o.Expect(len(allNodeList)).NotTo(o.BeEquivalentTo(0))
for _, eachNodeName := range allNodeList {
ovnKubePod, podErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", eachNodeName)
o.Expect(podErr).NotTo(o.HaveOccurred())
o.Expect(ovnKubePod).ShouldNot(o.Equal(""))
if ipStack == "dualstack" || ipStack == "ipv4single" {
externalIDv4 := "external_ids:\\\"k8s.ovn.org/id\\\"=\\\"default-network-controller:Namespace:openshift-host-network:v4\\\""
hostNetworkIPsv4 := getHostNetworkIPsinNBDB(oc, eachNodeName, externalIDv4)
e2e.Logf("\n Got hostNetworkIPsv4 for node %s : %v\n", eachNodeName, hostNetworkIPsv4)
o.Expect(contains(hostNetworkIPsv4, nodeOVNK8sMgmtIPv4)).Should(o.BeTrue(), fmt.Sprintf("New node's mgmt IPv4 is not updated to node %s in NBDB!", eachNodeName))
o.Expect(unorderedContains(hostNetworkIPsv4, joinSwitchIPv4)).Should(o.BeTrue(), fmt.Sprintf("New node's join switch IPv4 is not updated to node %s in NBDB!", eachNodeName))
}
if ipStack == "dualstack" || ipStack == "ipv6single" {
externalIDv6 := "external_ids:\\\"k8s.ovn.org/id\\\"=\\\"default-network-controller:Namespace:openshift-host-network:v6\\\""
hostNetworkIPsv6 := getHostNetworkIPsinNBDB(oc, eachNodeName, externalIDv6)
e2e.Logf("\n Got hostNetworkIPsv6 for node %s : %v\n", eachNodeName, hostNetworkIPsv6)
o.Expect(contains(hostNetworkIPsv6, nodeOVNK8sMgmtIPv6)).Should(o.BeTrue(), fmt.Sprintf("New node's mgmt IPv6 is not updated to node %s in NBDB!", eachNodeName))
o.Expect(unorderedContains(hostNetworkIPsv6, joinSwitchIPv6)).Should(o.BeTrue(), fmt.Sprintf("New node's join switch IPv6 is not updated to node %s in NBDB!", eachNodeName))
}
}
exutil.By("9. Verify that new pod on new node can communicate with old pod on old node \n")
CurlPod2PodPass(oc, podOnOldNode.namespace, podOnOldNode.name, podOnNewNode.namespace, podOnNewNode.name)
CurlPod2PodPass(oc, podOnNewNode.namespace, podOnNewNode.name, podOnOldNode.namespace, podOnOldNode.name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
5666cfa5-682d-493f-8248-13264b034699
|
Author:qiowang-Medium-68920-kubernetes service route is recoverable if it's cleared [Disruptive]
|
['"fmt"', '"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:qiowang-Medium-68920-kubernetes service route is recoverable if it's cleared [Disruptive]", func() {
e2e.Logf("It is for OCPBUGS-1715")
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o", "jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get service subnets")
svcSubnetStr, getSubnetsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("network.operator", "cluster", `-ojsonpath={.spec.serviceNetwork}`).Output()
o.Expect(getSubnetsErr).NotTo(o.HaveOccurred())
svcSubnets := strings.Split(strings.Trim(svcSubnetStr, "[]"), ",")
for _, svcSubnet := range svcSubnets {
svcSubnet := strings.Trim(svcSubnet, `"`)
var verFlag string
if strings.Count(svcSubnet, ":") >= 2 {
verFlag = "-6"
} else if strings.Count(svcSubnet, ".") >= 2 {
verFlag = "-4"
}
exutil.By("Delete service route on one of the worker node")
origSvcRouteStr, getRouteErr := exutil.DebugNode(oc, nodeName, "ip", verFlag, "route", "show", svcSubnet)
e2e.Logf("original service route is: -- %s --", origSvcRouteStr)
o.Expect(getRouteErr).NotTo(o.HaveOccurred())
re := regexp.MustCompile(svcSubnet + ".*\n")
origSvcRouteLine := re.FindAllString(origSvcRouteStr, -1)[0]
origSvcRoute := strings.Trim(origSvcRouteLine, "\n")
defer func() {
svcRoute1, deferErr := exutil.DebugNode(oc, nodeName, "ip", verFlag, "route", "show", svcSubnet)
o.Expect(deferErr).NotTo(o.HaveOccurred())
if !strings.Contains(svcRoute1, origSvcRoute) {
addCmd := "ip " + verFlag + " route add " + origSvcRoute
exutil.DebugNode(oc, nodeName, "bash", "-c", addCmd)
}
}()
delCmd := "ip " + verFlag + " route del " + origSvcRoute
_, delRouteErr := exutil.DebugNode(oc, nodeName, "bash", "-c", delCmd)
o.Expect(delRouteErr).NotTo(o.HaveOccurred())
exutil.By("Check the service route is restored")
routeOutput := wait.Poll(15*time.Second, 300*time.Second, func() (bool, error) {
svcRoute, getRouteErr1 := exutil.DebugNode(oc, nodeName, "ip", verFlag, "route", "show", svcSubnet)
o.Expect(getRouteErr1).NotTo(o.HaveOccurred())
if strings.Contains(svcRoute, origSvcRoute) {
return true, nil
}
e2e.Logf("Route is not restored and try again")
return false, nil
})
exutil.AssertWaitPollNoErr(routeOutput, fmt.Sprintf("Fail to restore route and the error is:%s", routeOutput))
exutil.By("Check the log for restore the service route")
ovnkubePod, getPodErr := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(getPodErr).NotTo(o.HaveOccurred())
filter := "'Route Manager:.*Dst: " + svcSubnet + "' | tail -1"
podLogs, getLogErr := checkLogMessageInPod(oc, "openshift-ovn-kubernetes", "ovnkube-controller", ovnkubePod, filter)
o.Expect(getLogErr).NotTo(o.HaveOccurred())
o.Expect(podLogs).To(o.ContainSubstring("netlink route addition event"))
}
})
| |||||
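The route-restore test above isolates the original route line with a regular expression built by concatenating the raw subnet string. A standalone sketch of that extraction follows; it adds regexp.QuoteMeta so the dots and slash in the subnet are matched literally, a hardening the test itself does not apply, and the sample route line is illustrative.

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// extractRouteLine pulls the first `ip route show` entry for a subnet,
// the way the test isolates the original service route before deleting it.
func extractRouteLine(routeOutput, subnet string) (string, bool) {
    re := regexp.MustCompile(regexp.QuoteMeta(subnet) + ".*\n")
    matches := re.FindAllString(routeOutput, -1)
    if len(matches) == 0 {
        return "", false
    }
    return strings.Trim(matches[0], "\n"), true
}

func main() {
    sample := "172.30.0.0/16 via 10.244.0.1 dev ovn-k8s-mp0 mtu 1400\n"
    if line, ok := extractRouteLine(sample, "172.30.0.0/16"); ok {
        fmt.Println("route to restore:", line)
    }
}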
test case
|
openshift/openshift-tests-private
|
83bfcea3-32da-4935-b6ea-c0a0b2ddd83e
|
Author:anusaxen-Medium-66884-Larger packet size than Cluster MTU should not cause packet drops
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:anusaxen-Medium-66884-Larger packet size than Cluster MTU should not cause packet drops", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
platform := checkPlatform(oc)
if !strings.Contains(platform, "aws") {
g.Skip("Test requires AWS, skip for other platforms!")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("create a hello pod1 in namespace")
pod1ns := pingPodResourceNode{
name: "hello-pod1",
namespace: oc.Namespace(),
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns.createPingPodNode(oc)
waitPodReady(oc, pod1ns.namespace, pod1ns.name)
exutil.By("create a hello-pod2 in namespace")
pod2ns := pingPodResourceNode{
name: "hello-pod2",
namespace: oc.Namespace(),
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns.createPingPodNode(oc)
waitPodReady(oc, pod2ns.namespace, pod2ns.name)
exutil.By("Get IP of the hello-pod2")
helloPod2IP := getPodIPv4(oc, oc.Namespace(), "hello-pod2")
//Cluster network MTU on AWS is 8901 and the negotiated MSS is 8849, which accommodates the TCP and IP headers etc. We will use an MSS of 9000 in this test
iperfClientCmd := "iperf3 -c " + helloPod2IP + " -p 60001 -b 30M -N -V -M 9000|grep -i -A 5 'Test Complete' | grep -i -A 1 'Retr' | awk '{ print $9 }' | tail -1"
iperfServerCmd := "nohup iperf3 -s -p 60001&"
cmdBackground, _, _, errBackground := oc.Run("exec").Args("-n", pod2ns.namespace, pod2ns.name, "--", "/bin/sh", "-c", iperfServerCmd).Background()
defer cmdBackground.Process.Kill()
o.Expect(errBackground).NotTo(o.HaveOccurred())
retr_count, err := oc.Run("exec").Args("-n", pod1ns.namespace, pod1ns.name, "--", "/bin/sh", "-c", iperfClientCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(fmt.Sprintf("Total Retr count is \n %s", retr_count))
retr_count_int, err := strconv.Atoi(retr_count)
o.Expect(err).NotTo(o.HaveOccurred())
//iperf simulates 10 iterations at 30Mbps, so we expect at most 1 retransmission per iteration, i.e. no more than 10 in total
o.Expect(retr_count_int < 11).To(o.BeTrue())
})
| |||||
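The MTU/MSS numbers in the comment above line up with standard header overhead: 8901 minus 20 bytes of IPv4 header, 20 bytes of base TCP header, and 12 bytes of TCP options (the timestamp option is a common default) gives the negotiated MSS of 8849. A one-line sanity check of that accounting, under those header-size assumptions:

package main

import "fmt"

func main() {
    const (
        clusterMTU = 8901 // OVN-Kubernetes cluster network MTU on AWS
        ipHeader   = 20   // IPv4 header without options
        tcpHeader  = 20   // base TCP header
        tcpOptions = 12   // TCP timestamp option, a common default
    )
    // Prints 8849, the negotiated MSS observed by the test.
    fmt.Println("expected MSS:", clusterMTU-ipHeader-tcpHeader-tcpOptions)
}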
test case
|
openshift/openshift-tests-private
|
a73402ca-d837-44d5-8acc-e35e9afa6ae9
|
Author:anusaxen-High-73205-High-72817-Make sure internalJoinSubnet and internalTransitSwitchSubnet is configurable post install as a Day 2 operation [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:anusaxen-High-73205-High-72817-Make sure internalJoinSubnet and internalTransitSwitchSubnet is configurable post install as a Day 2 operation [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("create a hello pod1 in namespace")
pod1ns := pingPodResourceNode{
name: "hello-pod1",
namespace: oc.Namespace(),
nodename: nodeList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod1ns.createPingPodNode(oc)
waitPodReady(oc, oc.Namespace(), pod1ns.name)
exutil.By("create a hello-pod2 in namespace")
pod2ns := pingPodResourceNode{
name: "hello-pod2",
namespace: oc.Namespace(),
nodename: nodeList.Items[1].Name,
template: pingPodNodeTemplate,
}
pod2ns.createPingPodNode(oc)
waitPodReady(oc, oc.Namespace(), pod2ns.name)
g.By("Create a test service backing up both the above pods")
svc := genericServiceResource{
servicename: "test-service-73205",
namespace: oc.Namespace(),
protocol: "TCP",
selector: "hello-pod",
serviceType: "ClusterIP",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "",
template: genericServiceTemplate,
}
if ipStackType == "ipv4single" {
svc.ipFamilyPolicy = "SingleStack"
} else {
svc.ipFamilyPolicy = "PreferDualStack"
}
svc.createServiceFromParams(oc)
//custom patches to test depending on type of cluster addressing
customPatchIPv4 := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"100.99.0.0/16\",\"internalTransitSwitchSubnet\": \"100.69.0.0/16\"}}}}}"
customPatchIPv6 := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv6\":{\"internalJoinSubnet\": \"ab98::/64\",\"internalTransitSwitchSubnet\": \"ab97::/64\"}}}}}"
customPatchDualstack := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"100.99.0.0/16\",\"internalTransitSwitchSubnet\": \"100.69.0.0/16\"},\"ipv6\": {\"internalJoinSubnet\": \"ab98::/64\",\"internalTransitSwitchSubnet\": \"ab97::/64\"}}}}}"
//gather original cluster values so that we can defer to them later once test done
currentinternalJoinSubnetIPv4Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv4.internalJoinSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentinternalTransitSwSubnetIPv4Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv4.internalTransitSwitchSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentinternalJoinSubnetIPv6Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv6.internalJoinSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
currentinternalTransitSwSubnetIPv6Value, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Network.operator.openshift.io/cluster", "-o=jsonpath={.items[*].spec.defaultNetwork.ovnKubernetesConfig.ipv6.internalTransitSwitchSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//if any value is empty on the existing cluster, it indicates that the cluster came up with the following default values assigned by OVNK
if (currentinternalJoinSubnetIPv4Value == "") || (currentinternalJoinSubnetIPv6Value == "") {
currentinternalJoinSubnetIPv4Value = "100.64.0.0/16"
currentinternalTransitSwSubnetIPv4Value = "100.88.0.0/16"
currentinternalJoinSubnetIPv6Value = "fd98::/64"
currentinternalTransitSwSubnetIPv6Value = "fd97::/64"
}
//vars to patch cluster back to original state
patchIPv4original := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv4Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv4Value + "\"}}}}}"
patchIPv6original := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv6\":{\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv6Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv6Value + "\"}}}}}"
patchDualstackoriginal := "{\"spec\":{\"defaultNetwork\":{\"ovnKubernetesConfig\":{\"ipv4\":{\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv4Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv4Value + "\"},\"ipv6\": {\"internalJoinSubnet\": \"" + currentinternalJoinSubnetIPv6Value + "\",\"internalTransitSwitchSubnet\": \"" + currentinternalTransitSwSubnetIPv6Value + "\"}}}}}"
if ipStackType == "ipv4single" {
defer func() {
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", patchIPv4original)
err := checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
}()
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", customPatchIPv4)
} else if ipStackType == "ipv6single" {
defer func() {
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", patchIPv6original)
err := checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
}()
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", customPatchIPv6)
} else {
defer func() {
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", patchDualstackoriginal)
err := checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube didn't trigger or rolled out successfully post oc patch"))
}()
patchResourceAsAdmin(oc, "Network.operator.openshift.io/cluster", customPatchDualstack)
}
err = checkOVNKState(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("OVNkube never trigger or rolled out successfully post oc patch"))
//check usual svc and pod connectivity post migration, which also ensures the disruption does not persist after a successful rollout
CurlPod2PodPass(oc, oc.Namespace(), pod1ns.name, oc.Namespace(), pod2ns.name)
CurlPod2SvcPass(oc, oc.Namespace(), oc.Namespace(), pod1ns.name, "test-service-73205")
})
| |||||
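The escaped one-line patch strings above are hard to audit. Below is a sketch that builds the same merge patch with encoding/json from typed maps instead, assuming the ipv4 internalJoinSubnet/internalTransitSwitchSubnet field names of the network.operator OVNKubernetesConfig API as used by the test.

package main

import (
    "encoding/json"
    "fmt"
)

// ovnSubnetPatch assembles the ipv4 subnet merge patch from nested maps
// so the structure stays readable and easy to extend (e.g. with ipv6).
func ovnSubnetPatch(joinV4, transitV4 string) ([]byte, error) {
    patch := map[string]any{
        "spec": map[string]any{
            "defaultNetwork": map[string]any{
                "ovnKubernetesConfig": map[string]any{
                    "ipv4": map[string]string{
                        "internalJoinSubnet":          joinV4,
                        "internalTransitSwitchSubnet": transitV4,
                    },
                },
            },
        },
    }
    return json.Marshal(patch)
}

func main() {
    b, err := ovnSubnetPatch("100.99.0.0/16", "100.69.0.0/16")
    if err != nil {
        panic(err)
    }
    // Feed to: oc patch Network.operator.openshift.io/cluster --type=merge -p '<json>'
    fmt.Println(string(b))
}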
test case
|
openshift/openshift-tests-private
|
222ce4bb-1501-464c-ba17-75cffde33092
|
Author:jechen-ConnectedOnly-High-74589-Pod-to-external TCP connectivity using port in range of snat port.
|
['"context"', '"fmt"', '"path/filepath"', '"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:jechen-ConnectedOnly-High-74589-Pod-to-external TCP connectivity using port in range of snat port.", func() {
// For customer bug https://issues.redhat.com/browse/OCPBUGS-32202
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
genericServiceTemplate := filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
testPodNodeTemplate := filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
url := "www.example.com"
ipStackType := checkIPStackType(oc)
if checkDisconnect(oc) || ipStackType == "ipv6single" {
g.Skip("Skip the test on disconnected cluster or singlev6 cluster.")
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("Not enough node available, need at least one node for the test, skip the case!!")
}
exutil.By("1. create a namespace, create nodeport service on one node")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("2. Create a hello pod in ns")
pod1 := pingPodResourceNode{
name: "hello-pod",
namespace: ns,
nodename: nodeList.Items[0].Name,
template: testPodNodeTemplate,
}
pod1.createPingPodNode(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("3. Create a nodePort type service fronting the above pod")
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "NodePort",
ipFamilyPolicy: "",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "", //This no value parameter will be ignored
template: genericServiceTemplate,
}
if ipStackType == "dualstack" {
svc.ipFamilyPolicy = "PreferDualStack"
} else {
svc.ipFamilyPolicy = "SingleStack"
}
defer removeResource(oc, true, true, "service", svc.servicename, "-n", svc.namespace)
svc.createServiceFromParams(oc)
exutil.By("4. Get NodePort at which service listens.")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", ns, svc.servicename, "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. From external, curl NodePort service with its port to make sure NodePort service works")
CurlNodePortPass(oc, nodeList.Items[1].Name, nodeList.Items[0].Name, nodePort)
exutil.By("6. Create another test pod on another node, from the test pod to curl local port of external url, verify the connection can succeed\n")
pod2 := pingPodResourceNode{
name: "testpod",
namespace: ns,
nodename: nodeList.Items[1].Name,
template: testPodNodeTemplate,
}
pod2.createPingPodNode(oc)
waitPodReady(oc, ns, pod2.name)
cmd := fmt.Sprintf("curl --local-port 32012 -v -I -L http://%s", url)
expectedString := fmt.Sprintf(`\* Connected to %s \(([\d\.]+)\) port 80 `, url)
re := regexp.MustCompile(expectedString)
connectErr := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
_, execCmdOutput, err := e2eoutput.RunHostCmdWithFullOutput(ns, pod2.name, cmd)
if err != nil {
e2e.Logf("Getting err :%v, trying again...", err)
return false, nil
}
if !re.MatchString(execCmdOutput) {
e2e.Logf("Did not get expected output, trying again...")
e2e.Logf("\n execCmdOutput is %v\n", execCmdOutput)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(connectErr, fmt.Sprintf("Connection to %s did not succeed!", url))
})
| |||||
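curl --local-port pins the client's source port, which is the point of case 74589 (a source port inside the SNAT range). For reference, a Go equivalent binds the dialer's local address, as sketched below; reusing a fixed port back-to-back can fail until the previous connection leaves TIME_WAIT, which is why the test wraps the curl in a poll. The port and URL are taken from the test.

package main

import (
    "fmt"
    "net"
    "net/http"
    "time"
)

// newClientWithLocalPort returns an HTTP client whose TCP connections are
// bound to a fixed local source port, the Go equivalent of
// `curl --local-port 32012`.
func newClientWithLocalPort(port int) *http.Client {
    dialer := &net.Dialer{
        LocalAddr: &net.TCPAddr{IP: net.IPv4zero, Port: port},
        Timeout:   5 * time.Second,
    }
    return &http.Client{
        Transport: &http.Transport{DialContext: dialer.DialContext},
        Timeout:   10 * time.Second,
    }
}

func main() {
    client := newClientWithLocalPort(32012)
    resp, err := client.Get("http://www.example.com")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}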
test case
|
openshift/openshift-tests-private
|
766e20b4-027f-43bc-bee7-2750a6d7bc23
|
Author:huirwang-High-75613-Should be able to access applications when client ephemeral port is 22623 or 22624
|
['"fmt"', '"net"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:huirwang-High-75613-Should be able to access applications when client ephemeral port is 22623 or 22624", func() {
// https://issues.redhat.com/browse/OCPBUGS-37541
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
g.By("Get new namespace")
ns1 := oc.Namespace()
g.By("Create test pods")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
g.By("Should be able to access applications when client ephemeral port is 22623 or 22624")
testPodName := getPodName(oc, ns1, "name=test-pods")
pod1Name := testPodName[0]
localPort := []string{"22623", "22624"}
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" {
pod2IP1, pod2IP2 := getPodIP(oc, ns1, testPodName[1])
for i := 0; i < 2; i++ {
curlCmd := fmt.Sprintf("curl --connect-timeout 5 -s %s --local-port %s", net.JoinHostPort(pod2IP1, "8080"), localPort[i])
_, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name, curlCmd, 60*time.Second, 120*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
curlCmd = fmt.Sprintf("curl --connect-timeout 5 -s %s --local-port %s", net.JoinHostPort(pod2IP2, "8080"), localPort[i])
// Wait up to 1 minute for the local binding port to be released
_, err = e2eoutput.RunHostCmdWithRetries(ns1, pod1Name, curlCmd, 60*time.Second, 120*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
pod2IP1, _ := getPodIP(oc, ns1, testPodName[1])
for i := 0; i < 2; i++ {
curlCmd := fmt.Sprintf("curl --connect-timeout 5 -s %s --local-port %s", net.JoinHostPort(pod2IP1, "8080"), localPort[i])
_, err := e2eoutput.RunHostCmdWithRetries(ns1, pod1Name, curlCmd, 60*time.Second, 120*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
47e3492f-42fc-4d16-93ac-b4b5ff12800e
|
Author:huirwang-High-75758-Bad certificate should not cause ovn pods crash. [Disruptive]
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:huirwang-High-75758-Bad certificate should not cause ovn pods crash. [Disruptive]", func() {
// https://issues.redhat.com/browse/OCPBUGS-36195
exutil.By("Get one worker node.")
node1, err := exutil.GetFirstCoreOsWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if len(node1) < 1 {
g.Skip("Skip the test as no enough worker nodes.")
}
exutil.By("Get the ovnkube-node pod on specific node.")
ovnPod, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", node1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ovnPod).ShouldNot(o.BeEmpty())
exutil.By("Create bad ovnkube-node-certs certificate")
cmd := `cd /var/lib/ovn-ic/etc/ovnkube-node-certs && ls | grep '^ovnkube-client-.*\.pem$' | grep -v 'ovnkube-client-current.pem' | xargs -I {} sh -c 'echo "" > {}'`
_, err = exutil.DebugNodeWithChroot(oc, node1, "bash", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Restart ovnkube-node pod on specific node.")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnPod, "-n", "openshift-ovn-kubernetes", "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait ovnkube-node pod to be running")
ovnPod, err = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", node1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ovnPod).ShouldNot(o.BeEmpty())
exutil.AssertPodToBeReady(oc, ovnPod, "openshift-ovn-kubernetes")
})
| |||||
test case
|
openshift/openshift-tests-private
|
bf748d6b-9dd8-42b1-9229-df2d7d92b482
|
Author:meinli-Medium-45146-Pod should be healthy when gw IP is single stack on dual stack cluster
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:meinli-Medium-45146-Pod should be healthy when gw IP is single stack on dual stack cluster", func() {
// https://bugzilla.redhat.com/show_bug.cgi?id=1986708
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-template.yaml")
)
ipStackType := checkIPStackType(oc)
if ipStackType != "dualstack" {
g.Skip("This case is only validate in DualStack cluster, skip it!!!")
}
exutil.By("1. Get namespace")
ns := oc.Namespace()
exutil.By("2. Create a pod in ns namespace")
pod := pingPodResource{
name: "hello-pod",
namespace: ns,
template: pingPodTemplate,
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod.name, "-n", pod.namespace).Execute()
pod.createPingPod(oc)
waitPodReady(oc, pod.namespace, pod.name)
exutil.By("3. Patch annotation for hello-pod")
annotationsCmd := fmt.Sprintf(`{ "metadata":{
"annotations": {
"k8s.ovn.org/routing-namespaces": "%s",
"k8s.ovn.org/routing-network": "foo",
"k8s.v1.cni.cncf.io/network-status": "[{\"name\":\"foo\",\"interface\":\"net1\",\"ips\":[\"172.19.0.5\"],\"mac\":\"01:23:45:67:89:10\"}]"
}
}
}`, ns)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("pod", pod.name, "-n", ns, "-p", annotationsCmd, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("4. Verify pod is healthy and running")
waitPodReady(oc, ns, pod.name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
d30e329c-a488-4cab-880e-aa61f73d3942
|
Author:meinli-NonPreRelease-Medium-34674-Ensure ovnkube-master nbdb and sbdb exit properly. [Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"sync"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author:meinli-NonPreRelease-Medium-34674-Ensure ovnkube-master nbdb and sbdb exit properly. [Disruptive]", func() {
exutil.By("1. Enable ovnkube-master pod debug log by ovn-appctl")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).NotTo(o.BeEmpty())
MasterNodeName, err := exutil.GetPodNodeName(oc, "openshift-ovn-kubernetes", ovnMasterPodName)
o.Expect(err).NotTo(o.HaveOccurred())
ctls := []string{"ovnnb_db.ctl", "ovnsb_db.ctl"}
for _, ctl := range ctls {
dbgCmd := fmt.Sprintf("ovn-appctl -t /var/run/ovn/%s vlog/set console:jsonrpc:dbg", ctl)
_, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnMasterPodName, dbgCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2. Check ovnkube-master pod debug log enabled successfully and make hard-link(ln) to preserve log")
LogsPath := "/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-*"
var wg sync.WaitGroup
Database := []string{"nbdb", "sbdb"}
for _, db := range Database {
wg.Add(1)
go func(db string) { // pass db by value to avoid sharing the loop variable across goroutines
defer g.GinkgoRecover()
defer wg.Done()
logPath := filepath.Join(LogsPath, db, "*.log")
checkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 20*time.Second, false, func(cxt context.Context) (bool, error) {
resultOutput, err := exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("tail -10 %s", logPath))
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(resultOutput, "jsonrpc") {
e2e.Logf("ovnkube-pod debug log has been successfully enabled!!!")
// select the most recent file to do hard-link
_, lnErr := exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("ln -v $(ls -1t %s | head -n 1) /var/log/%s.log", logPath, db))
o.Expect(lnErr).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("%v,Waiting for ovnkube-master pod debug log enable, try again ...,", err)
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, "Enable ovnkube-master pod debug log timeout.")
}()
}
wg.Wait()
exutil.By("3. delete the ovnkube-master pod and check log process should be exited")
defer checkOVNKState(oc)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", ovnMasterPodName, "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for _, db := range Database {
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
defer exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("rm -f /var/log/%s.log", db))
checkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 20*time.Second, false, func(cxt context.Context) (bool, error) {
output, err := exutil.DebugNodeWithChroot(oc, MasterNodeName, "/bin/bash", "-c", fmt.Sprintf("tail -10 /var/log/%s.log", db))
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, fmt.Sprintf("Exiting ovn%s_db", strings.Split(db, "db")[0])) {
e2e.Logf(fmt.Sprintf("ovnkube-master pod %s exit properly!!!", db))
return true, nil
}
e2e.Logf("%v,Waiting for ovnkube-master pod log sync up, try again ...,", err)
return false, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Check ovnkube-master pod %s debug log timeout.", db))
}()
}
wg.Wait()
})
| |||||
test case
|
openshift/openshift-tests-private
|
364565a2-077c-4fe8-b80c-435b7d41b31e
|
Author: meinli-Medium-72506-Traffic with dst ip from service CIDR that doesn't match existing svc ip+port should be dropped
|
['"context"', '"fmt"', '"net"', '"path/filepath"', '"regexp"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovn_misc.go
|
g.It("Author: meinli-Medium-72506-Traffic with dst ip from service CIDR that doesn't match existing svc ip+port should be dropped", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
)
exutil.By("1. Get namespace and worker node")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("This case requires one node, but the cluster han't one")
}
workerNode := nodeList.Items[0].Name
ns := oc.Namespace()
exutil.By("2. create a service")
createResourceFromFile(oc, ns, testSvcFile)
ServiceOutput, serviceErr := oc.WithoutNamespace().Run("get").Args("service", "-n", ns).Output()
o.Expect(serviceErr).NotTo(o.HaveOccurred())
o.Expect(ServiceOutput).To(o.ContainSubstring("test-service"))
exutil.By("3. Curl clusterIP svc from node")
svcIP1, svcIP2 := getSvcIP(oc, ns, "test-service")
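// The svc IP is inside the service CIDR but port 27018 does not match the service's exposed
// port, so curl should fail: "28" is curl's exit code for "operation timed out" and "Failed"
// covers a refused/unreachable connection.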
if svcIP2 != "" {
svc4URL := net.JoinHostPort(svcIP2, "27018")
output, _ := exutil.DebugNode(oc, workerNode, "curl", svc4URL, "--connect-timeout", "5")
o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("Failed")))
}
svcURL := net.JoinHostPort(svcIP1, "27018")
output, _ := exutil.DebugNode(oc, workerNode, "curl", svcURL, "--connect-timeout", "5")
o.Expect(output).To(o.Or(o.ContainSubstring("28"), o.ContainSubstring("Failed")))
exutil.By("4. Validate the drop packets counter is increasing from svc network")
ovnkubePodName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", workerNode)
o.Expect(err).NotTo(o.HaveOccurred())
cmd := "ovs-ofctl dump-flows br-ex | grep -i 'priority=105'"
output, err = e2eoutput.RunHostCmd("openshift-ovn-kubernetes", ovnkubePodName, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
r := regexp.MustCompile(`n_packets=(\d+).*?actions=drop`)
matches := r.FindAllStringSubmatch(output, -1)
// only check the latest drop action so the result isn't influenced by other cases
o.Expect(len(matches)).ShouldNot(o.Equal(0))
o.Expect(len(matches[0])).To(o.Equal(2))
o.Expect(strconv.Atoi(matches[0][1])).To(o.BeNumerically(">", 0))
exutil.By("5. Validate no packet are seen on br-ex from src")
if svcIP2 != "" {
output, err := e2eoutput.RunHostCmd("openshift-ovn-kubernetes", ovnkubePodName, fmt.Sprintf("ovs-ofctl dump-flows br-ex | grep -i 'src=%s'", svcIP2))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.BeEmpty())
}
output, err = e2eoutput.RunHostCmd("openshift-ovn-kubernetes", ovnkubePodName, fmt.Sprintf("ovs-ofctl dump-flows br-ex | grep -i 'src=%s'", svcIP1))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.BeEmpty())
})
| |||||
test
|
openshift/openshift-tests-private
|
9854acbc-15ba-41c3-8c6b-e6136452697e
|
pod_udn
|
import (
"context"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
|
package networking
import (
"context"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN udn pods", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("networking-udn", exutil.KubeConfigPath())
testDataDirUDN = exutil.FixturePath("testdata", "networking/udn")
)
g.BeforeEach(func() {
SkipIfNoFeatureGate(oc, "NetworkSegmentation")
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovn") {
g.Skip("Skip testing on non-ovn cluster!!!")
}
})
g.It("Author:anusaxen-Critical-74921-Check udn pods isolation on user defined networks", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.151.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60"}
}
}
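// For layer3 topologies the subnet "10.150.0.0/16/24" reads as network/clusterPrefix/hostPrefix:
// a /16 network carved into per-node /24 subnets (the IPv6 entries rely on the default host prefix).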
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: nadResourcename[i],
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a udn hello pod in ns2")
pod2 := udnPodResource{
name: "hello-pod-ns2",
namespace: ns2,
label: "hello-pod",
template: udnPodTemplate,
}
pod2.createUdnPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
//udn network connectivity should be isolated
CurlPod2PodFailUDN(oc, ns1, pod1.name, ns2, pod2.name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name)
})
g.It("Author:anusaxen-Critical-75236-Check udn pods are not isolated if same nad network is shared across two namespaces", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.150.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2010:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.150.0.0/16/24,2010:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: "l3-network-ns1", //Keeping same nad network name across all which is l3-network-ns1
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a udn hello pod in ns2")
pod2 := udnPodResource{
name: "hello-pod-ns2",
namespace: ns2,
label: "hello-pod",
template: udnPodTemplate,
}
pod2.createUdnPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
//udn network connectivity should NOT be isolated
CurlPod2PodPassUDN(oc, ns1, pod1.name, ns2, pod2.name)
//default network connectivity should be isolated
CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name)
})
g.It("Author:huirwang-High-75223-Restarting ovn pods should not break UDN primary network traffic.[Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.151.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: nadResourcename[i],
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, nadNS[i], nadResourcename[i]) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename[i])
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename[i])
}
}
exutil.By("Create replica pods in ns1")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS1Names := getPodName(oc, ns1, "name=test-pods")
CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1])
exutil.By("create replica pods in ns2")
createResourceFromFile(oc, ns2, testPodFile)
err = waitForPodWithLabelReady(oc, ns2, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS2Names := getPodName(oc, ns2, "name=test-pods")
CurlPod2PodPassUDN(oc, ns2, testpodNS2Names[0], ns2, testpodNS2Names[1])
exutil.By("Restart OVN pods")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, "openshift-ovn-kubernetes")
exutil.By("Verify the connection in UDN primary network not broken.")
CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1])
CurlPod2PodPassUDN(oc, ns2, testpodNS2Names[0], ns2, testpodNS2Names[1])
})
g.It("Author:huirwang-Medium-75238-NAD can be created with secondary role with primary UDN in same namespace.", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
pingPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_annotation_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-2-" + ns1}
role := []string{"primary", "secondary"}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.161.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], ns1))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: ns1,
nad_network_name: nadResourcename[i],
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: ns1 + "/" + nadResourcename[i],
role: role[i],
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns1, nadResourcename[i]) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename[i])
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename[i])
}
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a hello pod in ns1 refers to secondary udn network")
pod2 := udnPodSecNADResource{
name: "hello-pod-ns1-2",
namespace: ns1,
label: "hello-pod",
annotation: "/l3-network-2-" + ns1,
template: pingPodTemplate,
}
pod2.createUdnPodWithSecNAD(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
exutil.By("Verify the two pods between primary and udn networks work well")
CurlPod2PodPassUDN(oc, ns1, pod1.name, ns1, pod2.name)
exutil.By("Verify the pod2 has secondary network, but pod1 doesn't. ")
pod1IPs, err := execCommandInSpecificPod(oc, ns1, pod1.name, "ip a")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(pod1IPs, "net1@")).NotTo(o.BeTrue())
pod2IPs, err := execCommandInSpecificPod(oc, ns1, pod2.name, "ip a")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(pod2IPs, "net1@")).To(o.BeTrue())
})
g.It("Author:huirwang-Medium-75658-Check sctp traffic work well via udn pods user defined networks for laye3. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctp/sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctp/sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
exutil.By("Preparing the nodes for SCTP")
prepareSCTPModule(oc, sctpModule)
ipStackType := checkIPStackType(oc)
exutil.By("Setting privileges on the namespace")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
var cidr, ipv4cidr, ipv6cidr string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
prefix = 64
} else {
ipv4cidr = "10.150.0.0/16"
ipv4prefix = 24
ipv6cidr = "2010:100:200::0/48"
ipv6prefix = 64
}
}
exutil.By("Create CRD for UDN")
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-75658",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr,
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-75658",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
exutil.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
exutil.By("Verify sctp server pod can be accessed for UDN network.")
if ipStackType == "dualstack" {
sctpServerIPv6, sctpServerIPv4 := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1")
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true)
} else {
sctpServerIP, _ := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1")
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true)
}
})
g.It("Author:weliang-Medium-75623-Feature Integration UDN with multus. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
podenvname = "Hello OpenShift"
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("The cluster has no ready node for the testing")
}
oc.CreateNamespaceUDN()
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
exutil.By("Creating Layer2 UDN CRD with Primary role")
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-75239",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDdualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-75658",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDSingleStack,
}
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating NAD for ns")
nad := dualstackNAD{
nadname: "dualstack",
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "20.200.200.0/24",
ipv6range: "2000:200:200::0/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad.nadname, "-n", ns).Execute()
nad.createDualstackNAD(oc)
exutil.By("Creating three testing pods consuming above network-attach-definition in ns")
pods := make([]udnPodSecNADResourceNode, 3)
var podNames []string
for i := 0; i < 3; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns,
nadname: nad.nadname,
nodename: nodeList.Items[0].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[i].name)
podNames = append(podNames, pods[i].name)
}
exutil.By("Verifying the all pods get dual IPs")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[0])
pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns, podNames[1])
exutil.By("Verifying that there is no traffic blocked between pods")
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname)
})
g.It("Author:huirwang-Medium-75239-Check sctp traffic work well via udn pods user defined networks for layer2. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctp/sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctp/sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
exutil.By("Preparing the nodes for SCTP")
prepareSCTPModule(oc, sctpModule)
ipStackType := checkIPStackType(oc)
exutil.By("Setting privileges on the namespace")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
exutil.By("Create CRD for UDN")
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-75239",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDdualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-75658",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDSingleStack,
}
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
exutil.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
exutil.By("Verify sctp server pod can be accessed for UDN network.")
if ipStackType == "dualstack" {
sctpServerIPv6, sctpServerIPv4 := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1")
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true)
} else {
sctpServerIP, _ := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1")
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true)
}
})
g.It("Author:qiowang-High-75254-Check kubelet probes are allowed via default network's LSP for the UDN pods", func() {
var (
udnCRDdualStack = filepath.Join(testDataDirUDN, "udn_crd_dualstack2_template.yaml")
udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml")
udnPodLivenessTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_liveness_template.yaml")
udnPodReadinessTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_readiness_template.yaml")
udnPodStartupTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_startup_template.yaml")
livenessProbePort = 8080
readinessProbePort = 8081
startupProbePort = 1234
)
exutil.By("1. Create privileged namespace")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("2. Create CRD for UDN")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
prefix = 64
} else {
ipv4cidr = "10.150.0.0/16"
ipv4prefix = 24
ipv6cidr = "2010:100:200::0/48"
ipv6prefix = 64
}
}
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-ds-75254",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr,
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-ss-75254",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. Create a udn hello pod with liveness probe in ns1")
pod1 := udnPodWithProbeResource{
name: "hello-pod-ns1-liveness",
namespace: ns,
label: "hello-pod",
port: livenessProbePort,
failurethreshold: 1,
periodseconds: 1,
template: udnPodLivenessTemplate,
}
pod1.createUdnPodWithProbe(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("4. Capture packets in pod " + pod1.name + ", check liveness probe traffic is allowed via default network")
tcpdumpCmd1 := fmt.Sprintf("timeout 5s tcpdump -nni eth0 port %v", pod1.port)
cmdTcpdump1, cmdOutput1, _, err1 := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, pod1.name, "--", "bash", "-c", tcpdumpCmd1).Background()
defer cmdTcpdump1.Process.Kill()
o.Expect(err1).NotTo(o.HaveOccurred())
cmdTcpdump1.Wait()
e2e.Logf("The captured packet is %s", cmdOutput1.String())
expPacket1 := strconv.Itoa(pod1.port) + ": Flags [S]"
o.Expect(strings.Contains(cmdOutput1.String(), expPacket1)).To(o.BeTrue())
exutil.By("5. Create a udn hello pod with readiness probe in ns1")
pod2 := udnPodWithProbeResource{
name: "hello-pod-ns1-readiness",
namespace: ns,
label: "hello-pod",
port: readinessProbePort,
failurethreshold: 1,
periodseconds: 1,
template: udnPodReadinessTemplate,
}
pod2.createUdnPodWithProbe(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
exutil.By("6. Capture packets in pod " + pod2.name + ", check readiness probe traffic is allowed via default network")
tcpdumpCmd2 := fmt.Sprintf("timeout 5s tcpdump -nni eth0 port %v", pod2.port)
cmdTcpdump2, cmdOutput2, _, err2 := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, pod2.name, "--", "bash", "-c", tcpdumpCmd2).Background()
defer cmdTcpdump2.Process.Kill()
o.Expect(err2).NotTo(o.HaveOccurred())
cmdTcpdump2.Wait()
e2e.Logf("The captured packet is %s", cmdOutput2.String())
expPacket2 := strconv.Itoa(pod2.port) + ": Flags [S]"
o.Expect(strings.Contains(cmdOutput2.String(), expPacket2)).To(o.BeTrue())
exutil.By("7. Create a udn hello pod with startup probe in ns1")
pod3 := udnPodWithProbeResource{
name: "hello-pod-ns1-startup",
namespace: ns,
label: "hello-pod",
port: startupProbePort,
failurethreshold: 100,
periodseconds: 2,
template: udnPodStartupTemplate,
}
pod3.createUdnPodWithProbe(oc)
waitPodReady(oc, pod3.namespace, pod3.name)
exutil.By("8. Capture packets in pod " + pod3.name + ", check readiness probe traffic is allowed via default network")
tcpdumpCmd3 := fmt.Sprintf("timeout 10s tcpdump -nni eth0 port %v", pod3.port)
cmdTcpdump3, cmdOutput3, _, err3 := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, pod3.name, "--", "bash", "-c", tcpdumpCmd3).Background()
defer cmdTcpdump3.Process.Kill()
o.Expect(err3).NotTo(o.HaveOccurred())
cmdTcpdump3.Wait()
e2e.Logf("The captured packet is %s", cmdOutput3.String())
expPacket3 := strconv.Itoa(pod3.port) + ": Flags [S]"
o.Expect(strings.Contains(cmdOutput3.String(), expPacket3)).To(o.BeTrue())
})
g.It("Author:anusaxen-Critical-75876-Check udn pods are not isolated if same nad network is shared across two namespaces(layer 2)", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l2-network-" + ns1, "l2-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet string
if ipStackType == "ipv4single" {
subnet = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
subnet = "2010:100:200::0/60"
} else {
subnet = "10.150.0.0/16,2010:100:200::0/60"
}
}
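// Both NADs below share the same nad_network_name ("l2-network") and subnet, so pods in the
// two namespaces join one shared layer2 network instead of isolated per-namespace networks.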
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: "l2-network",
topology: "layer2",
subnet: subnet,
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a udn hello pod in ns2")
pod2 := udnPodResource{
name: "hello-pod-ns2",
namespace: ns2,
label: "hello-pod",
template: udnPodTemplate,
}
pod2.createUdnPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
//udn network connectivity should NOT be isolated
CurlPod2PodPassUDN(oc, ns1, pod1.name, ns2, pod2.name)
//default network connectivity should be isolated
CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name)
})
g.It("Author:anusaxen-Critical-75875-Check udn pods isolation on user defined networks (layer 2)", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l2-network-" + ns1, "l2-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16", "10.151.0.0/16"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16,2010:100:200::0/60", "10.151.0.0/16,2011:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: nadResourcename[i],
topology: "layer2",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a udn hello pod in ns2")
pod2 := udnPodResource{
name: "hello-pod-ns2",
namespace: ns2,
label: "hello-pod",
template: udnPodTemplate,
}
pod2.createUdnPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
//udn network connectivity should be isolated
CurlPod2PodFailUDN(oc, ns1, pod1.name, ns2, pod2.name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name)
})
g.It("Author:weliang-NonPreRelease-Longduration-Medium-75624-Feture intergration UDN with multinetworkpolicy. [Disruptive]", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
mtu int32 = 1300
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
multihomingPodTemplate = filepath.Join(buildPruningBaseDir, "multihoming/multihoming-pod-template.yaml")
policyFile = filepath.Join(testDataDirUDN, "udn_with_multiplenetworkpolicy.yaml")
patchSResource = "networks.operator.openshift.io/cluster"
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("The cluster has no ready node for the testing")
}
exutil.By("Enabling useMultiNetworkPolicy in the cluster")
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False")
waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False")
waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False")
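// waitForNetworkOperatorState matches the Available/Progressing/Degraded conditions: the
// rollout first reports True/True/False, then settles back at True/False/False.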
exutil.By("Creating a new namespace for this MultiNetworkPolicy testing")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project75624"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
exutil.By("Creating NAD1 for ns1")
nad1 := udnNetDefResource{
nadname: "udn-primary-net",
namespace: ns1,
nad_network_name: "udn-primary-net",
topology: "layer3",
subnet: "10.100.0.0/16/24",
mtu: mtu,
net_attach_def_name: ns1 + "/" + "udn-primary-net",
role: "primary",
template: udnNadtemplate,
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad1.nadname, "-n", ns1).Execute()
nad1.createUdnNad(oc)
exutil.By("Verifying the configured NAD1")
if checkNAD(oc, ns1, nad1.nadname) {
e2e.Logf("The correct network-attach-definition: %v is created!", nad1.nadname)
} else {
e2e.Failf("The correct network-attach-definition: %v is not created!", nad1.nadname)
}
exutil.By("Creating NAD2 for ns1")
nad2 := dualstackNAD{
nadname: "dualstack",
namespace: ns1,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "192.168.10.0/24",
ipv6range: "fd00:dead:beef:10::/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad2.nadname, "-n", ns1).Execute()
nad2.createDualstackNAD(oc)
exutil.By("Verifying the configured NAD2")
if checkNAD(oc, ns1, nad2.nadname) {
e2e.Logf("The correct network-attach-definition: %v is created!", nad2.nadname)
} else {
e2e.Failf("The correct network-attach-definition: %v is not created!", nad2.nadname)
}
nadName := "dualstack"
nsWithnad := ns1 + "/" + nadName
exutil.By("Configuring pod1 for additional network using NAD2")
pod1 := testMultihomingPod{
name: "blue-pod-1",
namespace: ns1,
podlabel: "blue-pod",
nadname: nsWithnad,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod1.createTestMultihomingPod(oc)
exutil.By("Configuring pod2 for additional network using NAD2")
pod2 := testMultihomingPod{
name: "blue-pod-2",
namespace: ns1,
podlabel: "blue-pod",
nadname: nsWithnad,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod2.createTestMultihomingPod(oc)
exutil.By("Verifying both pods with same label of blue-pod are ready for testing")
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=blue-pod")).NotTo(o.HaveOccurred())
exutil.By("Configuring pod3 for additional network using NAD2")
pod3 := testMultihomingPod{
name: "red-pod-1",
namespace: ns1,
podlabel: "red-pod",
nadname: nsWithnad,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod3.createTestMultihomingPod(oc)
exutil.By("Configuring pod4 for additional network NAD2")
pod4 := testMultihomingPod{
name: "red-pod-2",
namespace: ns1,
podlabel: "red-pod",
nadname: nsWithnad,
nodename: nodeList.Items[0].Name,
podenvname: "",
template: multihomingPodTemplate,
}
pod4.createTestMultihomingPod(oc)
exutil.By("Verifying both pods with same label of red-pod are ready for testing")
o.Expect(waitForPodWithLabelReady(oc, ns1, "name=red-pod")).NotTo(o.HaveOccurred())
exutil.By("Getting the deployed pods' names")
podList, podListErr := exutil.GetAllPods(oc, ns1)
o.Expect(podListErr).NotTo(o.HaveOccurred())
exutil.By("Getting the IPs of the pod1's secondary interface")
pod1v4, pod1v6 := getPodMultiNetwork(oc, ns1, podList[0])
exutil.By("Getting the IPs of the pod2's secondary interface")
pod2v4, pod2v6 := getPodMultiNetwork(oc, ns1, podList[1])
exutil.By("Getting the IPs of the pod3's secondary interface")
pod3v4, pod3v6 := getPodMultiNetwork(oc, ns1, podList[2])
exutil.By("Getting the IPs of the pod4's secondary interface")
pod4v4, pod4v6 := getPodMultiNetwork(oc, ns1, podList[3])
exutil.By("Verifying the curling should pass before applying multinetworkpolicy")
curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod1v4, pod1v6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod2v4, pod2v6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod1v4, pod1v6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod2v4, pod2v6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod4v4, pod4v6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod3v4, pod3v6)
exutil.By("Creating the ingress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
defer removeResource(oc, true, true, "multi-networkpolicy", "ingress-allow-same-podselector-with-same-namespaceselector", "-n", ns1)
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verifying the ingress-allow-same-podSelector-with-same-namespaceSelector policy is created in ns1")
o.Expect(output).To(o.ContainSubstring("ingress-allow-same-podselector-with-same-namespaceselector"))
exutil.By("Verifying the configured multinetworkpolicy will deny or allow the traffics as policy defined")
curlPod2PodMultiNetworkFail(oc, ns1, podList[2], pod1v4, pod1v6)
curlPod2PodMultiNetworkFail(oc, ns1, podList[2], pod2v4, pod2v6)
curlPod2PodMultiNetworkFail(oc, ns1, podList[3], pod1v4, pod1v6)
curlPod2PodMultiNetworkFail(oc, ns1, podList[3], pod2v4, pod2v6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[2], pod4v4, pod4v6)
curlPod2PodMultiNetworkPass(oc, ns1, podList[3], pod3v4, pod3v6)
})
g.It("Author:huirwang-NonPreRelease-Longduration-High-75503-Overlapping pod CIDRs/IPs are allowed in different primary NADs.", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
mtu int32 = 1300
)
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has fewer than two nodes.")
}
ipStackType := checkIPStackType(oc)
exutil.By("1. Obtain first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Obtain 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/26/29", "10.150.0.0/26/29"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2010:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/26/29,2010:100:200::0/60", "10.150.0.0/26/29,2010:100:200::0/60"}
}
}
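// The identical CIDRs across both namespaces are intentional: each primary UDN is its own
// topology, so overlapping pod subnets between different networks must be accepted.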
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: nadResourcename[i],
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, nadNS[i], nadResourcename[i]) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename[i])
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename[i])
}
}
exutil.By("Create replica pods in ns1")
createResourceFromFile(oc, ns1, testPodFile)
numberOfPods := "8"
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas="+numberOfPods, "-n", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS1Names := getPodName(oc, ns1, "name=test-pods")
e2e.Logf("Collect all the pods IPs in namespace %s", ns1)
var podsNS1IP1, podsNS1IP2 []string
for i := 0; i < len(testpodNS1Names); i++ {
podIP1, podIP2 := getPodIPUDN(oc, ns1, testpodNS1Names[i], "ovn-udn1")
if podIP2 != "" {
podsNS1IP2 = append(podsNS1IP2, podIP2)
}
podsNS1IP1 = append(podsNS1IP1, podIP1)
}
e2e.Logf("The IPs of pods in first namespace %s for UDN:\n %v %v", ns1, podsNS1IP1, podsNS1IP2)
exutil.By("create replica pods in ns2")
createResourceFromFile(oc, ns2, testPodFile)
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas="+numberOfPods, "-n", ns2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ns2, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS2Names := getPodName(oc, ns2, "name=test-pods")
e2e.Logf("Collect all the pods IPs in namespace %s", ns2)
var podsNS2IP1, podsNS2IP2 []string
for i := 0; i < len(testpodNS2Names); i++ {
podIP1, podIP2 := getPodIPUDN(oc, ns2, testpodNS2Names[i], "ovn-udn1")
if podIP2 != "" {
podsNS2IP2 = append(podsNS2IP2, podIP2)
}
podsNS2IP1 = append(podsNS2IP1, podIP1)
}
e2e.Logf("The IPs of pods in second namespace %s for UDN:\n %v %v", ns2, podsNS2IP1, podsNS2IP2)
testpodNS1NamesLen := len(testpodNS1Names)
podsNS1IP1Len := len(podsNS1IP1)
podsNS1IP2Len := len(podsNS1IP2)
exutil.By("Verify udn network should be able to access in same network.")
for i := 0; i < testpodNS1NamesLen; i++ {
for j := 0; j < podsNS1IP1Len; j++ {
if podsNS1IP2Len > 0 && podsNS1IP2[j] != "" {
_, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS1IP2[j], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
}
_, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS1IP1[j], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
}
}
podsNS2IP1Len := len(podsNS2IP1)
podsNS2IP2Len := len(podsNS2IP2)
exutil.By("Verify udn network should be isolated in different network.")
for i := 0; i < testpodNS1NamesLen; i++ {
for j := 0; j < podsNS2IP1Len; j++ {
if podsNS2IP2Len > 0 && podsNS2IP2[j] != "" {
if contains(podsNS1IP2, podsNS2IP2[j]) {
// the destination IP in ns2 duplicates one in ns1, so it is reachable and was already verified in the previous step
continue
} else {
_, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS2IP2[j], "8080"))
o.Expect(err).To(o.HaveOccurred())
}
}
if contains(podsNS1IP1, podsNS2IP1[j]) {
// the destination IP in ns2 duplicates one in ns1, so it is reachable and was already verified in the previous step
continue
} else {
_, err = e2eoutput.RunHostCmd(ns1, testpodNS1Names[i], "curl --connect-timeout 5 -s "+net.JoinHostPort(podsNS2IP1[j], "8080"))
o.Expect(err).To(o.HaveOccurred())
}
}
}
})
g.It("Author:meinli-High-75880-Check udn pods connection and isolation on user defined networks when NADs are created via CRD(Layer 3)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
exutil.By("3. Create CRD for UDN")
udnResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
udnNS := []string{ns1, ns2}
var cidr, ipv4cidr, ipv6cidr []string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
prefix = 64
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv4prefix = 24
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
ipv6prefix = 64
}
}
udncrd := make([]udnCRDResource, 2)
for i := 0; i < 2; i++ {
if ipStackType == "dualstack" {
udncrd[i] = udnCRDResource{
crdname: udnResourcename[i],
namespace: udnNS[i],
role: "Primary",
mtu: mtu,
IPv4cidr: ipv4cidr[i],
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr[i],
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd[i].createUdnCRDDualStack(oc)
} else {
udncrd[i] = udnCRDResource{
crdname: udnResourcename[i],
namespace: udnNS[i],
role: "Primary",
mtu: mtu,
cidr: cidr[i],
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd[i].createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("4. Create replica pods in ns1")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS1Names := getPodName(oc, ns1, "name=test-pods")
CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1])
exutil.By("5. create replica pods in ns2")
createResourceFromFile(oc, ns2, testPodFile)
err = waitForPodWithLabelReady(oc, ns2, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS2Names := getPodName(oc, ns2, "name=test-pods")
exutil.By("6. verify isolation on user defined networks")
//udn network connectivity should be isolated
CurlPod2PodFailUDN(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0])
//default network connectivity should also be isolated
CurlPod2PodFail(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0])
})
g.It("Author:meinli-High-75881-Check udn pods connection and isolation on user defined networks when NADs are created via CRD(Layer 2)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
exutil.By("3. Create CRD for UDN")
udnResourcename := []string{"l2-network-" + ns1, "l2-network-" + ns2}
udnNS := []string{ns1, ns2}
var cidr, ipv4cidr, ipv6cidr []string
if ipStackType == "ipv4single" {
cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
} else {
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
}
}
udncrd := make([]udnCRDResource, 2)
for i := 0; i < 2; i++ {
if ipStackType == "dualstack" {
udncrd[i] = udnCRDResource{
crdname: udnResourcename[i],
namespace: udnNS[i],
role: "Primary",
mtu: mtu,
IPv4cidr: ipv4cidr[i],
IPv6cidr: ipv6cidr[i],
template: udnCRDdualStack,
}
udncrd[i].createLayer2DualStackUDNCRD(oc)
} else {
udncrd[i] = udnCRDResource{
crdname: udnResourcename[i],
namespace: udnNS[i],
role: "Primary",
mtu: mtu,
cidr: cidr[i],
template: udnCRDSingleStack,
}
udncrd[i].createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("4. Create replica pods in ns1")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS1Names := getPodName(oc, ns1, "name=test-pods")
CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1])
exutil.By("5. create replica pods in ns2")
createResourceFromFile(oc, ns2, testPodFile)
err = waitForPodWithLabelReady(oc, ns2, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS2Names := getPodName(oc, ns2, "name=test-pods")
exutil.By("6. verify isolation on user defined networks")
//udn network connectivity should be isolated
CurlPod2PodFailUDN(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0])
//default network connectivity should also be isolated
CurlPod2PodFail(oc, ns1, testpodNS1Names[0], ns2, testpodNS2Names[0])
})
g.It("Author:asood-High-75899-Validate L2 and L3 Pod2Egress traffic in shared and local gateway mode", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDL2dualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnCRDL2SingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
udnCRDL3dualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml")
udnCRDL3SingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
udnNadtemplate = filepath.Join(buildPruningBaseDir, "udn/udn_nad_template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
mtu int32 = 1300
pingIPv4Cmd = "ping -c 2 8.8.8.8"
pingIPv6Cmd = "ping6 -c 2 2001:4860:4860::8888"
pingDNSCmd = "ping -c 2 www.google.com"
udnNS = []string{}
pingCmds = []string{}
)
if checkProxy(oc) {
g.Skip("This cluster has proxy configured, egress access cannot be tested on the cluster, skip the test.")
}
ipStackType := checkIPStackType(oc)
if ipStackType == "dualstack" || ipStackType == "ipv6single" {
if !checkIPv6PublicAccess(oc) {
g.Skip("This cluster is dualstack/IPv6 with no access to public websites, egress access cannot be tested on the cluster, skip the test.")
}
}
e2e.Logf("The gateway mode of the cluster is %s", getOVNGatewayMode(oc))
exutil.By("1. Create four UDN namespaces")
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
udnNS = append(udnNS, oc.Namespace())
}
var cidr, ipv4cidr, ipv6cidr []string
var prefix, ipv4prefix, ipv6prefix int32
pingCmds = append(pingCmds, pingDNSCmd)
if ipStackType == "ipv4single" {
cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
prefix = 24
pingCmds = append(pingCmds, pingIPv4Cmd)
} else {
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
prefix = 64
pingCmds = append(pingCmds, pingIPv6Cmd)
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
ipv4prefix = 24
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
ipv6prefix = 64
pingCmds = append(pingCmds, pingIPv4Cmd)
pingCmds = append(pingCmds, pingIPv6Cmd)
}
}
exutil.By("2. Create CRD for UDN in first two namespaces")
udnResourcename := []string{"l2-network-" + udnNS[0], "l3-network-" + udnNS[1]}
udnDSTemplate := []string{udnCRDL2dualStack, udnCRDL3dualStack}
udnSSTemplate := []string{udnCRDL2SingleStack, udnCRDL3SingleStack}
udncrd := make([]udnCRDResource, 2)
for i := 0; i < 2; i++ {
if ipStackType == "dualstack" {
udncrd[i] = udnCRDResource{
crdname: udnResourcename[i],
namespace: udnNS[i],
role: "Primary",
mtu: mtu,
IPv4cidr: ipv4cidr[i],
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr[i],
IPv6prefix: ipv6prefix,
template: udnDSTemplate[i],
}
switch i {
case 0:
udncrd[0].createLayer2DualStackUDNCRD(oc)
case 1:
udncrd[1].createUdnCRDDualStack(oc)
}
} else {
udncrd[i] = udnCRDResource{
crdname: udnResourcename[i],
namespace: udnNS[i],
role: "Primary",
mtu: mtu,
cidr: cidr[i],
prefix: prefix,
template: udnSSTemplate[i],
}
switch i {
case 0:
udncrd[0].createLayer2SingleStackUDNCRD(oc)
case 1:
udncrd[1].createUdnCRDSingleStack(oc)
}
}
err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("3. Create NAD for UDN in last two namespaces")
udnNADResourcename := []string{"l2-network-" + udnNS[2], "l3-network-" + udnNS[3]}
topology := []string{"layer2", "layer3"}
udnnad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
udnnad[i] = udnNetDefResource{
nadname: udnNADResourcename[i],
namespace: udnNS[i+2],
nad_network_name: udnNADResourcename[i],
topology: topology[i],
subnet: "",
mtu: mtu,
net_attach_def_name: fmt.Sprintf("%s/%s", udnNS[i+2], udnNADResourcename[i]),
role: "primary",
template: udnNadtemplate,
}
if ipStackType == "dualstack" {
udnnad[i].subnet = fmt.Sprintf("%s,%s", ipv4cidr[i], ipv6cidr[i])
} else {
udnnad[i].subnet = cidr[i]
}
udnnad[i].createUdnNad(oc)
}
exutil.By("4. Create replica pods in namespaces")
for _, ns := range udnNS {
e2e.Logf("Validating in %s namespace", ns)
createResourceFromFile(oc, ns, testPodFile)
err := waitForPodWithLabelReady(oc, ns, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "Pods with label name=test-pods not ready")
testpodNSNames := getPodName(oc, ns, "name=test-pods")
CurlPod2PodPassUDN(oc, ns, testpodNSNames[0], ns, testpodNSNames[1])
for _, pingCmd := range pingCmds {
pingResponse, err := execCommandInSpecificPod(oc, ns, testpodNSNames[0], pingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(pingResponse, "0% packet loss")).To(o.BeTrue())
}
}
})
g.It("Author:meinli-High-75955-Verify UDN failed message when user defined join subnet overlaps user defined subnet (Layer3)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDL3dualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml")
udnCRDL3SingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
UserDefinedPrimaryNetworkJoinSubnetV4 = "100.65.0.0/16"
UserDefinedPrimaryNetworkJoinSubnetV6 = "fd99::/48"
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create namespace")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
exutil.By("2. Create CRD for UDN")
var udncrd udnCRDResource
var cidr string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = UserDefinedPrimaryNetworkJoinSubnetV4
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = UserDefinedPrimaryNetworkJoinSubnetV6
prefix = 64
} else {
ipv4prefix = 24
ipv6prefix = 64
}
}
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-75955",
namespace: ns,
role: "Primary",
mtu: mtu,
IPv4cidr: UserDefinedPrimaryNetworkJoinSubnetV4,
IPv4prefix: ipv4prefix,
IPv6cidr: UserDefinedPrimaryNetworkJoinSubnetV6,
IPv6prefix: ipv6prefix,
template: udnCRDL3dualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-75955",
namespace: ns,
role: "Primary",
mtu: mtu,
cidr: cidr,
prefix: prefix,
template: udnCRDL3SingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).To(o.HaveOccurred())
exutil.By("3. Check UDN failed message")
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("userdefinednetwork.k8s.ovn.org", udncrd.crdname, "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.Or(
o.ContainSubstring(fmt.Sprintf("illegal network configuration: user defined join subnet \"100.65.0.0/16\" overlaps user defined subnet \"%s\"", UserDefinedPrimaryNetworkJoinSubnetV4)),
o.ContainSubstring(fmt.Sprintf("illegal network configuration: user defined join subnet \"fd99::/64\" overlaps user defined subnet \"%s\"", UserDefinedPrimaryNetworkJoinSubnetV6))))
})
g.It("Author:anusaxen-Critical-75984-Check udn pods isolation on user defined networks post OVN gateway migration", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
exutil.By("3. Create 3rd namespace")
oc.CreateNamespaceUDN()
ns3 := oc.Namespace()
exutil.By("4. Create 4th namespace")
oc.CreateNamespaceUDN()
ns4 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2, "l2-network-" + ns3, "l2-network-" + ns4}
nadNS := []string{ns1, ns2, ns3, ns4}
topo := []string{"layer3", "layer3", "layer2", "layer2"}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.151.0.0/16/24", "10.152.0.0/16", "10.153.0.0/16"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60", "2012:100:200::0/60", "2013:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60", "10.152.0.0/16,2012:100:200::0/60", "10.153.0.0/16,2013:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 4)
for i := 0; i < 4; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: nadResourcename[i],
topology: topo[i],
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
}
pod := make([]udnPodResource, 4)
for i := 0; i < 4; i++ {
exutil.By("create a udn hello pods in ns1 ns2 ns3 and ns4")
pod[i] = udnPodResource{
name: "hello-pod",
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[i].createUdnPod(oc)
waitPodReady(oc, pod[i].namespace, pod[i].name)
}
exutil.By("create another udn hello pod in ns1 to ensure layer3 conectivity post migration among'em")
pod_ns1 := udnPodResource{
name: "hello-pod-ns1",
namespace: nadNS[0],
label: "hello-pod",
template: udnPodTemplate,
}
pod_ns1.createUdnPod(oc)
waitPodReady(oc, pod_ns1.namespace, pod_ns1.name)
exutil.By("create another udn hello pod in ns3 to ensure layer2 conectivity post migration among'em")
pod_ns3 := udnPodResource{
name: "hello-pod-ns3",
namespace: nadNS[2],
label: "hello-pod",
template: udnPodTemplate,
}
pod_ns3.createUdnPod(oc)
waitPodReady(oc, pod_ns3.namespace, pod_ns3.name)
//need to find out the original gateway mode of the cluster so that we can revert to it post test
var desiredMode string
origMode := getOVNGatewayMode(oc)
if origMode == "local" {
desiredMode = "shared"
} else {
desiredMode = "local"
}
e2e.Logf("Cluster is currently on gateway mode %s", origMode)
e2e.Logf("Desired mode is %s", desiredMode)
defer switchOVNGatewayMode(oc, origMode)
switchOVNGatewayMode(oc, desiredMode)
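// Switching between shared and local gateway modes triggers a rollout of the OVN-Kubernetes pods and
// reprograms the gateway datapath; the checks below assert that UDN isolation across namespaces and
// connectivity within a namespace both hold after the migration.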
//udn network connectivity for layer3 should be isolated
CurlPod2PodFailUDN(oc, ns1, pod[0].name, ns2, pod[1].name)
//default network connectivity for layer3 should also be isolated
CurlPod2PodFail(oc, ns1, pod[0].name, ns2, pod[1].name)
//udn network connectivity for layer2 should be isolated
CurlPod2PodFailUDN(oc, ns3, pod[2].name, ns4, pod[3].name)
//default network connectivity for layer2 should also be isolated
CurlPod2PodFail(oc, ns3, pod[2].name, ns4, pod[3].name)
//ensure udn network connectivity for layer3 is still intact within ns1
CurlPod2PodPassUDN(oc, ns1, pod[0].name, ns1, pod_ns1.name)
//ensure udn network connectivity for layer2 is still intact within ns3
CurlPod2PodPassUDN(oc, ns3, pod[2].name, ns3, pod_ns3.name)
})
g.It("Author:anusaxen-NonPreRelease-Longduration-Critical-76939-Check udn pods isolation on a scaled node [Disruptive]", func() {
var (
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
udnPodTemplateNode = filepath.Join(testDataDirUDN, "udn_test_pod_template_node.yaml")
udnCRDSingleStack = filepath.Join(testDataDirUDN, "udn_crd_singlestack_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType != "ipv4single" {
g.Skip("This case requires IPv4 single stack cluster")
}
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.OpenStack)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
udnResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
udnNS := []string{ns1, ns2}
var cidr []string
var prefix int32
cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
prefix = 24
udncrd := make([]udnCRDResource, 2)
for i := 0; i < 2; i++ {
udncrd[i] = udnCRDResource{
crdname: udnResourcename[i],
namespace: udnNS[i],
role: "Primary",
mtu: mtu,
cidr: cidr[i],
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd[i].createUdnCRDSingleStack(oc)
err := waitUDNCRDApplied(oc, udnNS[i], udncrd[i].crdname)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
//the following code block scales up a node on the cluster
exutil.By("3. Create a new machineset and get the new node created")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-76939"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
o.Expect(len(machineName)).ShouldNot(o.Equal(0))
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName[0])
e2e.Logf("Get nodeName: %v", nodeName)
checkNodeStatus(oc, nodeName, "Ready")
exutil.By("create a udn hello pod in ns2")
pod2 := udnPodResourceNode{
name: "hello-pod-ns2",
namespace: ns2,
label: "hello-pod",
nodename: nodeName,
template: udnPodTemplateNode,
}
pod2.createUdnPodNode(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
//udn network connectivity should be isolated
CurlPod2PodFailUDN(oc, ns1, pod1.name, ns2, pod2.name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name)
})
g.It("Author:meinli-NonHyperShiftHOST-High-77517-Validate pod2pod connection within and across node when creating UDN with Secondary role from same namespace (Layer3)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
mtu int32 = 9000
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Get namespace and worker node")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("2. create UDN with Secondary role and Primary role")
var cidr, ipv4cidr, ipv6cidr []string
var prefix, ipv4prefix, ipv6prefix int32
cidr = []string{"10.150.0.0/16", "10.200.0.0/16"}
prefix = 24
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
prefix = 64
}
ipv4cidr = []string{"10.150.0.0/16", "10.200.0.0/16"}
ipv4prefix = 24
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
ipv6prefix = 64
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "l3-secondary-77517",
namespace: ns,
role: "Secondary",
mtu: mtu,
IPv4cidr: ipv4cidr[0],
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr[0],
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "l3-secondary-77517",
namespace: ns,
role: "Secondary",
mtu: mtu,
cidr: cidr[0],
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
// create Primary UDN
createGeneralUDNCRD(oc, ns, "l3-primary-77517", ipv4cidr[1], ipv6cidr[1], cidr[1], "layer3")
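// With both networks in place, each pod should end up with the primary UDN interface (ovn-udn1, attached
// automatically) plus the secondary interface (net1, requested via the NAD annotation in the pod template).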
exutil.By("3. Create 2 pods within the same node and 1 pod across with different nodes")
pods := make([]udnPodSecNADResourceNode, 3)
var podNames []string
for i := 0; i < 2; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns,
nadname: udncrd.crdname,
nodename: nodeList.Items[i].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[i].name)
podNames = append(podNames, pods[i].name)
}
pods[2] = udnPodSecNADResourceNode{
name: "hello-pod-2",
namespace: ns,
nadname: udncrd.crdname,
nodename: nodeList.Items[1].Name,
template: udnPodTemplate,
}
pods[2].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[2].name)
podNames = append(podNames, pods[2].name)
exutil.By("4. Check pods subnet overlap within and across nodes")
o.Expect(checkPodCIDRsOverlap(oc, ns, ipStackType, []string{podNames[2], podNames[0]}, "net1")).Should(o.BeFalse())
o.Expect(checkPodCIDRsOverlap(oc, ns, ipStackType, []string{podNames[2], podNames[1]}, "net1")).Should(o.BeTrue())
exutil.By("5. Validate pod2pod connection within the same node and across with different nodes")
CurlUDNPod2PodPassMultiNetwork(oc, ns, ns, podNames[2], "net1", podNames[0], "net1")
CurlUDNPod2PodPassMultiNetwork(oc, ns, ns, podNames[2], "net1", podNames[1], "net1")
exutil.By("6. Validate isolation between Primary and Secondary interface")
CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "ovn-udn1", podNames[1], "net1")
CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "net1", podNames[1], "ovn-udn1")
})
g.It("Author:meinli-NonHyperShiftHOST-High-77519-Validate pod2pod isolation within and across nodes when creating UDN with Secondary role from different namespaces (Layer3)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
mtu int32 = 9000
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Get namespace and worker node")
ns1 := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("2. create UDN with Secondary role in ns1")
var cidr, ipv4cidr, ipv6cidr []string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = []string{"10.150.0.0/16", "10.200.0.0/16"}
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
prefix = 64
} else {
ipv4cidr = []string{"10.150.0.0/16", "10.200.0.0/16"}
ipv4prefix = 24
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
ipv6prefix = 64
}
}
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "l3-secondary",
namespace: ns1,
role: "Secondary",
mtu: mtu,
IPv4cidr: ipv4cidr[0],
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr[0],
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "l3-secondary",
namespace: ns1,
role: "Secondary",
mtu: mtu,
cidr: cidr[0],
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. validate Layer3 router is created in OVN")
ovnMasterPodName := getOVNKMasterOVNkubeNode(oc)
o.Expect(ovnMasterPodName).NotTo(o.BeEmpty())
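// For a layer3 network, OVN-Kubernetes creates a per-network cluster router; the name checked below
// suggests the network name is normalized (dashes become dots) and suffixed with _ovn_cluster_router.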
o.Eventually(func() bool {
return checkOVNRouter(oc, "l3.secondary_ovn_cluster_router", ovnMasterPodName)
}, 20*time.Second, 5*time.Second).Should(o.BeTrue(), "The correct OVN router is not created")
exutil.By("4. create 1 pod with secondary annotation in ns1")
var podNames []string
// create 1 pod in ns1
pod1 := udnPodSecNADResourceNode{
name: "hello-pod-ns1",
namespace: ns1,
nadname: udncrd.crdname,
nodename: nodeList.Items[0].Name,
template: udnPodTemplate,
}
pod1.createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns1, pod1.name)
podNames = append(podNames, pod1.name)
exutil.By("5. create UDN with secondary role in ns2")
// create 2nd namespace
oc.SetupProject()
ns2 := oc.Namespace()
udncrd.namespace = ns2
if ipStackType == "dualstack" {
udncrd.IPv4cidr = ipv4cidr[1]
udncrd.IPv6cidr = ipv6cidr[1]
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd.cidr = cidr[1]
udncrd.createUdnCRDSingleStack(oc)
}
err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6. create 2 pods with secondary annotation in ns2")
pods := make([]udnPodSecNADResourceNode, 2)
//create 2 pods in ns2
for i := 0; i < 2; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns2,
nadname: udncrd.crdname,
nodename: nodeList.Items[i].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns2, pods[i].name)
podNames = append(podNames, pods[i].name)
}
exutil.By("7. Validate pod2pod isolation from secondary network in different namespaces")
CurlUDNPod2PodFailMultiNetwork(oc, ns1, ns2, podNames[0], "net1", podNames[1], "net1")
CurlUDNPod2PodFailMultiNetwork(oc, ns1, ns2, podNames[0], "net1", podNames[2], "net1")
CurlUDNPod2PodPassMultiNetwork(oc, ns2, ns2, podNames[1], "net1", podNames[2], "net1")
})
g.It("Author:meinli-NonHyperShiftHOST-High-77563-Validate pod2pod connection within and across node when creating UDN with Secondary role from same namespace (Layer2)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
mtu int32 = 9000
podenvname = "Hello OpenShift"
)
exutil.By("1. Get namespace and worker node")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("2. create Layer2 UDN with Secondary role and Primary role")
ipStackType := checkIPStackType(oc)
var cidr string
var ipv4cidr, ipv6cidr []string
cidr = "10.200.0.0/16"
if ipStackType == "ipv6single" {
cidr = "2011:100:200::0/60"
}
ipv4cidr = []string{"10.150.0.0/16", "10.200.0.0/16"}
ipv6cidr = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
udncrd := udnCRDResource{
crdname: "l2-secondary",
namespace: ns,
role: "Secondary",
mtu: mtu,
IPv4cidr: ipv4cidr[0],
IPv6cidr: ipv6cidr[0],
template: udnCRDdualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
err = waitUDNCRDApplied(oc, udncrd.namespace, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
createGeneralUDNCRD(oc, ns, "l2-primary-network", ipv4cidr[1], ipv6cidr[1], cidr, "layer2")
exutil.By("3. create 2 pods within the same node and 1 pod across with different nodes")
pods := make([]udnPodSecNADResourceNode, 3)
var podNames []string
for i := 0; i < 2; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns,
nadname: udncrd.crdname,
nodename: nodeList.Items[i].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[i].name)
podNames = append(podNames, pods[i].name)
}
pods[2] = udnPodSecNADResourceNode{
name: "hello-pod-2",
namespace: ns,
nadname: udncrd.crdname,
nodename: nodeList.Items[1].Name,
template: udnPodTemplate,
}
pods[2].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[2].name)
podNames = append(podNames, pods[2].name)
exutil.By("4. Check pods subnet overlap within and across nodes")
o.Expect(checkPodCIDRsOverlap(oc, ns, "dualstack", []string{podNames[2], podNames[0]}, "net1")).Should(o.BeTrue())
o.Expect(checkPodCIDRsOverlap(oc, ns, "dualstack", []string{podNames[2], podNames[1]}, "net1")).Should(o.BeTrue())
exutil.By("5. Validate pod2pod connection (dual stack) within the same node")
pod0IPv4, pod0IPv6 := getPodMultiNetwork(oc, ns, podNames[0])
e2e.Logf("Pod0 IPv4 address is: %v, IPv6 address is: %v", pod0IPv4, pod0IPv6)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod0IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod0IPv6, "net1", podenvname)
exutil.By("6. Validate pod2pod connection (dual stack) across with different nodes")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[1])
e2e.Logf("Pod1 IPv4 address is: %v, IPv6 address is: %v", pod1IPv4, pod1IPv6)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname)
exutil.By("7. Validate isolation between primary and secondary role")
CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "ovn-udn1", podNames[1], "net1")
CurlUDNPod2PodFailMultiNetwork(oc, ns, ns, podNames[0], "net1", podNames[1], "ovn-udn1")
})
g.It("Author:meinli-NonHyperShiftHOST-High-77564-Validate pod2pod isolation within and across node when creating UDN with Secondary role from different namespaces (Layer2)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
mtu int32 = 9000
podenvname = "Hello OpenShift"
)
exutil.By("1. Get namespace and worker node")
ns1 := oc.Namespace()
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
exutil.By("2. Create Layer2 UDN with Secondary role in ns1")
ipv4cidr := []string{"10.150.0.0/16", "10.200.0.0/16"}
ipv6cidr := []string{"2010:100:200::0/60", "2011:100:200::0/60"}
udncrd1 := udnCRDResource{
crdname: "l2-secondary-ns1",
namespace: ns1,
role: "Secondary",
mtu: mtu,
IPv4cidr: ipv4cidr[0],
IPv6cidr: ipv6cidr[0],
template: udnCRDdualStack,
}
udncrd1.createLayer2DualStackUDNCRD(oc)
err = waitUDNCRDApplied(oc, udncrd1.namespace, udncrd1.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. create 1 pod with secondary annotation in ns1")
var podNames []string
// create 1 pod in ns1
pod1 := udnPodSecNADResourceNode{
name: "hello-pod-ns1",
namespace: ns1,
nadname: udncrd1.crdname,
nodename: nodeList.Items[0].Name,
template: udnPodTemplate,
}
pod1.createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns1, pod1.name)
podNames = append(podNames, pod1.name)
exutil.By("4. create Layer2 UDN with secondary role in ns2")
// create 2nd namespace
oc.SetupProject()
ns2 := oc.Namespace()
udncrd2 := udnCRDResource{
crdname: "l2-secondary-ns2",
namespace: ns2,
role: "Secondary",
mtu: mtu,
IPv4cidr: ipv4cidr[1],
IPv6cidr: ipv6cidr[1],
template: udnCRDdualStack,
}
udncrd2.createLayer2DualStackUDNCRD(oc)
err = waitUDNCRDApplied(oc, udncrd2.namespace, udncrd2.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. create pods with secondary annotation in ns2")
pods := make([]udnPodSecNADResourceNode, 2)
//create 2 pods in ns2
for i := 0; i < 2; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns2,
nadname: udncrd2.crdname,
nodename: nodeList.Items[i].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns2, pods[i].name)
podNames = append(podNames, pods[i].name)
}
exutil.By("6. validate pod2pod isolation (dual stack) within the same node")
pod0IPv4, pod0IPv6 := getPodMultiNetwork(oc, ns2, podNames[1])
e2e.Logf("Pod0 IPv4 address is: %v, IPv6 address is: %v", pod0IPv4, pod0IPv6)
CurlMultusPod2PodFail(oc, ns1, podNames[0], pod0IPv4, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns1, podNames[0], pod0IPv6, "net1", podenvname)
exutil.By("7. validate pod2pod isolation (dual stack) across with different node")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns2, podNames[2])
e2e.Logf("Pod1 IPv4 address is: %v, IPv6 address is: %v", pod1IPv4, pod1IPv6)
CurlMultusPod2PodFail(oc, ns1, podNames[0], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns1, podNames[0], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns2, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns2, podNames[1], pod1IPv6, "net1", podenvname)
})
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-High-77656-Verify ingress-ipblock policy for UDN pod's secondary interface (Layer2). [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
multinetworkipBlockIngressTemplateDual = filepath.Join(buildPruningBaseDir, "multihoming/multiNetworkPolicy_ingress_ipblock_template.yaml")
patchSResource = "networks.operator.openshift.io/cluster"
mtu int32 = 9000
podenvname = "Hello OpenShift"
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("The cluster has no ready node for the testing")
}
exutil.By("Getting the namespace name")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
exutil.By("Enabling useMultiNetworkPolicy in the cluster")
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False")
waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("Wait for the NetworkOperator to become functional after enabling useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False")
waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
exutil.By("Creating Layer2 UDN CRD with Primary role")
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-77656",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDdualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-77656",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDSingleStack,
}
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating Layer2 UDN CRD with Secondary role")
ipv4cidr1 := "20.200.200.0/24"
ipv6cidr1 := "2000:200:200::0/64"
nadName1 := "ipblockingress77656"
nsWithnad := ns + "/" + nadName1
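// multi-networkpolicy selects the target network by its NAD reference in <namespace>/<nad-name> form
// (consumed by the policy template, typically via the k8s.v1.cni.cncf.io/policy-for annotation).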
udncrd1 := udnCRDResource{
crdname: nadName1,
namespace: ns,
role: "Secondary",
mtu: mtu,
IPv4cidr: ipv4cidr1,
IPv6cidr: ipv6cidr1,
template: udnCRDdualStack,
}
udncrd1.createLayer2DualStackUDNCRD(oc)
err = waitUDNCRDApplied(oc, udncrd1.namespace, udncrd1.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating three testing pods consuming above network-attach-definition in ns")
pods := make([]udnPodSecNADResourceNode, 3)
var podNames []string
for i := 0; i < 3; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns,
nadname: udncrd1.crdname,
nodename: nodeList.Items[0].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[i].name)
podNames = append(podNames, pods[i].name)
}
exutil.By("Verifying the all pods get dual IPs")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[0])
pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns, podNames[1])
pod3IPv4, pod3IPv6 := getPodMultiNetwork(oc, ns, podNames[2])
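// Appending the /32 (IPv4) and /128 (IPv6) host prefixes turns pod3's addresses into single-host
// ipBlock CIDRs, so the policy below matches traffic from pod3 only.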
pod3IPv4WithCidr := pod3IPv4 + "/32"
pod3IPv6WithCidr := pod3IPv6 + "/128"
exutil.By("Verifying that there is no traffic blocked between pods")
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname)
exutil.By("Creating ipBlock Ingress Dual CIDRs Policy to allow traffic only from pod3")
defer removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-ingress", "-n", ns)
IPBlock := multinetworkipBlockCIDRsDual{
name: "multinetworkipblock-dual-cidrs-ingress",
namespace: ns,
cidrIpv4: pod3IPv4WithCidr,
cidrIpv6: pod3IPv6WithCidr,
policyfor: nsWithnad,
template: multinetworkipBlockIngressTemplateDual,
}
IPBlock.createMultinetworkipBlockCIDRDual(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("multinetworkipblock-dual-cidrs-ingress"))
exutil.By("Verifying the ipBlock Ingress Dual CIDRs policy ensures that only traffic from pod3 is allowed")
CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname)
exutil.By("Deleting ipBlock Ingress Dual CIDRs Policy")
removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-ingress", "-n", ns)
policyoutput1, policyerr1 := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output()
o.Expect(policyerr1).NotTo(o.HaveOccurred())
o.Expect(policyoutput1).NotTo(o.ContainSubstring("multinetworkipblock-dual-cidrs-ingress"))
exutil.By("Verifying that there is no traffic blocked between pods after deleting policy")
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname)
})
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-78125-Verify egress-ipblock policy for UDN pod's secondary interface (Layer2). [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
multinetworkipBlockegressTemplateDual = filepath.Join(buildPruningBaseDir, "multihoming/multiNetworkPolicy_egress_ipblock_template.yaml")
patchSResource = "networks.operator.openshift.io/cluster"
mtu int32 = 9000
podenvname = "Hello OpenShift"
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("The cluster has no ready node for the testing")
}
exutil.By("Getting the namespace name")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
exutil.By("Enabling useMultiNetworkPolicy in the cluster")
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False")
waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
exutil.By("Waitting for the NetworkOperator to become functional after enabling useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 10, 5, "True.*True.*False")
waitForNetworkOperatorState(oc, 60, 15, "True.*False.*False")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
exutil.By("Creating Layer2 UDN CRD with Primary role")
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-78125",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDdualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-78125",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDSingleStack,
}
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating Layer2 UDN CRD with Secondary role")
ipv4cidr1 := "20.200.200.0/24"
ipv6cidr1 := "2000:200:200::0/64"
nadName1 := "ipblockegress78125"
nsWithnad := ns + "/" + nadName1
udncrd1 := udnCRDResource{
crdname: nadName1,
namespace: ns,
role: "Secondary",
mtu: mtu,
IPv4cidr: ipv4cidr1,
IPv6cidr: ipv6cidr1,
template: udnCRDdualStack,
}
udncrd1.createLayer2DualStackUDNCRD(oc)
err = waitUDNCRDApplied(oc, udncrd1.namespace, udncrd1.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating three testing pods consuming above network-attach-definition in ns")
pods := make([]udnPodSecNADResourceNode, 3)
var podNames []string
for i := 0; i < 3; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns,
nadname: udncrd1.crdname,
nodename: nodeList.Items[0].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[i].name)
podNames = append(podNames, pods[i].name)
}
exutil.By("Verifying the all pods get dual IPs")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[0])
pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns, podNames[1])
pod3IPv4, pod3IPv6 := getPodMultiNetwork(oc, ns, podNames[2])
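// Same single-host ipBlock trick as the ingress case: /32 and /128 limit the egress CIDRs to pod3's addresses.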
pod3IPv4WithCidr := pod3IPv4 + "/32"
pod3IPv6WithCidr := pod3IPv6 + "/128"
exutil.By("Verifying that there is no traffic blocked between pods")
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv6, "net1", podenvname)
exutil.By("Creating ipBlock egress Dual CIDRs Policy to allow traffic only to pod3")
defer removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-egress", "-n", ns)
IPBlock := multinetworkipBlockCIDRsDual{
name: "multinetworkipblock-dual-cidrs-egress",
namespace: ns,
cidrIpv4: pod3IPv4WithCidr,
cidrIpv6: pod3IPv6WithCidr,
policyfor: nsWithnad,
template: multinetworkipBlockegressTemplateDual,
}
IPBlock.createMultinetworkipBlockCIDRDual(oc)
policyoutput, policyerr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output()
o.Expect(policyerr).NotTo(o.HaveOccurred())
o.Expect(policyoutput).To(o.ContainSubstring("multinetworkipblock-dual-cidrs-egress"))
exutil.By("Verifying the ipBlock egress Dual CIDRs policy ensures that only traffic to pod3 is allowed")
CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodFail(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv6, "net1", podenvname)
exutil.By("Deleting ipBlock egress Dual CIDRs Policy")
removeResource(oc, true, true, "multi-networkpolicy", "multinetworkipblock-dual-cidrs-egress", "-n", ns)
policyoutput1, policyerr1 := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output()
o.Expect(policyerr1).NotTo(o.HaveOccurred())
o.Expect(policyoutput1).NotTo(o.ContainSubstring("multinetworkipblock-dual-cidrs-egress"))
exutil.By("Verifying that there is no traffic blocked between pods after deleting policy")
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod3IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod3IPv6, "net1", podenvname)
})
g.It("Author:meinli-Medium-78329-Validate pod2pod on diff workers and host2pod on same/diff workers (UDN Layer3 with Primary role)", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
)
exutil.By("1. Get worker node and namespace")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
oc.CreateNamespaceUDN()
ns := oc.Namespace()
exutil.By("2. Create UDN CRD Layer3 with Primary role")
err = applyL3UDNtoNamespace(oc, ns, 0)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. Create two pods on diff workers in ns")
pods := make([]pingPodResourceNode, 2)
for i := 0; i < 2; i++ {
pods[i] = pingPodResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns,
nodename: nodeList.Items[i].Name,
template: pingPodNodeTemplate,
}
pods[i].createPingPodNode(oc)
waitPodReady(oc, ns, pods[i].name)
}
exutil.By("4. Validate pod to pod on different workers")
CurlPod2PodPassUDN(oc, ns, pods[0].name, ns, pods[1].name)
exutil.By("5. validate host to pod on same and diff workers")
CurlNode2PodFailUDN(oc, nodeList.Items[0].Name, ns, pods[0].name)
CurlNode2PodFailUDN(oc, nodeList.Items[0].Name, ns, pods[1].name)
})
g.It("Author:qiowang-High-77542-Check default network ports can be exposed on UDN pods(layer3) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml")
statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
tcpPort = 8080
udpPort = 6000
sctpPort = 30102
)
exutil.By("Preparing the nodes for SCTP")
prepareSCTPModule(oc, sctpModule)
exutil.By("1. Create the first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
exutil.By("2. Create a hello pod in ns1")
createResourceFromFile(oc, ns1, statefulSetHelloPod)
pod1Err := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(pod1Err, "The statefulSet pod is not ready")
pod1Name := getPodName(oc, ns1, "app=hello")[0]
exutil.By("3. Create the 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
exutil.By("4. Create CRD for UDN in ns2")
err := applyL3UDNtoNamespace(oc, ns2, 0)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Create a udn hello pod in ns2")
createResourceFromFile(oc, ns2, statefulSetHelloPod)
pod2Err := waitForPodWithLabelReady(oc, ns2, "app=hello")
exutil.AssertWaitPollNoErr(pod2Err, "The statefulSet pod is not ready")
pod2Name := getPodName(oc, ns2, "app=hello")[0]
exutil.By("6. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should not be able to access")
PingPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name)
CurlPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, false)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, false)
exutil.By("7. Add annotation to expose default network port on udn pod")
annotationConf := `k8s.ovn.org/open-default-ports=[{"protocol":"icmp"}, {"protocol":"tcp","port":` + strconv.Itoa(tcpPort) + `}, {"protocol":"udp","port":` + strconv.Itoa(udpPort) + `}, {"protocol":"sctp","port":` + strconv.Itoa(sctpPort) + `}]`
err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("pod", pod2Name, "-n", ns2, "--overwrite", annotationConf).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("8. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should be able to access")
PingPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name)
CurlPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, true)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, true)
})
g.It("Author:qiowang-High-77742-Check default network ports can be exposed on UDN pods(layer2) [Serial]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
statefulSetHelloPod = filepath.Join(buildPruningBaseDir, "statefulset-hello.yaml")
tcpPort = 8080
udpPort = 6000
sctpPort = 30102
)
exutil.By("Preparing the nodes for SCTP")
prepareSCTPModule(oc, sctpModule)
exutil.By("1. Create the first namespace")
oc.SetupProject()
ns1 := oc.Namespace()
exutil.By("2. Create a hello pod in ns1")
createResourceFromFile(oc, ns1, statefulSetHelloPod)
pod1Err := waitForPodWithLabelReady(oc, ns1, "app=hello")
exutil.AssertWaitPollNoErr(pod1Err, "The statefulSet pod is not ready")
pod1Name := getPodName(oc, ns1, "app=hello")[0]
exutil.By("3. Create the 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
exutil.By("4. Create CRD for UDN in ns2")
var cidr, ipv4cidr, ipv6cidr string
ipStackType := checkIPStackType(oc)
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
udncrd := udnCRDResource{
crdname: "udn-l2-network-77742",
namespace: ns2,
role: "Primary",
mtu: 1300,
}
if ipStackType == "dualstack" {
udncrd.IPv4cidr = ipv4cidr
udncrd.IPv6cidr = ipv6cidr
udncrd.template = udnCRDdualStack
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd.cidr = cidr
udncrd.template = udnCRDSingleStack
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, ns2, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Create a udn hello pod in ns2")
createResourceFromFile(oc, ns2, statefulSetHelloPod)
pod2Err := waitForPodWithLabelReady(oc, ns2, "app=hello")
exutil.AssertWaitPollNoErr(pod2Err, "The statefulSet pod is not ready")
pod2Name := getPodName(oc, ns2, "app=hello")[0]
exutil.By("6. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should not be able to access")
PingPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name)
CurlPod2PodFail(oc, ns1, pod1Name, ns2, pod2Name)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, false)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, false)
exutil.By("7. Add annotation to expose default network port on udn pod")
annotationConf := `k8s.ovn.org/open-default-ports=[{"protocol":"icmp"}, {"protocol":"tcp","port":` + strconv.Itoa(tcpPort) + `}, {"protocol":"udp","port":` + strconv.Itoa(udpPort) + `}, {"protocol":"sctp","port":` + strconv.Itoa(sctpPort) + `}]`
err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("pod", pod2Name, "-n", ns2, "--overwrite", annotationConf).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("8. Check ICMP/TCP/UDP/SCTP traffic between pods in ns1 and ns2, should be able to access")
PingPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name)
CurlPod2PodPass(oc, ns1, pod1Name, ns2, pod2Name)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "UDP", udpPort, true)
verifyConnPod2Pod(oc, ns1, pod1Name, ns2, pod2Name, "SCTP", sctpPort, true)
})
g.It("Author:meinli-Medium-78492-[CUDN layer3] Validate CUDN enable creating shared OVN network across multiple namespaces. [Serial]", func() {
var (
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
matchLabelKey = "test.io"
matchValue = "cudn-network-" + getRandomString()
crdName = "cudn-network-78492"
)
exutil.By("1. Create three namespaces, first two for CUDN and label them with cudn selector, last namespace is for default network")
var allNS []string
for i := 0; i < 3; i++ {
if i != 2 {
oc.CreateNamespaceUDN()
allNS = append(allNS, oc.Namespace())
} else {
oc.SetupProject()
allNS = append(allNS, oc.Namespace())
}
if i < 2 {
ns := allNS[i]
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("2. create CUDN with two namespaces")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
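// The CUDN controller matches namespaces by label and renders a NetworkAttachmentDefinition into each
// selected namespace, so all matched namespaces share one OVN network.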
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
_, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer3")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. create pods in ns1 and ns2, one pod in ns3")
pods := make([]udnPodResource, 3)
for i := 0; i < 3; i++ {
pods[i] = udnPodResource{
name: "hello-pod-" + allNS[i],
namespace: allNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace)
pods[i].createUdnPod(oc)
waitPodReady(oc, pods[i].namespace, pods[i].name)
}
exutil.By("4. check pods' interfaces")
for i := 0; i < 2; i++ {
podIP, _ := getPodIPUDN(oc, pods[i].namespace, pods[i].name, "ovn-udn1")
o.Expect(podIP).NotTo(o.BeEmpty())
}
output, err := e2eoutput.RunHostCmd(pods[2].namespace, pods[2].name, "ip -o link show")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("ovn-udn1"))
exutil.By("5. Validate CUDN pod traffic")
CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
})
g.It("Author:meinli-Medium-78598-[CUDN layer2] Validate CUDN enable creating shared OVN network across multiple namespaces.", func() {
var (
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
matchLabelKey = "test.io"
matchValue = "cudn-network-" + getRandomString()
crdName = "cudn-network-78598"
)
exutil.By("1. Create three namespaces, first two for CUDN and label them with cudn selector, last namespace is for default network")
var allNS []string
for i := 0; i < 3; i++ {
if i != 2 {
oc.CreateNamespaceUDN()
allNS = append(allNS, oc.Namespace())
} else {
oc.SetupProject()
allNS = append(allNS, oc.Namespace())
}
if i < 2 {
ns := allNS[i]
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
exutil.By("2. create CUDN with two namespaces")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
_, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer2")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. create pods in ns1 and ns2, one pod in ns3")
pods := make([]udnPodResource, 3)
for i := 0; i < 3; i++ {
pods[i] = udnPodResource{
name: "hello-pod-" + allNS[i],
namespace: allNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace)
pods[i].createUdnPod(oc)
waitPodReady(oc, pods[i].namespace, pods[i].name)
}
exutil.By("4. check pods' interfaces")
for i := 0; i < 2; i++ {
podIP, _ := getPodIPUDN(oc, pods[i].namespace, pods[i].name, "ovn-udn1")
o.Expect(podIP).NotTo(o.BeEmpty())
}
output, err := e2eoutput.RunHostCmd(pods[2].namespace, pods[2].name, "ip -o link show")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("ovn-udn1"))
exutil.By("5. Validate CUDN pod traffic")
CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
})
g.It("Author:anusaxen-Low-77752-Check udn pods isolation with udn crd and native NAD integration", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
o.Expect(ipStackType).NotTo(o.BeEmpty())
if ipStackType != "ipv4single" {
g.Skip("This case requires IPv4 single stack cluster")
}
var cidr string
var prefix int32
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
prefix = 24
}
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadNS := []string{ns1, ns2}
nadResourcename := []string{"l3-network-" + nadNS[0], "l3-network-" + nadNS[1]}
exutil.By(fmt.Sprintf("create native NAD %s in namespace %s", nadResourcename[0], nadNS[0]))
nad := udnNetDefResource{
nadname: nadResourcename[0],
namespace: nadNS[0],
nad_network_name: nadResourcename[0],
topology: "layer3",
subnet: "10.150.0.0/16/24",
mtu: mtu,
net_attach_def_name: nadNS[0] + "/" + nadResourcename[0],
role: "primary",
template: udnNadtemplate,
}
nad.createUdnNad(oc)
exutil.By(fmt.Sprintf("create crd NAD %s in namespace %s", nadResourcename[1], nadNS[1]))
udncrd := udnCRDResource{
crdname: nadResourcename[1],
namespace: nadNS[1],
role: "Primary",
mtu: mtu,
cidr: cidr,
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
err := waitUDNCRDApplied(oc, nadNS[1], udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
pod := make([]udnPodResource, 2)
for i := 0; i < 2; i++ {
exutil.By("create a udn hello pod in ns1 and ns2")
pod[i] = udnPodResource{
name: "hello-pod",
namespace: nadNS[i],
label: "hello-pod",
template: udnPodTemplate,
}
pod[i].createUdnPod(oc)
waitPodReady(oc, pod[i].namespace, pod[i].name)
}
//udn network connectivity should be isolated
CurlPod2PodFailUDN(oc, nadNS[0], pod[0].name, nadNS[1], pod[1].name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, nadNS[0], pod[0].name, nadNS[1], pod[1].name)
})
g.It("Author:meinli-Medium-79003-[CUDN layer3] Verify that patching namespaces for existing CUDN functionality operate as intended", func() {
var (
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
key = "test.cudn.layer3"
crdName = "cudn-network-79003"
values = []string{"value-79003-1", "value-79003-2"}
)
exutil.By("1. create two namespaces and label them")
oc.CreateNamespaceUDN()
allNS := []string{oc.Namespace()}
oc.CreateNamespaceUDN()
allNS = append(allNS, oc.Namespace())
for i := 0; i < 2; i++ {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[i], fmt.Sprintf("%s-", key)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2. create CUDN in ns1")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
cudncrd, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer3", []string{values[0], ""})
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. patch namespaces for CUDN")
patchCmd := fmt.Sprintf("{\"spec\":{\"namespaceSelector\":{\"matchExpressions\":[{\"key\": \"%s\", \"operator\": \"In\", \"values\": [\"%s\", \"%s\"]}]}}}", key, values[0], values[1])
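// Illustrative rendered patch:
// {"spec":{"namespaceSelector":{"matchExpressions":[{"key":"test.cudn.layer3","operator":"In","values":["value-79003-1","value-79003-2"]}]}}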
patchResourceAsAdmin(oc, fmt.Sprintf("clusteruserdefinednetwork.k8s.ovn.org/%s", cudncrd.crdname), patchCmd)
err = waitCUDNCRDApplied(oc, cudncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteruserdefinednetwork.k8s.ovn.org", cudncrd.crdname, "-ojsonpath={.status.conditions[*].message}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(allNS[1]))
exutil.By("4. create pods in ns1 and ns2")
pods := make([]udnPodResource, 2)
for i, ns := range allNS {
pods[i] = udnPodResource{
name: "hello-pod-" + ns,
namespace: ns,
label: "hello-pod",
template: udnPodTemplate,
}
defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace)
pods[i].createUdnPod(oc)
waitPodReady(oc, pods[i].namespace, pods[i].name)
}
exutil.By("5. validate connection from CUDN pod to CUDN pod")
CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
exutil.By("6. unlabel ns2")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", allNS[1], fmt.Sprintf("%s-", key)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitCUDNCRDApplied(oc, cudncrd.crdname)
o.Expect(err).To(o.HaveOccurred())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteruserdefinednetwork.k8s.ovn.org", cudncrd.crdname, "-ojsonpath={.status.conditions[*].message}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("failed to delete NetworkAttachmentDefinition [%s/%s]", allNS[1], cudncrd.crdname)))
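// The unlabeled namespace still hosts a pod attached to the CUDN network, so the controller cannot remove
// the rendered NAD; the network (and pod connectivity) is expected to stay intact, as verified next.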
exutil.By("7. validate connection from CUDN pod to CUDN pod")
CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
})
g.It("Author:meinli-Medium-78742-[CUDN layer2] Validate pod2pod traffic between CUDN and UDN NAD. [Serial]", func() {
var (
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
key = "test.cudn.layer2"
crdName = "cudn-network-78742"
values = []string{"value-78742-1", "value-78742-2"}
)
exutil.By("1. create three namespaces, first and second for CUDN, third for UDN NAD")
oc.CreateNamespaceUDN()
cudnNS := []string{oc.Namespace()}
oc.CreateNamespaceUDN()
cudnNS = append(cudnNS, oc.Namespace())
for i := 0; i < 2; i++ {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s-", key)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", cudnNS[i], fmt.Sprintf("%s=%s", key, values[i])).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
oc.CreateNamespaceUDN()
nadNS := oc.Namespace()
exutil.By("2. create CUDN in cudnNS")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
_, err := createCUDNCRD(oc, key, crdName, ipv4cidr, ipv6cidr, cidr, "layer2", values)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. create UDN NAD in nadNS")
var subnet string
if ipStackType == "ipv4single" {
subnet = "10.151.0.0/16"
} else {
if ipStackType == "ipv6single" {
subnet = "2011:100:200::0/60"
} else {
subnet = "10.151.0.0/16,2011:100:200::0/60"
}
}
nadResourcename := "l2-network" + nadNS
nad := udnNetDefResource{
nadname: nadResourcename,
namespace: nadNS,
nad_network_name: nadResourcename,
topology: "layer2",
subnet: subnet,
mtu: 1300,
net_attach_def_name: nadNS + "/" + nadResourcename,
role: "primary",
template: udnNadtemplate,
}
nad.createUdnNad(oc)
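// Illustrative shape of the rendered NetworkAttachmentDefinition (a sketch only;
// the actual fields come from udn_nad_template.yaml and may differ):
//
//	apiVersion: k8s.cni.cncf.io/v1
//	kind: NetworkAttachmentDefinition
//	metadata:
//	  name: l2-network-<ns>
//	  namespace: <ns>
//	spec:
//	  config: |
//	    {
//	      "cniVersion": "0.3.1",
//	      "name": "l2-network-<ns>",
//	      "type": "ovn-k8s-cni-overlay",
//	      "topology": "layer2",
//	      "subnets": "10.151.0.0/16",
//	      "netAttachDefName": "<ns>/l2-network-<ns>",
//	      "mtu": 1300,
//	      "role": "primary"
//	    }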
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, nadNS, nadResourcename) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename)
}
exutil.By("4. create pods in cudnNS and nadNS")
pods := make([]udnPodResource, 3)
for i, ns := range append(cudnNS, nadNS) {
pods[i] = udnPodResource{
name: "hello-pod-" + ns,
namespace: ns,
label: "hello-pod",
template: udnPodTemplate,
}
defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace)
pods[i].createUdnPod(oc)
waitPodReady(oc, pods[i].namespace, pods[i].name)
}
exutil.By("5. Validate isolation from UDN NAD pod to CUDN pod")
CurlPod2PodFailUDN(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name)
exutil.By("6. Validate isolation from CUDN pod to UDN NAD pod")
CurlPod2PodFailUDN(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name)
exutil.By("7. Validate connection among CUDN pods")
CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
//default network connectivity should be isolated
CurlPod2PodFail(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
})
g.It("Author:meinli-Medium-78496-[CUDN layer3] Validate conflicted creation when CUDN and UDN created in the same namespace.", func() {
var (
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
matchLabelKey = "test.io"
matchValue = "cudn-network-" + getRandomString()
crdName = "cudn-network-78496"
)
exutil.By("1. create two namespaces")
oc.CreateNamespaceUDN()
allNS := []string{oc.Namespace()}
oc.CreateNamespaceUDN()
allNS = append(allNS, oc.Namespace())
for _, ns := range allNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2. Create UDN CRD and pod in ns1")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
createGeneralUDNCRD(oc, allNS[0], "udn-network-78496-ns1", ipv4cidr, ipv6cidr, cidr, "layer3")
udnpod := udnPodResource{
name: "hello-pod-" + allNS[0],
namespace: allNS[0],
label: "hello-pod",
template: udnPodTemplate,
}
defer removeResource(oc, true, true, "pod", udnpod.name, "-n", udnpod.namespace)
udnpod.createUdnPod(oc)
waitPodReady(oc, udnpod.namespace, udnpod.name)
exutil.By("3. create CUDN in ns1 and ns2")
if ipStackType == "ipv4single" {
cidr = "10.151.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2011:100:200::0/48"
} else {
ipv4cidr = "10.151.0.0/16"
ipv6cidr = "2011:100:200::0/48"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
cudncrd, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer3")
o.Expect(err).To(o.HaveOccurred())
exutil.By("4. Create pods in ns2")
cudnpod := udnPodResource{
name: "hello-pod-" + allNS[1],
namespace: allNS[1],
label: "hello-pod",
template: udnPodTemplate,
}
defer removeResource(oc, true, true, "pod", cudnpod.name, "-n", cudnpod.namespace)
cudnpod.createUdnPod(oc)
waitPodReady(oc, cudnpod.namespace, cudnpod.name)
exutil.By("5. validate CUDN in ns1 create failed and CUDN in ns2 create successfully")
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("clusteruserdefinednetwork.k8s.ovn.org", cudncrd.crdname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(fmt.Sprintf("primary network already exist in namespace \"%s\"", allNS[0])))
cudnPodIP, _ := getPodIPUDN(oc, cudnpod.namespace, cudnpod.name, "ovn-udn1")
o.Expect(cudnPodIP).NotTo(o.BeEmpty())
exutil.By("6. validate traffic isolation between UDN pod and CUDN pod")
CurlPod2PodFailUDN(oc, allNS[0], udnpod.name, allNS[1], cudnpod.name)
})
g.It("Author:meinli-Medium-78741-[CUDN layer3] validate pod2pod traffic between CUDN and UDN CRD. [Serial]", func() {
var (
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
matchLabelKey = "test.io"
matchValue = "cudn-network-" + getRandomString()
crdName = "cudn-network-78741"
)
exutil.By("1. create three namespaces, first and second for CUDN, third for UDN")
oc.CreateNamespaceUDN()
cudnNS := []string{oc.Namespace()}
oc.CreateNamespaceUDN()
cudnNS = append(cudnNS, oc.Namespace())
for _, ns := range cudnNS {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s-", matchLabelKey)).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns, fmt.Sprintf("%s=%s", matchLabelKey, matchValue)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
oc.CreateNamespaceUDN()
udnNS := oc.Namespace()
exutil.By("2. create CUDN in cudnNS")
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/60"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/60"
}
}
defer removeResource(oc, true, true, "clusteruserdefinednetwork", crdName)
_, err := applyCUDNtoMatchLabelNS(oc, matchLabelKey, matchValue, crdName, ipv4cidr, ipv6cidr, cidr, "layer3")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3. create UDN in ns3")
if ipStackType == "ipv4single" {
cidr = "10.151.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2011:100:200::0/48"
} else {
ipv4cidr = "10.151.0.0/16"
ipv6cidr = "2011:100:200::0/48"
}
}
createGeneralUDNCRD(oc, udnNS, "udn-network-78741", ipv4cidr, ipv6cidr, cidr, "layer3")
exutil.By("4. create pods in namespaces")
pods := make([]udnPodResource, 3)
for i, ns := range append(cudnNS, udnNS) {
pods[i] = udnPodResource{
name: "hello-pod-" + ns,
namespace: ns,
label: "hello-pod",
template: udnPodTemplate,
}
defer removeResource(oc, true, true, "pod", pods[i].name, "-n", pods[i].namespace)
pods[i].createUdnPod(oc)
waitPodReady(oc, pods[i].namespace, pods[i].name)
}
exutil.By("5. Validate isolation from UDN pod to CUDN pod")
CurlPod2PodFailUDN(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, pods[2].namespace, pods[2].name, pods[0].namespace, pods[0].name)
exutil.By("6. Validate isolation from CUDN pod to UDN pod")
CurlPod2PodFailUDN(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, pods[1].namespace, pods[1].name, pods[2].namespace, pods[2].name)
exutil.By("7. Validate connection among CUDN pods")
CurlPod2PodPassUDN(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
//default network connectivity should be isolated
CurlPod2PodFail(oc, pods[0].namespace, pods[0].name, pods[1].namespace, pods[1].name)
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
806993f8-9237-4c56-8b50-df79aa061f3b
|
Author:anusaxen-Critical-74921-Check udn pods isolation on user defined networks
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
|
g.It("Author:anusaxen-Critical-74921-Check udn pods isolation on user defined networks", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.151.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60"}
}
}
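// Each namespace gets its own subnet and its own nad_network_name, so the two
// primary UDNs are distinct networks and pod-to-pod traffic between them must fail.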
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: nadResourcename[i],
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a udn hello pod in ns2")
pod2 := udnPodResource{
name: "hello-pod-ns2",
namespace: ns2,
label: "hello-pod",
template: udnPodTemplate,
}
pod2.createUdnPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
//udn network connectivity should be isolated
CurlPod2PodFailUDN(oc, ns1, pod1.name, ns2, pod2.name)
//default network connectivity should also be isolated
CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
d6cefb5e-65e3-448f-bbfe-994e9e5eadd3
|
Author:anusaxen-Critical-75236-Check udn pods are not isolated if same nad network is shared across two namespaces
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
|
g.It("Author:anusaxen-Critical-75236-Check udn pods are not isolated if same nad network is shared across two namespaces", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.150.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2010:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.150.0.0/16/24,2010:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: "l3-network-ns1", //Keeping same nad network name across all which is l3-network-ns1
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
}
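// Both NADs share one nad_network_name and identical subnets, so OVN-Kubernetes
// treats them as a single layer3 network spanning ns1 and ns2; that is why the
// UDN curl below passes while the default-network curl still fails.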
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a udn hello pod in ns2")
pod2 := udnPodResource{
name: "hello-pod-ns2",
namespace: ns2,
label: "hello-pod",
template: udnPodTemplate,
}
pod2.createUdnPod(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
//udn network connectivity should NOT be isolated
CurlPod2PodPassUDN(oc, ns1, pod1.name, ns2, pod2.name)
//default network connectivity should be isolated
CurlPod2PodFail(oc, ns1, pod1.name, ns2, pod2.name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
201c1f9f-36a2-4e2b-8a7f-bdcf4f76f39e
|
Author:huirwang-High-75223-Restarting ovn pods should not break UDN primary network traffic.[Disruptive]
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
|
g.It("Author:huirwang-High-75223-Restarting ovn pods should not break UDN primary network traffic.[Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
exutil.By("2. Create 2nd namespace")
oc.CreateNamespaceUDN()
ns2 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-" + ns2}
nadNS := []string{ns1, ns2}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.151.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], nadNS[i]))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: nadNS[i],
nad_network_name: nadResourcename[i],
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: nadNS[i] + "/" + nadResourcename[i],
role: "primary",
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, nadNS[i], nadResourcename[i]) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename[i])
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename[i])
}
}
exutil.By("Create replica pods in ns1")
createResourceFromFile(oc, ns1, testPodFile)
err := waitForPodWithLabelReady(oc, ns1, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS1Names := getPodName(oc, ns1, "name=test-pods")
CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1])
exutil.By("create replica pods in ns2")
createResourceFromFile(oc, ns2, testPodFile)
err = waitForPodWithLabelReady(oc, ns2, "name=test-pods")
exutil.AssertWaitPollNoErr(err, "this pod with label name=test-pods not ready")
testpodNS2Names := getPodName(oc, ns2, "name=test-pods")
CurlPod2PodPassUDN(oc, ns2, testpodNS2Names[0], ns2, testpodNS2Names[1])
exutil.By("Restart OVN pods")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "--all", "-n", "openshift-ovn-kubernetes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertAllPodsToBeReady(oc, "openshift-ovn-kubernetes")
exutil.By("Verify the connection in UDN primary network not broken.")
CurlPod2PodPassUDN(oc, ns1, testpodNS1Names[0], ns1, testpodNS1Names[1])
CurlPod2PodPassUDN(oc, ns2, testpodNS2Names[0], ns2, testpodNS2Names[1])
})
| |||||
test case
|
openshift/openshift-tests-private
|
71ea6a49-3857-45f0-a75d-990407fa3f69
|
Author:huirwang-Medium-75238-NAD can be created with secondary role with primary UDN in same namespace.
|
['"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
|
g.It("Author:huirwang-Medium-75238-NAD can be created with secondary role with primary UDN in same namespace.", func() {
var (
udnNadtemplate = filepath.Join(testDataDirUDN, "udn_nad_template.yaml")
udnPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_template.yaml")
pingPodTemplate = filepath.Join(testDataDirUDN, "udn_test_pod_annotation_template.yaml")
mtu int32 = 1300
)
ipStackType := checkIPStackType(oc)
exutil.By("1. Create first namespace")
oc.CreateNamespaceUDN()
ns1 := oc.Namespace()
nadResourcename := []string{"l3-network-" + ns1, "l3-network-2-" + ns1}
role := []string{"primary", "secondary"}
var subnet []string
if ipStackType == "ipv4single" {
subnet = []string{"10.150.0.0/16/24", "10.161.0.0/16/24"}
} else {
if ipStackType == "ipv6single" {
subnet = []string{"2010:100:200::0/60", "2011:100:200::0/60"}
} else {
subnet = []string{"10.150.0.0/16/24,2010:100:200::0/60", "10.151.0.0/16/24,2011:100:200::0/60"}
}
}
nad := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
exutil.By(fmt.Sprintf("create NAD %s in namespace %s", nadResourcename[i], ns1))
nad[i] = udnNetDefResource{
nadname: nadResourcename[i],
namespace: ns1,
nad_network_name: nadResourcename[i],
topology: "layer3",
subnet: subnet[i],
mtu: mtu,
net_attach_def_name: ns1 + "/" + nadResourcename[i],
role: role[i],
template: udnNadtemplate,
}
nad[i].createUdnNad(oc)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, ns1, nadResourcename[i]) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadResourcename[i])
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadResourcename[i])
}
}
exutil.By("create a udn hello pod in ns1")
pod1 := udnPodResource{
name: "hello-pod-ns1",
namespace: ns1,
label: "hello-pod",
template: udnPodTemplate,
}
pod1.createUdnPod(oc)
waitPodReady(oc, pod1.namespace, pod1.name)
exutil.By("create a hello pod in ns1 refers to secondary udn network")
pod2 := udnPodSecNADResource{
name: "hello-pod-ns1-2",
namespace: ns1,
label: "hello-pod",
annotation: "/l3-network-2-" + ns1,
template: pingPodTemplate,
}
pod2.createUdnPodWithSecNAD(oc)
waitPodReady(oc, pod2.namespace, pod2.name)
exutil.By("Verify the two pods between primary and udn networks work well")
CurlPod2PodPassUDN(oc, ns1, pod1.name, ns1, pod2.name)
exutil.By("Verify the pod2 has secondary network, but pod1 doesn't. ")
pod1IPs, err := execCommandInSpecificPod(oc, ns1, pod1.name, "ip a")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(pod1IPs, "net1@")).NotTo(o.BeTrue())
pod2IPs, err := execCommandInSpecificPod(oc, ns1, pod2.name, "ip a")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(pod2IPs, "net1@")).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
fdc1d0f4-3185-4e57-979e-99df5e10338f
|
Author:huirwang-Medium-75658-Check sctp traffic works well via udn pods on user defined networks for layer3. [Disruptive]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
|
g.It("Author:huirwang-Medium-75658-Check sctp traffic work well via udn pods user defined networks for laye3. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctp/sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctp/sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_dualstack2_template.yaml")
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_singlestack_template.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
exutil.By("Preparing the nodes for SCTP")
prepareSCTPModule(oc, sctpModule)
ipStackType := checkIPStackType(oc)
exutil.By("Setting privileges on the namespace")
oc.CreateNamespaceUDN()
ns := oc.Namespace()
var cidr, ipv4cidr, ipv6cidr string
var prefix, ipv4prefix, ipv6prefix int32
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
prefix = 24
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
prefix = 64
} else {
ipv4cidr = "10.150.0.0/16"
ipv4prefix = 24
ipv6cidr = "2010:100:200::0/48"
ipv6prefix = 64
}
}
exutil.By("Create CRD for UDN")
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-75658",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv4prefix: ipv4prefix,
IPv6cidr: ipv6cidr,
IPv6prefix: ipv6prefix,
template: udnCRDdualStack,
}
udncrd.createUdnCRDDualStack(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-75658",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
prefix: prefix,
template: udnCRDSingleStack,
}
udncrd.createUdnCRDSingleStack(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
exutil.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
exutil.By("Verify sctp server pod can be accessed for UDN network.")
if ipStackType == "dualstack" {
sctpServerIPv6, sctpServerIPv4 := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1")
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true)
} else {
sctpServerIP, _ := getPodIPUDN(oc, ns, sctpServerPodName, "ovn-udn1")
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
522b1bef-8892-4967-b64d-cc8329976843
|
Author:weliang-Medium-75623-Feature Integration UDN with multus. [Disruptive]
|
['"context"', '"net"', '"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/pod_udn.go
|
g.It("Author:weliang-Medium-75623-Feature Integration UDN with multus. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
udnCRDdualStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_dualstack_template.yaml")
udnPodTemplate = filepath.Join(buildPruningBaseDir, "udn/udn_test_pod_annotation_template_node.yaml")
podenvname = "Hello OpenShift"
udnCRDSingleStack = filepath.Join(buildPruningBaseDir, "udn/udn_crd_layer2_singlestack_template.yaml")
dualstackNADTemplate = filepath.Join(buildPruningBaseDir, "multus/dualstack-NAD-template.yaml")
)
exutil.By("Getting the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(nodeList.Items) < 1 {
g.Skip("The cluster has no ready node for the testing")
}
oc.CreateNamespaceUDN()
ns := oc.Namespace()
ipStackType := checkIPStackType(oc)
var cidr, ipv4cidr, ipv6cidr string
if ipStackType == "ipv4single" {
cidr = "10.150.0.0/16"
} else {
if ipStackType == "ipv6single" {
cidr = "2010:100:200::0/48"
} else {
ipv4cidr = "10.150.0.0/16"
ipv6cidr = "2010:100:200::0/48"
}
}
exutil.By("Creating Layer2 UDN CRD with Primary role")
var udncrd udnCRDResource
if ipStackType == "dualstack" {
udncrd = udnCRDResource{
crdname: "udn-network-75239",
namespace: ns,
role: "Primary",
mtu: 1400,
IPv4cidr: ipv4cidr,
IPv6cidr: ipv6cidr,
template: udnCRDdualStack,
}
udncrd.createLayer2DualStackUDNCRD(oc)
} else {
udncrd = udnCRDResource{
crdname: "udn-network-75658",
namespace: ns,
role: "Primary",
mtu: 1400,
cidr: cidr,
template: udnCRDSingleStack,
}
udncrd.createLayer2SingleStackUDNCRD(oc)
}
err := waitUDNCRDApplied(oc, ns, udncrd.crdname)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Creating NAD for ns")
nad := dualstackNAD{
nadname: "dualstack",
namespace: ns,
plugintype: "macvlan",
mode: "bridge",
ipamtype: "whereabouts",
ipv4range: "20.200.200.0/24",
ipv6range: "2000:200:200::0/64",
ipv4rangestart: "",
ipv4rangeend: "",
ipv6rangestart: "",
ipv6rangeend: "",
template: dualstackNADTemplate,
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", nad.nadname, "-n", ns).Execute()
nad.createDualstackNAD(oc)
exutil.By("Creating three testing pods consuming above network-attach-definition in ns")
pods := make([]udnPodSecNADResourceNode, 3)
var podNames []string
for i := 0; i < 3; i++ {
pods[i] = udnPodSecNADResourceNode{
name: "hello-pod" + strconv.Itoa(i),
namespace: ns,
nadname: nad.nadname,
nodename: nodeList.Items[0].Name,
template: udnPodTemplate,
}
pods[i].createUdnPodWithSecNADNode(oc)
waitPodReady(oc, ns, pods[i].name)
podNames = append(podNames, pods[i].name)
}
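// All three pods are pinned to the same node, so their macvlan net1 interfaces
// share one L2 segment and the curls below exercise direct same-node reachability.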
exutil.By("Verifying the all pods get dual IPs")
pod1IPv4, pod1IPv6 := getPodMultiNetwork(oc, ns, podNames[0])
pod2IPv4, pod2IPv6 := getPodMultiNetwork(oc, ns, podNames[1])
exutil.By("Verifying that there is no traffic blocked between pods")
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[0], pod2IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[1], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod1IPv6, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv4, "net1", podenvname)
CurlMultusPod2PodPass(oc, ns, podNames[2], pod2IPv6, "net1", podenvname)
})
|