element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---
test case
|
openshift/openshift-tests-private
|
adddfa72-3f95-4334-a071-ee008085dc70
|
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41169-MultiNetworkPolicy ingress allow diff podSelector with same namespaceSelector. [Disruptive]
|
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41169-MultiNetworkPolicy ingress allow diff podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "ingress-allow-diff-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
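// Patching networks.operator.openshift.io/cluster toggles the CNO's useMultiNetworkPolicy knob cluster-wide; the false payload lets the deferred cleanup restore the default.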
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41169a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41169b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod1ns1's secondary interface in first namespace.")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
exutil.By("3. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("4. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("5. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("6. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("7. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("8. Create Ingress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingress-allow-diff-podselector-with-same-namespaceselector"))
exutil.By("9. Same curl testing, one curl fail and three curls will pass after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-2", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "red-pod-1", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-3", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "red-pod-2", pod1ns1IPv4, pod1ns1IPv6)
exutil.By("10. Delete ingress-allow-diff-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "ingress-allow-diff-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("11. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
})
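For reference, the fixture applied in step 8 is not reproduced in this row. A minimal sketch of what an ingress-allow-diff-podSelector-with-same-namespaceSelector MultiNetworkPolicy plausibly contains (the policy-for NAD name and the pod label keys are assumptions, not the fixture's actual values; the namespace label matches the user=project41169a label set above):
apiVersion: k8s.cni.cncf.io/v1beta1
kind: MultiNetworkPolicy
metadata:
  name: ingress-allow-diff-podselector-with-same-namespaceselector
  annotations:
    k8s.v1.cni.cncf.io/policy-for: blue-net   # assumed NAD name
spec:
  podSelector:
    matchLabels:
      name: blue-openshift                    # assumed pod label
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          user: project41169a
      podSelector:
        matchLabels:
          name: red-openshift                 # a different podSelector, same namespaceSelector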
---
test case
|
openshift/openshift-tests-private
|
e6d2f535-ebf9-4368-a9ce-bc6dc650004d
|
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41171-MultiNetworkPolicy egress allow same podSelector with same namespaceSelector. [Disruptive]
|
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41171-MultiNetworkPolicy egress allow same podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "egress-allow-same-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41171a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41171b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("3. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("4. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("5. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("7. Create egress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("egress-allow-same-podselector-with-same-namespaceselector"))
exutil.By("8. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
exutil.By("9. Delete egress-allow-same-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "egress-allow-same-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
})
---
test case
|
openshift/openshift-tests-private
|
cfbb2dd3-75ce-42bc-830e-5edd9a26f9c7
|
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41172-MultiNetworkPolicy egress allow diff podSelector with same namespaceSelector. [Disruptive]
|
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41172-MultiNetworkPolicy egress allow diff podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "egress-allow-diff-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41172a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41172b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("3. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("4. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("5. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("7. Create egress-allow-diff-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("egress-allow-diff-podselector-with-same-namespaceselector"))
exutil.By("8. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("9. Delete egress-allow-diff-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "egress-allow-diff-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
})
---
test case
|
openshift/openshift-tests-private
|
9eefd5af-3ccb-46d9-a18c-7b087e2537ef
|
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41170-MultiNetworkPolicy ingress ipblock. [Disruptive]
|
['"fmt"', '"net"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41170-MultiNetworkPolicy ingress ipblock. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
patchSResource := "networks.operator.openshift.io/cluster"
pingPodTemplate := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-pod-template.yaml")
netAttachDefFile := filepath.Join(buildPruningBaseDir, "ipblock-NAD.yaml")
policyFile := filepath.Join(buildPruningBaseDir, "ingress-ipBlock.yaml")
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
exutil.By("1. Enable MacvlanNetworkpolicy in the cluster")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
exutil.By("2. Create a namespace")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41170a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
exutil.By("3. Create MultiNetworkPolicy-NAD in ns1")
err1 := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns1).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
output, err2 := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-net"))
exutil.By("4. Create six pods for ip range policy testing")
pod1ns1 := testPodMultinetwork{
name: "blue-pod-1",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod1ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
pod2ns1 := testPodMultinetwork{
name: "blue-pod-2",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod2ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
pod3ns1 := testPodMultinetwork{
name: "blue-pod-3",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod3ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
pod4ns1 := testPodMultinetwork{
name: "blue-pod-4",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod4ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod4ns1.namespace, pod4ns1.name)
pod5ns1 := testPodMultinetwork{
name: "blue-pod-5",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod5ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod5ns1.namespace, pod5ns1.name)
pod6ns1 := testPodMultinetwork{
name: "blue-pod-6",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod6ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod6ns1.namespace, pod6ns1.name)
g.By("5. Get IPs from all six pod's secondary interfaces")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-3")
pod4ns1IPv4, pod4ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-4")
pod5ns1IPv4, pod5ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-5")
pod6ns1IPv4, pod6ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-6")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
exutil.By("7. Create ingress-ipBlock policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err3 := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err3).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingress-ipblock"))
exutil.By("8. Curl should fail after applying policy")
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-4", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-5", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-2", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-3", pod1ns1IPv4, pod1ns1IPv6)
exutil.By("9. Delete ingress-ipBlock policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "ingress-ipblock", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
})
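The ipblock-NAD.yaml fixture is not reproduced in this row. A plausible minimal sketch, assuming a macvlan NetworkAttachmentDefinition with whereabouts IPAM on 192.168.0.0/24 (the egress-ipBlock test below cites addresses 192.168.0.4-192.168.0.6 from this range; the exact CNI config is an assumption):
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: ipblock-net
spec:
  config: |-
    {
      "cniVersion": "0.3.1",
      "name": "ipblock-net",
      "type": "macvlan",
      "mode": "bridge",
      "ipam": {
        "type": "whereabouts",
        "range": "192.168.0.0/24"
      }
    }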
---
test case
|
openshift/openshift-tests-private
|
ed974f40-0796-42a4-bd49-d27661958baf
|
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41173-MultiNetworkPolicy egress ipblock. [Disruptive]
|
['"fmt"', '"net"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41173-MultiNetworkPolicy egress ipblock. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
patchSResource := "networks.operator.openshift.io/cluster"
pingPodTemplate := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-pod-template.yaml")
netAttachDefFile := filepath.Join(buildPruningBaseDir, "ipblock-NAD.yaml")
policyFile := filepath.Join(buildPruningBaseDir, "egress-ipBlock.yaml")
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
exutil.By("1. Enable MacvlanNetworkpolicy in the cluster")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
exutil.By("2. Create a namespace")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41173a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
exutil.By("3. Create MultiNetworkPolicy-NAD in ns1")
policyErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns1).Execute()
o.Expect(policyErr).NotTo(o.HaveOccurred())
nadOutput, nadErr := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(nadErr).NotTo(o.HaveOccurred())
o.Expect(nadOutput).To(o.ContainSubstring("ipblock-net"))
exutil.By("4. Create six pods for egress ip range policy testing")
pod1ns1 := testPodMultinetwork{
name: "blue-pod-1",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod1ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
pod2ns1 := testPodMultinetwork{
name: "blue-pod-2",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod2ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
pod3ns1 := testPodMultinetwork{
name: "blue-pod-3",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod3ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
pod4ns1 := testPodMultinetwork{
name: "blue-pod-4",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod4ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod4ns1.namespace, pod4ns1.name)
pod5ns1 := testPodMultinetwork{
name: "blue-pod-5",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod5ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod5ns1.namespace, pod5ns1.name)
pod6ns1 := testPodMultinetwork{
name: "blue-pod-6",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod6ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod6ns1.namespace, pod6ns1.name)
exutil.By("5. Get IPs from all six pod's secondary interfaces")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-3")
pod4ns1IPv4, pod4ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-4")
pod5ns1IPv4, pod5ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-5")
pod6ns1IPv4, pod6ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-6")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
exutil.By("7. Create egress-ipBlock policy in ns1")
policyCreateErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
o.Expect(policyCreateErr).NotTo(o.HaveOccurred())
policyCreOutput, policyCreErr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyCreErr).NotTo(o.HaveOccurred())
o.Expect(policyCreOutput).To(o.ContainSubstring("egress-ipblock"))
exutil.By("8. curl should fail for ip range 192.168.0.4-192.168.0.6 after applying policy")
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-2", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-2", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-3", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-4", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-5", pod6ns1IPv4, pod6ns1IPv6)
exutil.By("9. Delete egress-ipBlock policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "egress-ipblock", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
})
---
test case
|
openshift/openshift-tests-private
|
3ec8abe7-8e00-499e-87e4-b83f4239f6e6
|
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41607-Multinetworkpolicy filter-with-tcpport [Disruptive]
|
['"fmt"', '"net"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41607-Multinetworkpolicy filter-with-tcpport [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
patchSResource := "networks.operator.openshift.io/cluster"
tcpportPod := filepath.Join(buildPruningBaseDir, "tcpport-pod.yaml")
netAttachDefFile := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-NAD1.yaml")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-pod-template.yaml")
policyFile := filepath.Join(buildPruningBaseDir, "policy-tcpport.yaml")
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
exutil.By("1. Enable MacvlanNetworkpolicy in the cluster")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
exutil.By("2. Create a namespace")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns := "project41607"
defer oc.AsAdmin().Run("delete").Args("project", ns, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
exutil.By("3. Create MultiNetworkPolicy-NAD in ns")
policyErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns).Execute()
o.Expect(policyErr).NotTo(o.HaveOccurred())
nadOutput, nadErr := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns).Output()
o.Expect(nadErr).NotTo(o.HaveOccurred())
o.Expect(nadOutput).To(o.ContainSubstring("macvlan-nad1"))
exutil.By("4. Create a tcpport pods for ingress tcp port testing")
createResourceFromFile(oc, ns, tcpportPod)
podErr := waitForPodWithLabelReady(oc, ns, "name=tcp-port-pod")
exutil.AssertWaitPollNoErr(podErr, "tcpportPod is not running")
podIPv4, _ := getPodMultiNetwork(oc, ns, "tcp-port-pod")
exutil.By("5. Create a test pods for ingress tcp port testing")
pod1ns1 := testPodMultinetwork{
name: "blue-pod-1",
namespace: ns,
nodename: "worker-1",
nadname: "macvlan-nad1",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod1ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("6. curl should pass before applying policy")
_, curl1Err := e2eoutput.RunHostCmd(ns, "blue-pod-1", "curl --connect-timeout 5 -s "+net.JoinHostPort(podIPv4, "8888"))
o.Expect(curl1Err).NotTo(o.HaveOccurred())
exutil.By("7. Create tcpport policy in ns")
policyCreateErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns).Execute()
o.Expect(policyCreateErr).NotTo(o.HaveOccurred())
policyCreOutput, policyCreErr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output()
o.Expect(policyCreErr).NotTo(o.HaveOccurred())
o.Expect(policyCreOutput).To(o.ContainSubstring("tcp-port"))
exutil.By("8. One curl should fail before applying policy")
_, curl2Err := e2eoutput.RunHostCmd(ns, "blue-pod-1", "curl --connect-timeout 5 -s "+net.JoinHostPort(podIPv4, "8888"))
o.Expect(curl2Err).To(o.HaveOccurred())
exutil.By("9. Delete tcp-port policy in ns")
removeResource(oc, true, true, "multi-networkpolicy", "tcp-port", "-n", ns)
exutil.By("10. curl should pass after deleting policy")
_, curl3Err := e2eoutput.RunHostCmd(ns, "blue-pod-1", "curl --connect-timeout 5 -s "+net.JoinHostPort(podIPv4, "8888"))
o.Expect(curl3Err).NotTo(o.HaveOccurred())
})
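The policy-tcpport.yaml fixture is likewise not reproduced here. A plausible minimal sketch, assuming the policy admits ingress only on a TCP port other than the pod's 8888 listener, which is what makes the step-8 curl fail (the selector and port value are assumptions):
apiVersion: k8s.cni.cncf.io/v1beta1
kind: MultiNetworkPolicy
metadata:
  name: tcp-port
  annotations:
    k8s.v1.cni.cncf.io/policy-for: macvlan-nad1
spec:
  podSelector: {}
  ingress:
  - ports:
    - protocol: TCP
      port: 9999   # assumed: any port other than 8888 blocks the test's curl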
---
test case
|
openshift/openshift-tests-private
|
8557ebb9-0c9a-4821-91e1-6f7a020b639c
|
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-55818-Rules are not removed after disabling multinetworkpolicy. [Disruptive]
|
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-55818-Rules are not removed after disabling multinetworkpolicy. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
//https://issues.redhat.com/browse/OCPBUGS-977: Rules are not removed after disabling multinetworkpolicy
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "creat-ten-rules.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41171a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41171b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("3. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("4. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("5. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("7. Create egress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
policyList := []string{
"egress-allow-same-podselector-with-same-namespaceselector1",
"egress-allow-same-podselector-with-same-namespaceselector2",
"egress-allow-same-podselector-with-same-namespaceselector3",
"egress-allow-same-podselector-with-same-namespaceselector4",
"egress-allow-same-podselector-with-same-namespaceselector5",
"egress-allow-same-podselector-with-same-namespaceselector6",
"egress-allow-same-podselector-with-same-namespaceselector7",
"egress-allow-same-podselector-with-same-namespaceselector8",
"egress-allow-same-podselector-with-same-namespaceselector9",
"egress-allow-same-podselector-with-same-namespaceselector10",
}
for _, policyRule := range policyList {
e2e.Logf("The policy rule is: %s", policyRule)
o.Expect(output).To(o.ContainSubstring(policyRule))
}
exutil.By("8. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
exutil.By("9. Disable MultiNetworkpolicy in the cluster")
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
exutil.By("10. All curl should pass again after disabling MacvlanNetworkpolicy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
})
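The multinetworkpolicy tests above all gate on waitForNetworkOperatorState. The helper itself lives elsewhere in the repo; a hypothetical sketch of what it presumably does, assuming it polls `oc get clusteroperator network` and regex-matches the AVAILABLE/PROGRESSING/DEGRADED columns (imports regexp, time, wait and exutil as in the surrounding files; not the repo's actual implementation):
func waitForNetworkOperatorStateSketch(oc *exutil.CLI, intervalSec int, retries int, expectedStatus string) {
pattern := regexp.MustCompile(expectedStatus)
waitErr := wait.Poll(time.Duration(intervalSec)*time.Second, time.Duration(intervalSec*retries)*time.Second, func() (bool, error) {
// Tolerate transient CLI errors and keep polling until the status line matches.
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator", "network").Output()
if err != nil {
return false, nil
}
return pattern.MatchString(output), nil
})
exutil.AssertWaitPollNoErr(waitErr, "network operator never reported status matching "+expectedStatus)
}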
---
test
|
openshift/openshift-tests-private
|
7ba9edfa-e453-4557-b728-e3065f0d9741
|
operator
|
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/operator.go
|
package networking
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
var _ = g.Describe("[sig-networking] SDN CNO", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-operator", exutil.KubeConfigPath())
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:jechen-Medium-44954-Newline is added between user CAs and system CAs [Disruptive]", func() {
var (
dirname = "/tmp/OCP-44954"
name = dirname + "/OCP-44954-custom"
validity = 3650
caSubj = dirname + "/OU=openshift/CN=admin-kubeconfig-signer-custom"
)
// Generation of a new self-signed CA
g.By("1. Generation of a new self-signed CA")
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
e2e.Logf("Generate the CA private key")
opensslCmd := fmt.Sprintf(`openssl genrsa -out %s-ca.key 4096`, name)
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Create the CA certificate")
opensslCmd = fmt.Sprintf(`openssl req -x509 -new -nodes -key %s-ca.key -sha256 -days %d -out %s-ca.crt -subj %s`, name, validity, name, caSubj)
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("2. Create a configmap from the CA")
configmapName := "custom-ca"
customCA := "--from-file=ca-bundle.crt=" + name + "-ca.crt"
e2e.Logf("\n customCA is %v", customCA)
_, cmErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("configmap", configmapName, customCA, "-n", "openshift-config").Output()
o.Expect(cmErr).NotTo(o.HaveOccurred())
defer func() {
_, delErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", configmapName, "-n", "openshift-config").Output()
o.Expect(delErr).NotTo(o.HaveOccurred())
}()
g.By("3. Check if configmap is successfully configured in openshift-config namesapce")
err = checkConfigMap(oc, "openshift-config", configmapName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("cm %v not found", configmapName))
g.By("4. Patch the configmap created above to proxy/cluster")
defer checkClusterStatus(oc, "Ready")
defer patchResourceAsAdmin(oc, "proxy/cluster", "{\"spec\":{\"trustedCA\":{\"name\":\"\"}}}")
patchResourceAsAdmin(oc, "proxy/cluster", "{\"spec\":{\"trustedCA\":{\"name\":\"custom-ca\"}}}")
g.By("5. Verify that a newline is added between custom user CAs and system CAs")
ns := "openshift-config-managed"
// get system CAs
outputFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", ns, "trusted-ca-bundle", "-o", "yaml").OutputToFile("trusted-ca")
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(outputFile)
// get the custom user CA in byte array
certArray, err := exec.Command("bash", "-c", "cat "+name+"-ca.crt").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// get the ending portion of the custom user CA in byte array
certArrayPart := certArray[len(certArray)-35 : len(certArray)-30]
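// (len-35:len-30 falls inside the last base64 line of the PEM body, a few bytes before the END CERTIFICATE trailer, giving a stable token to grep for)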
// grep in the trusted-ca-bundle by the ending portion of the custom user CAs, get 4 lines after
output, err := exec.Command("bash", "-c", "cat "+outputFile+" | grep "+string(certArrayPart)+" -A 4").Output()
e2e.Logf("\nouput string is --->%s<----", string(output))
stringToMatch := string(certArrayPart) + ".+\n.*-----END CERTIFICATE-----\n\n.+\n.+-----BEGIN CERTIFICATE-----"
o.Expect(output).To(o.MatchRegexp(stringToMatch))
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("Author:qiowang-Medium-73156-Verify pod netns iptables usage will be detected and warned [Serial]", func() {
var (
namespace = "openshift-network-operator"
buildPruningBaseDir = exutil.FixturePath("testdata", "router")
testPodYaml = filepath.Join(buildPruningBaseDir, "test-client-pod-withprivilege.yaml")
testPodName = "hello-pod"
)
exutil.By("Create netns privileged pod")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "pod", testPodName).Execute()
oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", ns, "-f", testPodYaml).Execute()
waitPodReady(oc, ns, testPodName)
exutil.By("create iptables in the pod")
cmdErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args(testPodName, "-n", ns, "--", "iptables-nft", "-A", "INPUT", "-p", "tcp", "--dport", "9999", "-j", "DROP").Execute()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
exutil.By("restart iptables-alerter pod which lands on the same node with the test pod, trigger iptables-alerter script")
nodeName, getNodeErr := exutil.GetPodNodeName(oc, ns, testPodName)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
alerterPodName1, getPodNameErr1 := exutil.GetPodName(oc, namespace, "app=iptables-alerter", nodeName)
o.Expect(getPodNameErr1).NotTo(o.HaveOccurred())
o.Expect(alerterPodName1).NotTo(o.BeEmpty())
delPodErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", alerterPodName1, "-n", namespace, "--ignore-not-found=true").Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
exutil.By("check logs of iptables-alerter pod")
alerterPodName2, getPodNameErr2 := exutil.GetPodName(oc, namespace, "app=iptables-alerter", nodeName)
o.Expect(getPodNameErr2).NotTo(o.HaveOccurred())
o.Expect(alerterPodName2).NotTo(o.BeEmpty())
waitPodReady(oc, namespace, alerterPodName2)
podLogs, getLogErr := exutil.WaitAndGetSpecificPodLogs(oc, namespace, "iptables-alerter", alerterPodName2, ns+"/"+testPodName)
o.Expect(getLogErr).NotTo(o.HaveOccurred())
e2e.Logf("The log is : %s", podLogs)
o.Expect(strings.Contains(podLogs, "Logging event for "+ns+"/"+testPodName+" which has iptables rules")).Should(o.BeTrue())
exutil.By("check event for the test namespace")
waitErr := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
events, getEventsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", ns).Output()
o.Expect(getEventsErr).NotTo(o.HaveOccurred())
if !strings.Contains(events, "IPTablesUsageObserved") {
e2e.Logf("Continue to next round")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(waitErr, "Check events failed")
})
})
|
package networking
---
test case
|
openshift/openshift-tests-private
|
0c545838-11c1-4de4-b74a-141b59c1027b
|
Longduration-NonPreRelease-Author:jechen-Medium-44954-Newline is added between user CAs and system CAs [Disruptive]
|
['"fmt"', '"os"', '"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/operator.go
|
g.It("Longduration-NonPreRelease-Author:jechen-Medium-44954-Newline is added between user CAs and system CAs [Disruptive]", func() {
var (
dirname = "/tmp/OCP-44954"
name = dirname + "/OCP-44954-custom"
validity = 3650
caSubj = dirname + "/OU=openshift/CN=admin-kubeconfig-signer-custom"
)
// Generation of a new self-signed CA
g.By("1. Generation of a new self-signed CA")
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
e2e.Logf("Generate the CA private key")
opensslCmd := fmt.Sprintf(`openssl genrsa -out %s-ca.key 4096`, name)
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Create the CA certificate")
opensslCmd = fmt.Sprintf(`openssl req -x509 -new -nodes -key %s-ca.key -sha256 -days %d -out %s-ca.crt -subj %s`, name, validity, name, caSubj)
err = exec.Command("bash", "-c", opensslCmd).Run()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("2. Create a configmap from the CA")
configmapName := "custom-ca"
customCA := "--from-file=ca-bundle.crt=" + name + "-ca.crt"
e2e.Logf("\n customCA is %v", customCA)
_, cmErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("configmap", configmapName, customCA, "-n", "openshift-config").Output()
o.Expect(cmErr).NotTo(o.HaveOccurred())
defer func() {
_, delErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", configmapName, "-n", "openshift-config").Output()
o.Expect(delErr).NotTo(o.HaveOccurred())
}()
g.By("3. Check if configmap is successfully configured in openshift-config namesapce")
err = checkConfigMap(oc, "openshift-config", configmapName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("cm %v not found", configmapName))
g.By("4. Patch the configmap created above to proxy/cluster")
defer checkClusterStatus(oc, "Ready")
defer patchResourceAsAdmin(oc, "proxy/cluster", "{\"spec\":{\"trustedCA\":{\"name\":\"\"}}}")
patchResourceAsAdmin(oc, "proxy/cluster", "{\"spec\":{\"trustedCA\":{\"name\":\"custom-ca\"}}}")
g.By("5. Verify that a newline is added between custom user CAs and system CAs")
ns := "openshift-config-managed"
// get system CAs
outputFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", ns, "trusted-ca-bundle", "-o", "yaml").OutputToFile("trusted-ca")
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(outputFile)
// get the custom user CA in byte array
certArray, err := exec.Command("bash", "-c", "cat "+name+"-ca.crt").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// get the ending portion of the custom user CA in byte array
certArrayPart := certArray[len(certArray)-35 : len(certArray)-30]
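// (len-35:len-30 falls inside the last base64 line of the PEM body, a few bytes before the END CERTIFICATE trailer, giving a stable token to grep for)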
// grep in the trusted-ca-bundle by the ending portion of the custom user CAs, get 4 lines after
output, err := exec.Command("bash", "-c", "cat "+outputFile+" | grep "+string(certArrayPart)+" -A 4").Output()
e2e.Logf("\nouput string is --->%s<----", string(output))
stringToMatch := string(certArrayPart) + ".+\n.*-----END CERTIFICATE-----\n\n.+\n.+-----BEGIN CERTIFICATE-----"
o.Expect(output).To(o.MatchRegexp(stringToMatch))
o.Expect(err).NotTo(o.HaveOccurred())
})
---
test case
|
openshift/openshift-tests-private
|
5092ce25-8f29-4677-8612-bc5969634baf
|
Author:qiowang-Medium-73156-Verify pod netns iptables usage will be detected and warned [Serial]
|
['"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/operator.go
|
g.It("Author:qiowang-Medium-73156-Verify pod netns iptables usage will be detected and warned [Serial]", func() {
var (
namespace = "openshift-network-operator"
buildPruningBaseDir = exutil.FixturePath("testdata", "router")
testPodYaml = filepath.Join(buildPruningBaseDir, "test-client-pod-withprivilege.yaml")
testPodName = "hello-pod"
)
exutil.By("Create netns privileged pod")
ns := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "pod", testPodName).Execute()
oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", ns, "-f", testPodYaml).Execute()
waitPodReady(oc, ns, testPodName)
exutil.By("create iptables in the pod")
cmdErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args(testPodName, "-n", ns, "--", "iptables-nft", "-A", "INPUT", "-p", "tcp", "--dport", "9999", "-j", "DROP").Execute()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
exutil.By("restart iptables-alerter pod which lands on the same node with the test pod, trigger iptables-alerter script")
nodeName, getNodeErr := exutil.GetPodNodeName(oc, ns, testPodName)
o.Expect(getNodeErr).NotTo(o.HaveOccurred())
alerterPodName1, getPodNameErr1 := exutil.GetPodName(oc, namespace, "app=iptables-alerter", nodeName)
o.Expect(getPodNameErr1).NotTo(o.HaveOccurred())
o.Expect(alerterPodName1).NotTo(o.BeEmpty())
delPodErr := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", alerterPodName1, "-n", namespace, "--ignore-not-found=true").Execute()
o.Expect(delPodErr).NotTo(o.HaveOccurred())
exutil.By("check logs of iptables-alerter pod")
alerterPodName2, getPodNameErr2 := exutil.GetPodName(oc, namespace, "app=iptables-alerter", nodeName)
o.Expect(getPodNameErr2).NotTo(o.HaveOccurred())
o.Expect(alerterPodName2).NotTo(o.BeEmpty())
waitPodReady(oc, namespace, alerterPodName2)
podLogs, getLogErr := exutil.WaitAndGetSpecificPodLogs(oc, namespace, "iptables-alerter", alerterPodName2, ns+"/"+testPodName)
o.Expect(getLogErr).NotTo(o.HaveOccurred())
e2e.Logf("The log is : %s", podLogs)
o.Expect(strings.Contains(podLogs, "Logging event for "+ns+"/"+testPodName+" which has iptables rules")).Should(o.BeTrue())
exutil.By("check event for the test namespace")
waitErr := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
events, getEventsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", ns).Output()
o.Expect(getEventsErr).NotTo(o.HaveOccurred())
if !strings.Contains(events, "IPTablesUsageObserved") {
e2e.Logf("Continue to next round")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(waitErr, "Check events failed")
})
---
file
|
openshift/openshift-tests-private
|
5e6ff4c2-02c3-4093-b9e3-0e73ae493870
|
ovs-hw-offload-util
|
import (
"fmt"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
package networking
import (
"fmt"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
func getHWoffloadPF(oc *exutil.CLI, nodename string) string {
pfName := "ens1f0"
nmConnection, checkLogFileErr := exutil.DebugNodeWithOptionsAndChroot(oc, nodename, []string{"-q"}, "nmcli", "-g", "connection.interface-name", "c", "show", "ovs-if-phys0")
o.Expect(checkLogFileErr).NotTo(o.HaveOccurred())
if !strings.Contains(nmConnection, "no such connection profile") {
re := regexp.MustCompile(`(ens\w+)`)
match := re.FindStringSubmatch(nmConnection)
e2e.Logf("The match result is %v", match)
pfName = match[1]
e2e.Logf("The PF of Offload worker nodes is %v", pfName)
}
return pfName
}
func getOvsHWOffloadWokerNodes(oc *exutil.CLI) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/sriov", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
}
func capturePacktes(oc *exutil.CLI, ns string, pod string, intf string, srcip string) string {
var output string
var err error
if strings.Contains(srcip, ":") {
// if ipv6 address
e2e.Logf("start to capture packetes on pod %s using command 'tcpdump tcp -c 10 -vvv -i %s and src net %s/128`", pod, intf, srcip)
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "bash", "-c",
`timeout --preserve-status 10 tcpdump tcp -c 10 -vvv -i `+fmt.Sprintf("%s", intf)+` and src net `+fmt.Sprintf("%s", srcip)+`/128`).Output()
} else {
e2e.Logf("start to capture packetes on pod %s using command tcpdump tcp -c 10 -vvv -i %s and src net %s/32", pod, intf, srcip)
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "bash", "-c",
`timeout --preserve-status 10 tcpdump tcp -c 10 -vvv -i `+fmt.Sprintf("%s", intf)+` and src net `+fmt.Sprintf("%s", srcip)+`/32`).Output()
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
return output
}
func chkCapturePacketsOnIntf(oc *exutil.CLI, ns string, pod string, intf string, srcip string, expectnum string) {
errCheck := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
capResOnIntf := capturePacktes(oc, ns, pod, intf, srcip)
//e2e.Logf("The capture packtes result is %v", capResOnIntf)
reg := regexp.MustCompile(`(\d+) packets captured`)
match := reg.FindStringSubmatch(capResOnIntf)
pktNum := match[1]
e2e.Logf("captured %s packtes on this interface", pktNum)
if pktNum != expectnum {
e2e.Logf("doesn't capture the expected number packets, trying next round ... ")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "cannot capture the expected number of packets, please check")
}
func getPodVFPresentor(oc *exutil.CLI, ns string, pod string) string {
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-o=jsonpath={.spec.nodeName}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// example:
// #ovs-vsctl --columns=name find interface external_ids:iface-id=z1_hello-rc-1-w56tg
// name : eth1
command := fmt.Sprintf("ovs-vsctl --columns=name find interface external_ids:iface-id=%s_%s", ns, pod)
output, err := exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", command)
e2e.Logf("ovs-vsctl --columns=name find interface external_ids:iface-id=%s_%s", ns, pod)
e2e.Logf("The output is %v", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("name"))
// find the match string with "name : eth1"
matchvalue := regexp.MustCompile(`name\s*:\s*(\S+)`).FindStringSubmatch(output)
e2e.Logf("The VF Presentor is %s just test", matchvalue[1])
o.Expect(matchvalue[1]).ShouldNot(o.BeNil())
return matchvalue[1]
}
func startIperfTraffic(oc *exutil.CLI, ns string, pod string, svrip string, duration string) string {
var output string
var err error
if strings.Contains(svrip, ":") {
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "iperf3", "-V", "-c", svrip, "-t", duration).Output()
} else {
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "iperf3", "-c", svrip, "-t", duration).Output()
}
if err != nil {
e2e.Logf("start iperf traffic failed, the error message is %s", output)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
re := regexp.MustCompile(`(\d+\.\d+)\s+Gbits/sec\s+receiver`)
match := re.FindStringSubmatch(output)
o.Expect(len(match)).Should(o.BeNumerically(">", 1), "no Gbits/sec receiver line found in iperf output")
bandWidth := match[1]
e2e.Logf("iperf bandwidth %s", bandWidth)
return bandWidth
}
func startIperfTrafficBackground(oc *exutil.CLI, ns string, pod string, svrip string, duration string) {
var err error
e2e.Logf("start iperf traffic in background")
if strings.Contains(svrip, ":") {
// if ipv6 address
_, _, _, err = oc.Run("exec").Args("-n", ns, pod, "-q", "--", "iperf3", "-V", "-c", svrip, "-t", duration).Background()
} else {
_, _, _, err = oc.Run("exec").Args("-n", ns, pod, "-q", "--", "iperf3", "-c", svrip, "-t", duration).Background()
}
o.Expect(err).NotTo(o.HaveOccurred())
//wait 5 seconds for iperf to start.
time.Sleep(5 * time.Second)
}
// Wait for sriov network policy ready
func waitForOffloadSriovPolicyReady(oc *exutil.CLI, ns string) {
workerNodeList := getOvsHWOffloadWokerNodes(oc)
err := wait.Poll(30*time.Second, 30*time.Minute, func() (bool, error) {
for _, workerNode := range workerNodeList {
nodestatus, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", workerNode, "-n", ns, "-o=jsonpath={.status.syncStatus}").Output()
if err1 != nil {
e2e.Logf("failed to get node %v sriov policy status: %v, retrying...", workerNode, err1)
return false, nil
}
if nodestatus != "Succeeded" {
e2e.Logf("node %v sync is not ready yet: %v, retrying...", workerNode, nodestatus)
return false, nil
}
e2e.Logf("node %v sync is ready now", workerNode)
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "sriovnetworknodestates is not ready")
}
func chkSriovPoolConfig(oc *exutil.CLI, ns string, sriovpoolname string) bool {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworkpoolconfigs.sriovnetwork.openshift.io", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(output, sriovpoolname) {
e2e.Logf("sriovnetworkpoolconfigs is not configured")
return false
}
return true
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
ece81929-4580-4426-90a0-32343dd3d0b4
|
getHWoffloadPF
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func getHWoffloadPF(oc *exutil.CLI, nodename string) string {
pfName := "ens1f0"
nmConnection, checkLogFileErr := exutil.DebugNodeWithOptionsAndChroot(oc, nodename, []string{"-q"}, "nmcli", "-g", "connection.interface-name", "c", "show", "ovs-if-phys0")
o.Expect(checkLogFileErr).NotTo(o.HaveOccurred())
if !strings.Contains(nmConnection, "no such connection profile") {
re := regexp.MustCompile(`(ens\w+)`)
match := re.FindStringSubmatch(nmConnection)
e2e.Logf("The match result is %v", match)
pfName = match[1]
e2e.Logf("The PF of Offload worker nodes is %v", pfName)
}
return pfName
}
|
networking
| ||||
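getHWoffloadPF above defaults to ens1f0 when the ovs-if-phys0 profile is absent and otherwise pulls the first ens* token out of the nmcli output. A self-contained sketch of that extraction logic, with a length guard added and made-up sample strings:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// pfNameFrom extracts the first ens* interface name from nmcli output,
// falling back to a default when the connection profile does not exist.
func pfNameFrom(nmOutput, fallback string) string {
	if strings.Contains(nmOutput, "no such connection profile") {
		return fallback
	}
	if m := regexp.MustCompile(`(ens\w+)`).FindStringSubmatch(nmOutput); len(m) == 2 {
		return m[1]
	}
	return fallback
}

func main() {
	fmt.Println(pfNameFrom("ens1f0np0", "ens1f0"))                      // ens1f0np0
	fmt.Println(pfNameFrom("Error: no such connection profile.", "ens1f0")) // ens1f0
}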
function
|
openshift/openshift-tests-private
|
a7fc9611-cfa6-405d-8bd1-2496b648096e
|
getOvsHWOffloadWokerNodes
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func getOvsHWOffloadWokerNodes(oc *exutil.CLI) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/sriov", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
}
|
networking
| ||||
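getOvsHWOffloadWokerNodes relies on the space-separated list printed by -o=jsonpath={.items[*].metadata.name}; strings.Fields handles both the populated and the empty case. A tiny standalone illustration with hypothetical node names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample jsonpath output for nodes labeled node-role.kubernetes.io/sriov.
	out := "worker-0 worker-1"
	nodes := strings.Fields(out) // ["worker-0" "worker-1"]
	fmt.Println(len(nodes), nodes)

	// An unlabeled cluster prints nothing; Fields returns an empty slice.
	fmt.Println(len(strings.Fields("")))
}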
function
|
openshift/openshift-tests-private
|
77e2c1d1-7050-4a51-8d4c-2b2491b6881c
|
capturePacktes
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func capturePacktes(oc *exutil.CLI, ns string, pod string, intf string, srcip string) string {
var output string
var err error
if strings.Contains(srcip, ":") {
// if ipv6 address
e2e.Logf("start to capture packetes on pod %s using command 'tcpdump tcp -c 10 -vvv -i %s and src net %s/128`", pod, intf, srcip)
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "bash", "-c",
`timeout --preserve-status 10 tcpdump tcp -c 10 -vvv -i `+fmt.Sprintf("%s", intf)+` and src net `+fmt.Sprintf("%s", srcip)+`/128`).Output()
} else {
e2e.Logf("start to capture packetes on pod %s using command tcpdump tcp -c 10 -vvv -i %s and src net %s/32", pod, intf, srcip)
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "bash", "-c",
`timeout --preserve-status 10 tcpdump tcp -c 10 -vvv -i `+fmt.Sprintf("%s", intf)+` and src net `+fmt.Sprintf("%s", srcip)+`/32`).Output()
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
return output
}
|
networking
| ||||
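capturePacktes picks the tcpdump host mask from a colon check on the source IP: /128 for IPv6, /32 for IPv4. A self-contained sketch of just that command construction, with made-up sample addresses:

package main

import (
	"fmt"
	"strings"
)

// tcpdumpCmd builds the capture command used above: IPv6 sources get a /128
// host mask, IPv4 sources a /32. Only the mask selection is shown here.
func tcpdumpCmd(intf, srcIP string) string {
	mask := "/32"
	if strings.Contains(srcIP, ":") { // crude IPv6 detection, as in the helper
		mask = "/128"
	}
	return fmt.Sprintf("timeout --preserve-status 10 tcpdump tcp -c 10 -vvv -i %s and src net %s%s", intf, srcIP, mask)
}

func main() {
	fmt.Println(tcpdumpCmd("eth0", "10.128.2.15"))
	fmt.Println(tcpdumpCmd("eth0", "fd01::5"))
}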
function
|
openshift/openshift-tests-private
|
d057e15e-25ed-4c1f-a806-d9518151b628
|
chkCapturePacketsOnIntf
|
['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func chkCapturePacketsOnIntf(oc *exutil.CLI, ns string, pod string, intf string, srcip string, expectnum string) {
errCheck := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
capResOnIntf := capturePacktes(oc, ns, pod, intf, srcip)
//e2e.Logf("The capture packtes result is %v", capResOnIntf)
reg := regexp.MustCompile(`(\d+) packets captured`)
match := reg.FindStringSubmatch(capResOnIntf)
pktNum := match[1]
e2e.Logf("captured %s packtes on this interface", pktNum)
if pktNum != expectnum {
e2e.Logf("doesn't capture the expected number packets, trying next round ... ")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "cannot capture the expected number of packets, please check")
}
|
networking
| ||||
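The retry loop above keys off tcpdump's "N packets captured" summary line. A standalone, nil-safe version of that extraction, run against made-up output:

package main

import (
	"fmt"
	"regexp"
)

var capturedRe = regexp.MustCompile(`(\d+) packets captured`)

// packetsCaptured pulls the count out of tcpdump's summary line, returning
// ok=false instead of panicking when the line is absent (e.g. truncated output).
func packetsCaptured(out string) (string, bool) {
	m := capturedRe.FindStringSubmatch(out)
	if len(m) < 2 {
		return "", false
	}
	return m[1], true
}

func main() {
	n, ok := packetsCaptured("10 packets captured\n0 packets dropped by kernel")
	fmt.Println(n, ok) // 10 true
	_, ok = packetsCaptured("tcpdump: no output")
	fmt.Println(ok) // false
}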
function
|
openshift/openshift-tests-private
|
f3eb53c8-452e-43cf-b570-5ff9631d46b6
|
getPodVFPresentor
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func getPodVFPresentor(oc *exutil.CLI, ns string, pod string) string {
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-o=jsonpath={.spec.nodeName}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// example:
// #ovs-vsctl --columns=name find interface external_ids:iface-id=z1_hello-rc-1-w56tg
// name : eth1
command := fmt.Sprintf("ovs-vsctl --columns=name find interface external_ids:iface-id=%s_%s", ns, pod)
output, err := exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", command)
e2e.Logf("ovs-vsctl --columns=name find interface external_ids:iface-id=%s_%s", ns, pod)
e2e.Logf("The output is %v", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("name"))
// find the match string with "name : eth1"
matchvalue := regexp.MustCompile(`name\s*:\s*(\S+)`).FindStringSubmatch(output)
e2e.Logf("The VF Presentor is %s just test", matchvalue[1])
o.Expect(matchvalue[1]).ShouldNot(o.BeNil())
return matchvalue[1]
}
|
networking
| ||||
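getPodVFPresentor greps the ovs-vsctl record for the interface name. A standalone sketch of the same "name : eth1" parse, run on a fabricated output line:

package main

import (
	"fmt"
	"regexp"
)

// vfRepresentor extracts the interface name from ovs-vsctl output such as
// "name                : eth1". Returns "" when no match is found.
func vfRepresentor(out string) string {
	m := regexp.MustCompile(`name\s*:\s*(\S+)`).FindStringSubmatch(out)
	if len(m) < 2 {
		return ""
	}
	return m[1]
}

func main() {
	out := `name                : eth1`
	fmt.Println(vfRepresentor(out)) // eth1
}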
function
|
openshift/openshift-tests-private
|
13bbb982-70a9-4739-9e04-1eb21e39ff01
|
startIperfTraffic
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func startIperfTraffic(oc *exutil.CLI, ns string, pod string, svrip string, duration string) string {
var output string
var err error
if strings.Contains(svrip, ":") {
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "iperf3", "-V", "-c", svrip, "-t", duration).Output()
} else {
output, err = oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, pod, "iperf3", "-c", svrip, "-t", duration).Output()
}
if err != nil {
e2e.Logf("start iperf traffic failed, the error message is %s", output)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
re := regexp.MustCompile(`(\d+\.\d+)\s+Gbits/sec\s+receiver`)
match := re.FindStringSubmatch(output)
o.Expect(len(match)).Should(o.BeNumerically(">", 1), "no Gbits/sec receiver line found in iperf output")
bandWidth := match[1]
e2e.Logf("iperf bandwidth %s", bandWidth)
return bandWidth
}
|
networking
| ||||
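startIperfTraffic scrapes the receiver-side throughput from iperf3's summary; note the pattern only matches Gbits/sec lines, so slower Mbits/sec results would not parse. A standalone, guarded version of the parse on a made-up summary line:

package main

import (
	"fmt"
	"regexp"
)

var recvRe = regexp.MustCompile(`(\d+\.\d+)\s+Gbits/sec\s+receiver`)

// receiverGbps extracts the receiver-side throughput from iperf3 output.
// Note: it only matches Gbits/sec lines; Mbits/sec results return ok=false.
func receiverGbps(out string) (string, bool) {
	m := recvRe.FindStringSubmatch(out)
	if len(m) < 2 {
		return "", false
	}
	return m[1], true
}

func main() {
	sample := "[  5]   0.00-60.00  sec   158 GBytes  22.6 Gbits/sec                  receiver"
	fmt.Println(receiverGbps(sample)) // 22.6 true
}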
function
|
openshift/openshift-tests-private
|
b05241c7-6527-4b95-acd5-26a9c5cf2c84
|
startIperfTrafficBackground
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func startIperfTrafficBackground(oc *exutil.CLI, ns string, pod string, svrip string, duration string) {
var err error
e2e.Logf("start iperf traffic in background")
if strings.Contains(svrip, ":") {
// if ipv6 address
_, _, _, err = oc.Run("exec").Args("-n", ns, pod, "-q", "--", "iperf3", "-V", "-c", svrip, "-t", duration).Background()
} else {
_, _, _, err = oc.Run("exec").Args("-n", ns, pod, "-q", "--", "iperf3", "-c", svrip, "-t", duration).Background()
}
o.Expect(err).NotTo(o.HaveOccurred())
//wait 5 seconds for iperf to start.
time.Sleep(5 * time.Second)
}
|
networking
| ||||
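startIperfTrafficBackground launches the client via Background() and sleeps briefly so traffic is flowing before the capture starts. A rough standalone equivalent with os/exec, using sleep as a stand-in for the iperf3 client (assumes a Unix-like host):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// Start a long-running command without waiting for it, as the helper
	// does with the backgrounded `oc exec ... iperf3` invocation.
	cmd := exec.Command("sleep", "10") // stand-in for the iperf3 client
	if err := cmd.Start(); err != nil {
		fmt.Println("failed to start:", err)
		return
	}
	// Give the client a few seconds to ramp up before measuring anything.
	time.Sleep(2 * time.Second)
	fmt.Println("background pid:", cmd.Process.Pid)
	// In a real test the process keeps running; here we just reap it.
	_ = cmd.Process.Kill()
	_, _ = cmd.Process.Wait()
}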
function
|
openshift/openshift-tests-private
|
856734d1-2547-4d9a-9fa5-15766744f63d
|
waitForOffloadSriovPolicyReady
|
['"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func waitForOffloadSriovPolicyReady(oc *exutil.CLI, ns string) {
workerNodeList := getOvsHWOffloadWokerNodes(oc)
err := wait.Poll(30*time.Second, 30*time.Minute, func() (bool, error) {
for _, workerNode := range workerNodeList {
nodestatus, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", workerNode, "-n", ns, "-o=jsonpath={.status.syncStatus}").Output()
if err1 != nil {
e2e.Logf("failed to get node %v sriov policy status: %v, retrying...", workerNode, err1)
return false, nil
}
if nodestatus != "Succeeded" {
e2e.Logf("node %v sync is not ready yet: %v, retrying...", workerNode, nodestatus)
return false, nil
}
e2e.Logf("node %v sync is ready now", workerNode)
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "sriovnetworknodestates is not ready")
}
|
networking
| ||||
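waitForOffloadSriovPolicyReady retries until every labeled node reports syncStatus Succeeded. The per-round check is sketched standalone below, with a simulated status map in place of the sriovnetworknodestates lookup:

package main

import (
	"fmt"
	"time"
)

// allSucceeded reports whether every node's sync status is "Succeeded",
// the same per-node check the poll above performs each round.
func allSucceeded(status map[string]string) bool {
	for node, s := range status {
		if s != "Succeeded" {
			fmt.Printf("node %s not ready yet: %s\n", node, s)
			return false
		}
	}
	return true
}

func main() {
	status := map[string]string{"worker-0": "InProgress", "worker-1": "Succeeded"}
	deadline := time.Now().Add(3 * time.Second)
	for time.Now().Before(deadline) {
		if allSucceeded(status) {
			fmt.Println("all nodes synced")
			return
		}
		status["worker-0"] = "Succeeded" // simulate the operator finishing
		time.Sleep(time.Second)
	}
	fmt.Println("timed out waiting for sriovnetworknodestates")
}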
function
|
openshift/openshift-tests-private
|
6cb73d4e-e4d6-4e70-bbcf-dddd5e12685b
|
chkSriovPoolConfig
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload-util.go
|
func chkSriovPoolConfig(oc *exutil.CLI, ns string, sriovpoolname string) bool {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworkpoolconfigs.sriovnetwork.openshift.io", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(output, sriovpoolname) {
e2e.Logf("sriovnetworkpoolconfigs is not configured")
return false
}
return true
}
|
networking
| ||||
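chkSriovPoolConfig is a bare substring test on the oc get listing, so a pool name that is a prefix of another would also match. A sketch of a slightly stricter whole-field match, on a fabricated listing:

package main

import (
	"fmt"
	"strings"
)

// hasPool reports whether the listing contains the pool name as a whole
// field rather than a bare substring, avoiding prefix false-positives.
func hasPool(listing, name string) bool {
	for _, f := range strings.Fields(listing) {
		if f == name {
			return true
		}
	}
	return false
}

func main() {
	listing := "NAME                              AGE\nsriovnetworkpoolconfig-offload    5d"
	fmt.Println(hasPool(listing, "sriovnetworkpoolconfig-offload")) // true
	fmt.Println(hasPool(listing, "sriovnetworkpoolconfig"))         // false
}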
test
|
openshift/openshift-tests-private
|
6d576535-2735-4157-8df9-5c8885d88706
|
ovs-hw-offload
|
import (
"context"
"fmt"
"path/filepath"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload.go
|
package networking
import (
"context"
"fmt"
"path/filepath"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN ovs hardware offload", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("ovsoffload-"+getRandomString(), exutil.KubeConfigPath())
//deviceID = "101d"
vendorID = "15b3"
sriovOpNs = "openshift-sriov-network-operator"
sriovPoolConfigName = "sriovnetworkpoolconfig-offload"
networkBaseDir string
sriovBaseDir string
iperfServerTmp string
iperfClientTmp string
iperfNormalServerTmp string
iperfNormalClientTmp string
iperfSvcTmp string
iperfServerTmp_v6 string
iperfNormalServerTmp_v6 string
iperfSvcTmp_v6 string
ipStackType string
)
g.BeforeEach(func() {
// Skip SR-IOV cases for now to avoid constant CI failures when the SR-IOV operator is not installed; an operator-installation step will be added later.
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), "openshift-sriov-network-operator", metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
g.Skip("the cluster does not install sriov operator")
}
}
if !chkSriovPoolConfig(oc, sriovOpNs, sriovPoolConfigName) {
g.Skip("the cluster does not configure sriovnetworkpoolconfigs. skip this testing!")
}
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
iperfServerTmp = filepath.Join(sriovBaseDir, "iperf-server-template.json")
iperfClientTmp = filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfNormalServerTmp = filepath.Join(sriovBaseDir, "iperf-server-normal-template.json")
iperfNormalClientTmp = filepath.Join(sriovBaseDir, "iperf-rc-normal-template.json")
iperfSvcTmp = filepath.Join(sriovBaseDir, "iperf-service-template.json")
iperfServerTmp_v6 = filepath.Join(sriovBaseDir, "iperf-server-ipv6-template.json")
iperfNormalServerTmp_v6 = filepath.Join(sriovBaseDir, "iperf-server-ipv6-normal-template.json")
iperfSvcTmp_v6 = filepath.Join(sriovBaseDir, "iperf-service-ipv6-template.json")
ipStackType = checkIPStackType(oc)
e2e.Logf("This cluster is %s OCP", ipStackType)
})
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45390-pod to pod traffic in different hosts can work well with ovs hw offload as default network [Disruptive]", func() {
var (
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
pfName = getHWoffloadPF(oc, workerNodeList[0])
hostnwPod0Name = "hostnw-pod-45390-worker0"
hostnwPod1Name = "hostnw-pod-45390-worker1"
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod with ovs hwoffload vf on worker0
iperfServerPod.create(oc, "PODNAME="+iperfServerPod.name, "NAMESPACE="+iperfServerPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPod.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIP := getPodIPv4(oc, oc.Namespace(), iperfServerPod.name)
iperfServerVF := getPodVFPresentor(oc, iperfServerPod.namespace, iperfServerPod.name)
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod with ovs hwoffload vf on worker1
defer iperfClientPod.delete(oc)
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[1],
"NETTYPE="+offloadNetType)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[1])
o.Expect(err).NotTo(o.HaveOccurred())
iperfClientPod.name = iperfClientName
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
exutil.By("5) ########### Create iperf Pods with normal default interface ##########")
iperfServerPod1 := sriovNetResource{
name: "iperf-server-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0
iperfServerPod1.create(oc, "PODNAME="+iperfServerPod1.name, "NAMESPACE="+iperfServerPod1.namespace, "NODENAME="+workerNodeList[0])
defer iperfServerPod1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIP1 := getPodIPv4(oc, oc.Namespace(), iperfServerPod1.name)
iperfClientPod1 := sriovNetResource{
name: "iperf-rc-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalClientTmp,
kind: "pod",
}
//create iperf client pod with normal default interface on worker1
iperfClientPod1.create(oc, "PODNAME="+iperfClientPod1.name, "NAMESPACE="+iperfClientPod1.namespace, "NODENAME="+workerNodeList[1])
defer iperfClientPod1.delete(oc)
iperfClientName1, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-normal", workerNodeList[1])
o.Expect(err).NotTo(o.HaveOccurred())
iperfClientPod1.name = iperfClientName1
errPodRdy4 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-normal")
exutil.AssertWaitPollNoErr(errPodRdy4, fmt.Sprintf("iperf client pod isn't ready"))
exutil.By("6) ########### Create hostnetwork Pods to capture packets ##########")
//create hostnetwork pod on worker0 and worker1 to capture packets
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
hostnwPod1 := sriovNetResource{
name: hostnwPod1Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pods on worker0 and worker1 to capture packets
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
hostnwPod1.create(oc, "PODNAME="+hostnwPod1.name, "NODENAME="+workerNodeList[1])
defer hostnwPod1.delete(oc)
errPodRdy5 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy5, fmt.Sprintf("hostnetwork pod isn't ready"))
errPodRdy6 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy6, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("7) ########### Check Bandwidth between iperf client and iperf server pods ##########")
// enable hardware offload should improve the performance
// get throughput on pods which attached hardware offload enabled VF
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get throughput on pods with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfServerIP1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("8) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfServerPod.namespace, iperfServerPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("9) ########### Create ipv6 iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerPodv6 := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload vf on worker0
iperfServerPodv6.create(oc, "PODNAME="+iperfServerPodv6.name, "NAMESPACE="+iperfServerPodv6.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPodv6.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIPv6 := getPodIPv6(oc, oc.Namespace(), iperfServerPodv6.name, "dualstack")
iperfServerVF := getPodVFPresentor(oc, iperfServerPodv6.namespace, iperfServerPodv6.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("10) ########### Create ipv6 iperf Pods with normal default interface ##########")
iperfServerPodv6_1 := sriovNetResource{
name: "iperf-server-normal-ipv6",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp_v6,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0
iperfServerPodv6_1.create(oc, "PODNAME="+iperfServerPodv6_1.name, "NAMESPACE="+iperfServerPodv6_1.namespace, "NODENAME="+workerNodeList[0])
defer iperfServerPodv6_1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIPv6_1 := getPodIPv6(oc, oc.Namespace(), iperfServerPodv6_1.name, "dualstack")
exutil.By("11) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
// enable hardware offload should improve the performance
// get throughput on pods which attached hardware offload enabled VF
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get throughput on pods with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfServerIPv6_1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("12) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfServerPodv6.namespace, iperfServerPodv6.name, "eth0", iperfClientIPv6, "10")
}
})
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45388-pod to pod traffic in same host can work well with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
hostnwPod0Name = "hostnw-pod-45388-worker0"
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerTmp := filepath.Join(sriovBaseDir, "iperf-server-template.json")
iperfServerPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod on worker0
iperfServerPod.create(oc, "PODNAME="+iperfServerPod.name, "NAMESPACE="+iperfServerPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPod.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIP := getPodIPv4(oc, oc.Namespace(), iperfServerPod.name)
iperfServerVF := getPodVFPresentor(oc, iperfServerPod.namespace, iperfServerPod.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod on worker0
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[0],
"NETTYPE="+offloadNetType)
defer iperfClientPod.delete(oc)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[0])
o.Expect(err).NotTo(o.HaveOccurred())
iperfClientPod.name = iperfClientName
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
exutil.By("5) ########### Create hostnetwork Pods to capture packets ##########")
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pod on worker0 to capture packets
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("6) ########### Check Bandwidth between iperf client and iperf server pods ##########")
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "20s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("7) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfServerPod.namespace, iperfServerPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("8) ########### Create ipv6 perf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerPodv6 := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload vf on worker0
iperfServerPodv6.create(oc, "PODNAME="+iperfServerPodv6.name, "NAMESPACE="+iperfServerPodv6.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPodv6.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIPv6 := getPodIPv6(oc, oc.Namespace(), iperfServerPodv6.name, "dualstack")
iperfServerVF := getPodVFPresentor(oc, iperfServerPodv6.namespace, iperfServerPodv6.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("9) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
// enable hardware offload should improve the performance
// get throughput on pods which attached hardware offload enabled VF
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("10) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfServerPodv6.namespace, iperfServerPodv6.name, "eth0", iperfClientIPv6, "10")
}
})
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45396-pod to service traffic via cluster ip between diffrent hosts can work well with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
hostnwPod0Name = "hostnw-pod-45396-worker0"
hostnwPod1Name = "hostnw-pod-45396-worker1"
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf Server clusterip service and client Pod on diffenrent hosts and attach sriov VF as default interface ##########")
iperfSvc := sriovNetResource{
name: "iperf-clusterip-service",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod with ovs hwoffload VF on worker0 and create clusterip service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP := getSvcIPv4(oc, oc.Namespace(), iperfSvc.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPod.namespace, iperfSvcPod.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod with ovs hw offload VF on worker1
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[1],
"NETTYPE="+offloadNetType)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[1])
o.Expect(err).NotTo(o.HaveOccurred())
iperfClientPod.name = iperfClientName
defer iperfClientPod.delete(oc)
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
exutil.By("5) ########### Create iperf clusterip service and iperf client pod with normal default interface ##########")
iperfSvc1 := sriovNetResource{
name: "iperf-service-normal",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod1 := sriovNetResource{
name: "iperf-server-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0 and create clusterip service
iperfSvcPod1.create(oc, "PODNAME="+iperfSvcPod1.name, "NAMESPACE="+iperfSvcPod1.namespace, "NODENAME="+workerNodeList[0])
defer iperfSvcPod1.delete(oc)
iperfSvc1.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc1.name, "PODNAME="+iperfSvcPod1.name, "NAMESPACE="+iperfSvc1.namespace)
defer iperfSvc1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP1 := getSvcIPv4(oc, oc.Namespace(), iperfSvc1.name)
iperfClientPod1 := sriovNetResource{
name: "iperf-rc-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalClientTmp,
kind: "pod",
}
//create iperf client pod with normal default interface on worker1
iperfClientPod1.create(oc, "PODNAME="+iperfClientPod1.name, "NAMESPACE="+iperfClientPod1.namespace, "NODENAME="+workerNodeList[1])
defer iperfClientPod1.delete(oc)
iperfClientName1, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-normal", workerNodeList[1])
o.Expect(err).NotTo(o.HaveOccurred())
iperfClientPod1.name = iperfClientName1
errPodRdy4 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-normal")
exutil.AssertWaitPollNoErr(errPodRdy4, fmt.Sprintf("iperf client pod isn't ready"))
exutil.By("6) ########### Check Bandwidth between iperf client and iperf clusterip service ##########")
// enable hardware offload should improve the performance
// get bandwidth on iperf client which attached hardware offload enabled VF
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get bandwidth on iperf client with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIP1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("7) ########### Create hostnetwork Pods to capture packets ##########")
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
hostnwPod1 := sriovNetResource{
name: hostnwPod1Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pods on worker0 and worker1 to capture packets
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
hostnwPod1.create(oc, "PODNAME="+hostnwPod1.name, "NODENAME="+workerNodeList[1])
defer hostnwPod1.delete(oc)
errPodRdy5 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy5, fmt.Sprintf("hostnetwork pod isn't ready"))
errPodRdy6 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy6, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("8) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfSvcPod.namespace, iperfSvcPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("4) ########### Create ipv6 iperf Server clusterip service and client Pod on diffenrent hosts and attach sriov VF as default interface ##########")
iperfSvcv6 := sriovNetResource{
name: "iperf-clusterip-service-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPodv6 := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload VF on worker0 and create clusterip service
iperfSvcPodv6.create(oc, "PODNAME="+iperfSvcPodv6.name, "NAMESPACE="+iperfSvcPodv6.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPodv6.delete(oc)
iperfSvcv6.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvcv6.name, "PODNAME="+iperfSvcPodv6.name, "NAMESPACE="+iperfSvcv6.namespace)
defer iperfSvcv6.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIPv6 := getSvcIPv6(oc, oc.Namespace(), iperfSvcv6.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPodv6.namespace, iperfSvcPodv6.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("5) ########### Create ipv6 iperf clusterip service and iperf client pod with normal default interface ##########")
iperfSvcv6_1 := sriovNetResource{
name: "iperf-clusterip-service-normal-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPodv6_1 := sriovNetResource{
name: "iperf-server-normal-ipv6",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp_v6,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0 and create clusterip service
iperfSvcPodv6_1.create(oc, "PODNAME="+iperfSvcPodv6_1.name, "NAMESPACE="+iperfSvcPodv6_1.namespace, "NODENAME="+workerNodeList[0])
defer iperfSvcPodv6_1.delete(oc)
iperfSvcv6_1.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvcv6_1.name, "PODNAME="+iperfSvcPodv6_1.name, "NAMESPACE="+iperfSvcv6_1.namespace)
defer iperfSvcv6_1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIPv6_1 := getSvcIPv6(oc, oc.Namespace(), iperfSvcv6_1.name)
exutil.By("6) ########### Check ipv6 traffic Bandwidth between iperf client and iperf clusterip service ##########")
// enable hardware offload should improve the performance
// get bandwidth on iperf client which attached hardware offload enabled VF
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get bandwidth on iperf client with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIPv6_1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("8) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfSvcPodv6.namespace, iperfSvcPodv6.name, "eth0", iperfClientIPv6, "10")
}
})
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45395-pod to service traffic via cluster ip in same host can work well with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
hostnwPod0Name = "hostnw-pod-45388-worker0"
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf clusterip service and client Pod on same host and attach sriov VF as default interface ##########")
iperfSvcTmp := filepath.Join(sriovBaseDir, "iperf-service-template.json")
iperfServerTmp := filepath.Join(sriovBaseDir, "iperf-server-template.json")
iperfSvc := sriovNetResource{
name: "iperf-clusterip-service",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod on worker0 and create clusterip service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP := getSvcIPv4(oc, oc.Namespace(), iperfSvc.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPod.namespace, iperfSvcPod.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod on worker0
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[0],
"NETTYPE="+offloadNetType)
defer iperfClientPod.delete(oc)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[0])
o.Expect(err).NotTo(o.HaveOccurred())
iperfClientPod.name = iperfClientName
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
exutil.By("5) ########### Create hostnetwork Pods to capture packets ##########")
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pod on worker0
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("6) ########### Check Bandwidth between iperf client and iperf server pods ##########")
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "20s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("7) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfSvcPod.namespace, iperfSvcPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("8) ########### Create ipv6 iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfSvc := sriovNetResource{
name: "iperf-clusterip-service-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload VF on worker0 and create clusterip service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIPv6 := getSvcIPv6(oc, oc.Namespace(), iperfSvc.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPod.namespace, iperfSvcPod.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("9) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "20s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("10) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "150s")
// VF representors should not capture packets once hardware offload takes effect (only the initial packets may be captured).
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packets
chkCapturePacketsOnIntf(oc, iperfSvcPod.namespace, iperfSvcPod.name, "eth0", iperfClientIPv6, "10")
}
})
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-46018-test pod to service traffic via nodeport with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
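// a pre-existing policy is reused and left in place on cleanup; a policy created here is
// removed by the deferred rmSriovNetworkPolicy when the test ends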
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf nodeport service and create 2 client Pods on 2 hosts and attach sriov VF as default interface ##########")
iperfSvcTmp := filepath.Join(sriovBaseDir, "iperf-service-template.json")
iperfServerTmp := filepath.Join(sriovBaseDir, "iperf-server-template.json")
iperfSvc := sriovNetResource{
name: "iperf-nodeport-service",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod on worker0 and create nodeport service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"NodePort", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP := getSvcIPv4(oc, oc.Namespace(), iperfSvc.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod1 := sriovNetResource{
name: "iperf-rc-1",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
iperfClientPod2 := sriovNetResource{
name: "iperf-rc-2",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod on worker0
iperfClientPod1.create(oc, "PODNAME="+iperfClientPod1.name, "NAMESPACE="+iperfClientPod1.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[0],
"NETTYPE="+offloadNetType)
defer iperfClientPod1.delete(oc)
iperfClientName1, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-1", workerNodeList[0])
iperfClientPod1.name = iperfClientName1
o.Expect(err).NotTo(o.HaveOccurred())
//create iperf client pod on worker1
iperfClientPod2.create(oc, "PODNAME="+iperfClientPod2.name, "NAMESPACE="+iperfClientPod2.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[1],
"NETTYPE="+offloadNetType)
defer iperfClientPod2.delete(oc)
iperfClientName2, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-2", workerNodeList[1])
iperfClientPod2.name = iperfClientName2
o.Expect(err).NotTo(o.HaveOccurred())
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-1")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-2")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf client pod isn't ready"))
exutil.By("5) ########### Check Bandwidth between iperf client and iperf server pods ##########")
//traffic should pass
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIP, "20s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth1)).Should(o.BeNumerically(">", 0.0))
//traffic should pass
bandWithStr2 := startIperfTraffic(oc, iperfClientPod2.namespace, iperfClientPod2.name, iperfSvcIP, "20s")
bandWidth2, paseFloatErr2 := strconv.ParseFloat(bandWithStr2, 32)
o.Expect(paseFloatErr2).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth2)).Should(o.BeNumerically(">", 0.0))
if ipStackType == "dualstack" {
exutil.By("6) ########### Create ipv6 iperf nodeport service and create 2 client Pods on 2 hosts and attach sriov VF as default interface ##########")
iperfSvc := sriovNetResource{
name: "iperf-nodeport-service-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod on worker0 and create nodeport service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"NodePort", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("ipv6 iperf server pod isn't ready"))
iperfSvcIPv6 := getSvcIPv6(oc, oc.Namespace(), iperfSvc.name)
exutil.By("7) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
//traffic should pass
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIPv6, "20s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth1)).Should(o.BeNumerically(">", 0.0))
//traffic should pass
bandWithStr2 := startIperfTraffic(oc, iperfClientPod2.namespace, iperfClientPod2.name, iperfSvcIPv6, "20s")
bandWidth2, paseFloatErr2 := strconv.ParseFloat(bandWithStr2, 32)
o.Expect(paseFloatErr2).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth2)).Should(o.BeNumerically(">", 0.0))
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
279e5aab-1d2c-47a8-89b2-87634ce48e8f
|
NonPreRelease-Longduration-Author:yingwang-Medium-45390-pod to pod traffic in different hosts can work well with ovs hw offload as default network [Disruptive]
|
['"fmt"', '"path/filepath"', '"strconv"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload.go
|
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45390-pod to pod traffic in different hosts can work well with ovs hw offload as default network [Disruptive]", func() {
var (
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
pfName = getHWoffloadPF(oc, workerNodeList[0])
hostnwPod0Name = "hostnw-pod-45390-worker0"
hostnwPod1Name = "hostnw-pod-45390-worker1"
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod with ovs hwoffload vf on worker0
iperfServerPod.create(oc, "PODNAME="+iperfServerPod.name, "NAMESPACE="+iperfServerPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPod.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIP := getPodIPv4(oc, oc.Namespace(), iperfServerPod.name)
iperfServerVF := getPodVFPresentor(oc, iperfServerPod.namespace, iperfServerPod.name)
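// getPodVFPresentor is assumed to return the VF representor netdev on the host, i.e. the
// switchdev port mirroring the pod's VF; OVS attaches flows to the representor, and only
// slow-path (not yet offloaded) packets traverse it, which the capture checks below rely on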
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod with ovs hwoffload vf on worker1
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[1],
"NETTYPE="+offloadNetType)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[1])
o.Expect(err).NotTo(o.HaveOccurred())
iperfClientPod.name = iperfClientName
defer iperfClientPod.delete(oc)
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
exutil.By("5) ########### Create iperf Pods with normal default interface ##########")
iperfServerPod1 := sriovNetResource{
name: "iperf-server-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0
iperfServerPod1.create(oc, "PODNAME="+iperfServerPod1.name, "NAMESPACE="+iperfServerPod1.namespace, "NODENAME="+workerNodeList[0])
defer iperfServerPod1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIP1 := getPodIPv4(oc, oc.Namespace(), iperfServerPod1.name)
iperfClientPod1 := sriovNetResource{
name: "iperf-rc-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalClientTmp,
kind: "pod",
}
//create iperf client pod with normal default interface on worker1
iperfClientPod1.create(oc, "PODNAME="+iperfClientPod1.name, "NAMESPACE="+iperfClientPod1.namespace, "NODENAME="+workerNodeList[1])
defer iperfClientPod1.delete(oc)
iperfClientName1, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-normal", workerNodeList[1])
iperfClientPod1.name = iperfClientName1
o.Expect(err).NotTo(o.HaveOccurred())
errPodRdy4 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-normal")
exutil.AssertWaitPollNoErr(errPodRdy4, fmt.Sprintf("iperf client pod isn't ready"))
exutil.By("6) ########### Create hostnetwork Pods to capture packets ##########")
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
hostnwPod1 := sriovNetResource{
name: hostnwPod1Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pods on worker0 and worker1 to capture packets
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
hostnwPod1.create(oc, "PODNAME="+hostnwPod1.name, "NODENAME="+workerNodeList[1])
defer hostnwPod1.delete(oc)
errPodRdy5 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy5, fmt.Sprintf("hostnetwork pod isn't ready"))
errPodRdy6 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy6, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("7) ########### Check Bandwidth between iperf client and iperf server pods ##########")
// enabling hardware offload should improve performance
// get throughput between pods attached to hardware-offload-enabled VFs
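// with offload, forwarding happens in the NIC embedded switch and bypasses the host
// kernel/OVS datapath, so the offloaded pair is expected to outperform the veth-backed pair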
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get throughput on pods with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfServerIP1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("8) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfServerPod.namespace, iperfServerPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("9) ########### Create ipv6 iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerPodv6 := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload vf on worker0
iperfServerPodv6.create(oc, "PODNAME="+iperfServerPodv6.name, "NAMESPACE="+iperfServerPodv6.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPodv6.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIPv6 := getPodIPv6(oc, oc.Namespace(), iperfServerPodv6.name, "dualstack")
iperfServerVF := getPodVFPresentor(oc, iperfServerPodv6.namespace, iperfServerPodv6.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("10) ########### Create ipv6 iperf Pods with normal default interface ##########")
iperfServerPodv6_1 := sriovNetResource{
name: "iperf-server-normal-ipv6",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp_v6,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0
iperfServerPodv6_1.create(oc, "PODNAME="+iperfServerPodv6_1.name, "NAMESPACE="+iperfServerPodv6_1.namespace, "NODENAME="+workerNodeList[0])
defer iperfServerPodv6_1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIPv6_1 := getPodIPv6(oc, oc.Namespace(), iperfServerPodv6_1.name, "dualstack")
exutil.By("11) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
// enabling hardware offload should improve performance
// get throughput between pods attached to hardware-offload-enabled VFs
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get throughput on pods with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfServerIPv6_1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("12) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfServerPodv6.namespace, iperfServerPodv6.name, "eth0", iperfClientIPv6, "10")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
92172569-6202-4329-94e2-0870af772135
|
NonPreRelease-Longduration-Author:yingwang-Medium-45388-pod to pod traffic in same host can work well with ovs hw offload as default network [Disruptive]
|
['"fmt"', '"path/filepath"', '"strconv"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload.go
|
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45388-pod to pod traffic in same host can work well with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
hostnwPod0Name = "hostnw-pod-45388-worker0"
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerTmp := filepath.Join(sriovBaseDir, "iperf-server-template.json")
iperfServerPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod on worker0
iperfServerPod.create(oc, "PODNAME="+iperfServerPod.name, "NAMESPACE="+iperfServerPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPod.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIP := getPodIPv4(oc, oc.Namespace(), iperfServerPod.name)
iperfServerVF := getPodVFPresentor(oc, iperfServerPod.namespace, iperfServerPod.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod on worker0
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[0],
"NETTYPE="+offloadNetType)
defer iperfClientPod.delete(oc)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[0])
iperfClientPod.name = iperfClientName
o.Expect(err).NotTo(o.HaveOccurred())
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
exutil.By("5) ########### Create hostnetwork Pods to capture packets ##########")
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pod on worker0 to capture packets
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("6) ########### Check Bandwidth between iperf client and iperf server pods ##########")
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "20s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("7) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIP, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfServerPod.namespace, iperfServerPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("8) ########### Create ipv6 perf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfServerPodv6 := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload vf on worker0
iperfServerPodv6.create(oc, "PODNAME="+iperfServerPodv6.name, "NAMESPACE="+iperfServerPodv6.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfServerPodv6.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfServerIPv6 := getPodIPv6(oc, oc.Namespace(), iperfServerPodv6.name, "dualstack")
iperfServerVF := getPodVFPresentor(oc, iperfServerPodv6.namespace, iperfServerPodv6.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("9) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
// enabling hardware offload should improve performance
// get throughput between pods attached to hardware-offload-enabled VFs
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("10) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfServerIPv6, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfServerPodv6.namespace, iperfServerPodv6.name, "eth0", iperfClientIPv6, "10")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
71cd22d7-0b2b-4659-b032-42a89d44d101
|
NonPreRelease-Longduration-Author:yingwang-Medium-45396-pod to service traffic via cluster ip between different hosts can work well with ovs hw offload as default network [Disruptive]
|
['"fmt"', '"path/filepath"', '"strconv"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload.go
|
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45396-pod to service traffic via cluster ip between diffrent hosts can work well with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
hostnwPod0Name = "hostnw-pod-45396-worker0"
hostnwPod1Name = "hostnw-pod-45396-worker1"
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf Server clusterip service and client Pod on diffenrent hosts and attach sriov VF as default interface ##########")
iperfSvc := sriovNetResource{
name: "iperf-clusterip-service",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod with ovs hwoffload VF on worker0 and create clusterip service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP := getSvcIPv4(oc, oc.Namespace(), iperfSvc.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPod.namespace, iperfSvcPod.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod with ovs hw offload VF on worker1
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[1],
"NETTYPE="+offloadNetType)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[1])
iperfClientPod.name = iperfClientName
defer iperfClientPod.delete(oc)
o.Expect(err).NotTo(o.HaveOccurred())
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
exutil.By("5) ########### Create iperf clusterip service and iperf client pod with normal default interface ##########")
iperfSvc1 := sriovNetResource{
name: "iperf-service-normal",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod1 := sriovNetResource{
name: "iperf-server-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0 and create clusterip service
iperfSvcPod1.create(oc, "PODNAME="+iperfSvcPod1.name, "NAMESPACE="+iperfSvcPod1.namespace, "NODENAME="+workerNodeList[0])
defer iperfSvcPod1.delete(oc)
iperfSvc1.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc1.name, "PODNAME="+iperfSvcPod1.name, "NAMESPACE="+iperfSvc1.namespace)
defer iperfSvc1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP1 := getSvcIPv4(oc, oc.Namespace(), iperfSvc1.name)
iperfClientPod1 := sriovNetResource{
name: "iperf-rc-normal",
namespace: oc.Namespace(),
tempfile: iperfNormalClientTmp,
kind: "pod",
}
//create iperf client pod with normal default interface on worker1
iperfClientPod1.create(oc, "PODNAME="+iperfClientPod1.name, "NAMESPACE="+iperfClientPod1.namespace, "NODENAME="+workerNodeList[1])
defer iperfClientPod1.delete(oc)
iperfClientName1, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-normal", workerNodeList[1])
iperfClientPod1.name = iperfClientName1
o.Expect(err).NotTo(o.HaveOccurred())
errPodRdy4 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-normal")
exutil.AssertWaitPollNoErr(errPodRdy4, fmt.Sprintf("iperf client pod isn't ready"))
exutil.By("6) ########### Check Bandwidth between iperf client and iperf clusterip service ##########")
// enabling hardware offload should improve performance
// get bandwidth on the iperf client attached to a hardware-offload-enabled VF
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get bandwidth on iperf client with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIP1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("7) ########### Create hostnetwork Pods to capture packets ##########")
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
hostnwPod1 := sriovNetResource{
name: hostnwPod1Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pods on worker0 and worker1 to capture packets
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
hostnwPod1.create(oc, "PODNAME="+hostnwPod1.name, "NODENAME="+workerNodeList[1])
defer hostnwPod1.delete(oc)
errPodRdy5 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy5, fmt.Sprintf("hostnetwork pod isn't ready"))
errPodRdy6 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod1.name)
exutil.AssertWaitPollNoErr(errPodRdy6, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("8) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfSvcPod.namespace, iperfSvcPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("4) ########### Create ipv6 iperf Server clusterip service and client Pod on diffenrent hosts and attach sriov VF as default interface ##########")
iperfSvcv6 := sriovNetResource{
name: "iperf-clusterip-service-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPodv6 := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload VF on worker0 and create clusterip service
iperfSvcPodv6.create(oc, "PODNAME="+iperfSvcPodv6.name, "NAMESPACE="+iperfSvcPodv6.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPodv6.delete(oc)
iperfSvcv6.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvcv6.name, "PODNAME="+iperfSvcPodv6.name, "NAMESPACE="+iperfSvcv6.namespace)
defer iperfSvcv6.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIPv6 := getSvcIPv6(oc, oc.Namespace(), iperfSvcv6.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPodv6.namespace, iperfSvcPodv6.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("5) ########### Create ipv6 iperf clusterip service and iperf client pod with normal default interface ##########")
iperfSvcv6_1 := sriovNetResource{
name: "iperf-clusterip-service-normal-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPodv6_1 := sriovNetResource{
name: "iperf-server-normal-ipv6",
namespace: oc.Namespace(),
tempfile: iperfNormalServerTmp_v6,
kind: "pod",
}
//create iperf server pod with normal default interface on worker0 and create clusterip service
iperfSvcPodv6_1.create(oc, "PODNAME="+iperfSvcPodv6_1.name, "NAMESPACE="+iperfSvcPodv6_1.namespace, "NODENAME="+workerNodeList[0])
defer iperfSvcPodv6_1.delete(oc)
iperfSvcv6_1.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvcv6_1.name, "PODNAME="+iperfSvcPodv6_1.name, "NAMESPACE="+iperfSvcv6_1.namespace)
defer iperfSvcv6_1.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-normal-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIPv6_1 := getSvcIPv6(oc, oc.Namespace(), iperfSvcv6_1.name)
exutil.By("6) ########### Check ipv6 traffic Bandwidth between iperf client and iperf clusterip service ##########")
// enabling hardware offload should improve performance
// get bandwidth on the iperf client attached to a hardware-offload-enabled VF
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "60s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
// get bandwidth on iperf client with normal default interface
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIPv6_1, "60s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", float64(bandWidth1)))
exutil.By("8) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod1.namespace, hostnwPod1.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfSvcPodv6.namespace, iperfSvcPodv6.name, "eth0", iperfClientIPv6, "10")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
9a976524-0d18-4aa8-96d7-9776faf28f86
|
NonPreRelease-Longduration-Author:yingwang-Medium-45395-pod to service traffic via cluster ip in same host can work well with ovs hw offload as default network [Disruptive]
|
['"fmt"', '"path/filepath"', '"strconv"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload.go
|
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-45395-pod to service traffic via cluster ip in same host can work well with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
hostnwPod0Name = "hostnw-pod-45388-worker0"
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf clusterip service and client Pod on same host and attach sriov VF as default interface ##########")
iperfSvcTmp := filepath.Join(sriovBaseDir, "iperf-service-template.json")
iperfServerTmp := filepath.Join(sriovBaseDir, "iperf-server-template.json")
iperfSvc := sriovNetResource{
name: "iperf-clusterip-service",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod on worker0 and create clusterip service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP := getSvcIPv4(oc, oc.Namespace(), iperfSvc.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPod.namespace, iperfSvcPod.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod := sriovNetResource{
name: "iperf-rc",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod on worker0
iperfClientPod.create(oc, "PODNAME="+iperfClientPod.name, "NAMESPACE="+iperfClientPod.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[0],
"NETTYPE="+offloadNetType)
defer iperfClientPod.delete(oc)
iperfClientName, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc", workerNodeList[0])
iperfClientPod.name = iperfClientName
o.Expect(err).NotTo(o.HaveOccurred())
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
iperfClientIP := getPodIPv4(oc, oc.Namespace(), iperfClientPod.name)
iperfClientVF := getPodVFPresentor(oc, iperfClientPod.namespace, iperfClientPod.name)
exutil.By("5) ########### Create hostnetwork Pods to capture packets ##########")
hostnwPodTmp := filepath.Join(sriovBaseDir, "net-admin-cap-pod-template.yaml")
hostnwPod0 := sriovNetResource{
name: hostnwPod0Name,
namespace: oc.Namespace(),
tempfile: hostnwPodTmp,
kind: "pod",
}
//create hostnetwork pod on worker0
hostnwPod0.create(oc, "PODNAME="+hostnwPod0.name, "NODENAME="+workerNodeList[0])
defer hostnwPod0.delete(oc)
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name="+hostnwPod0.name)
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("hostnetwork pod isn't ready"))
exutil.By("6) ########### Check Bandwidth between iperf client and iperf server pods ##########")
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "20s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("7) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIP, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIP, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIP, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfSvcPod.namespace, iperfSvcPod.name, "eth0", iperfClientIP, "10")
if ipStackType == "dualstack" {
exutil.By("8) ########### Create ipv6 iperf Server and client Pod on same host and attach sriov VF as default interface ##########")
iperfSvc := sriovNetResource{
name: "iperf-clusterip-service-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod with ovs hwoffload VF on worker0 and create clusterip service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"ClusterIP", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIPv6 := getSvcIPv6(oc, oc.Namespace(), iperfSvc.name)
iperfServerVF := getPodVFPresentor(oc, iperfSvcPod.namespace, iperfSvcPod.name)
iperfClientIPv6 := getPodIPv6(oc, oc.Namespace(), iperfClientPod.name, "dualstack")
exutil.By("9) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
bandWithStr := startIperfTraffic(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "20s")
bandWidth, paseFloatErr := strconv.ParseFloat(bandWithStr, 32)
o.Expect(paseFloatErr).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth)).Should(o.BeNumerically(">", 0.0))
exutil.By("10) ########### Capture packtes on hostnetwork pod ##########")
//send traffic and capture traffic on iperf VF presentor on worker node and iperf server pod
startIperfTrafficBackground(oc, iperfClientPod.namespace, iperfClientPod.name, iperfSvcIPv6, "150s")
// VF presentors should not be able to capture packets after hardware offload take effect(the begining packts can be captured.
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfClientVF, iperfClientIPv6, "0")
chkCapturePacketsOnIntf(oc, hostnwPod0.namespace, hostnwPod0.name, iperfServerVF, iperfClientIPv6, "0")
// iperf server pod should be able to capture packtes
chkCapturePacketsOnIntf(oc, iperfSvcPod.namespace, iperfSvcPod.name, "eth0", iperfClientIPv6, "10")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
59bbda50-d6ea-4c9a-8634-549c479df1b6
|
NonPreRelease-Longduration-Author:yingwang-Medium-46018-test pod to service traffic via nodeport with ovs hw offload as default network [Disruptive]
|
['"fmt"', '"path/filepath"', '"strconv"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/ovs-hw-offload.go
|
g.It("NonPreRelease-Longduration-Author:yingwang-Medium-46018-test pod to service traffic via nodeport with ovs hw offload as default network [Disruptive]", func() {
var (
networkBaseDir = exutil.FixturePath("testdata", "networking")
sriovBaseDir = filepath.Join(networkBaseDir, "sriov")
sriovNetPolicyName = "sriovoffloadpolicy"
sriovNetDeviceName = "sriovoffloadnetattchdef"
sriovOpNs = "openshift-sriov-network-operator"
//pfName = "ens1f0"
workerNodeList = getOvsHWOffloadWokerNodes(oc)
pfName = getHWoffloadPF(oc, workerNodeList[0])
)
oc.SetupProject()
exutil.SetNamespacePrivileged(oc, oc.Namespace())
sriovNetPolicyTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadpolicy-template.yaml")
sriovNetPolicy := sriovNetResource{
name: sriovNetPolicyName,
namespace: sriovOpNs,
kind: "SriovNetworkNodePolicy",
tempfile: sriovNetPolicyTmpFile,
}
sriovNetworkAttachTmpFile := filepath.Join(sriovBaseDir, "sriovoffloadnetattchdef-template.yaml")
sriovNetwork := sriovNetResource{
name: sriovNetDeviceName,
namespace: oc.Namespace(),
tempfile: sriovNetworkAttachTmpFile,
kind: "network-attachment-definitions",
}
defaultOffloadNet := oc.Namespace() + "/" + sriovNetwork.name
offloadNetType := "v1.multus-cni.io/default-network"
exutil.By("1) ####### Check openshift-sriov-network-operator is running well ##########")
chkSriovOperatorStatus(oc, sriovOpNs)
exutil.By("2) ####### Check sriov network policy ############")
//check if sriov network policy is created or not. If not, create one.
if !sriovNetPolicy.chkSriovPolicy(oc) {
sriovNetPolicy.create(oc, "VENDOR="+vendorID, "PFNAME="+pfName, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer rmSriovNetworkPolicy(oc, sriovNetPolicy.name, sriovNetPolicy.namespace)
}
waitForOffloadSriovPolicyReady(oc, sriovNetPolicy.namespace)
exutil.By("3) ######### Create sriov network attachment ############")
e2e.Logf("create sriov network attachment via template")
sriovNetwork.create(oc, "NAMESPACE="+oc.Namespace(), "NETNAME="+sriovNetwork.name, "SRIOVNETPOLICY="+sriovNetPolicy.name)
defer sriovNetwork.delete(oc)
exutil.By("4) ########### Create iperf nodeport service and create 2 client Pods on 2 hosts and attach sriov VF as default interface ##########")
iperfSvcTmp := filepath.Join(sriovBaseDir, "iperf-service-template.json")
iperfServerTmp := filepath.Join(sriovBaseDir, "iperf-server-template.json")
iperfSvc := sriovNetResource{
name: "iperf-nodeport-service",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server",
namespace: oc.Namespace(),
tempfile: iperfServerTmp,
kind: "pod",
}
//create iperf server pod on worker0 and create nodeport service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"NodePort", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("iperf server pod isn't ready"))
iperfSvcIP := getSvcIPv4(oc, oc.Namespace(), iperfSvc.name)
iperfClientTmp := filepath.Join(sriovBaseDir, "iperf-rc-template.json")
iperfClientPod1 := sriovNetResource{
name: "iperf-rc-1",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
iperfClientPod2 := sriovNetResource{
name: "iperf-rc-2",
namespace: oc.Namespace(),
tempfile: iperfClientTmp,
kind: "pod",
}
//create iperf client pod on worker0
iperfClientPod1.create(oc, "PODNAME="+iperfClientPod1.name, "NAMESPACE="+iperfClientPod1.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[0],
"NETTYPE="+offloadNetType)
defer iperfClientPod1.delete(oc)
iperfClientName1, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-1", workerNodeList[0])
iperfClientPod1.name = iperfClientName1
o.Expect(err).NotTo(o.HaveOccurred())
//create iperf client pod on worker1
iperfClientPod2.create(oc, "PODNAME="+iperfClientPod2.name, "NAMESPACE="+iperfClientPod2.namespace, "NETNAME="+defaultOffloadNet, "NODENAME="+workerNodeList[1],
"NETTYPE="+offloadNetType)
defer iperfClientPod2.delete(oc)
iperfClientName2, err := exutil.GetPodName(oc, oc.Namespace(), "name=iperf-rc-2", workerNodeList[1])
iperfClientPod2.name = iperfClientName2
o.Expect(err).NotTo(o.HaveOccurred())
errPodRdy2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-1")
exutil.AssertWaitPollNoErr(errPodRdy2, fmt.Sprintf("iperf client pod isn't ready"))
errPodRdy3 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-rc-2")
exutil.AssertWaitPollNoErr(errPodRdy3, fmt.Sprintf("iperf client pod isn't ready"))
exutil.By("5) ########### Check Bandwidth between iperf client and iperf server pods ##########")
//traffic should pass
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIP, "20s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth1)).Should(o.BeNumerically(">", 0.0))
//traffic should pass
bandWithStr2 := startIperfTraffic(oc, iperfClientPod2.namespace, iperfClientPod2.name, iperfSvcIP, "20s")
bandWidth2, paseFloatErr2 := strconv.ParseFloat(bandWithStr2, 32)
o.Expect(paseFloatErr2).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth2)).Should(o.BeNumerically(">", 0.0))
if ipStackType == "dualstack" {
exutil.By("6) ########### Create ipv6 iperf nodeport service and create 2 client Pods on 2 hosts and attach sriov VF as default interface ##########")
iperfSvc := sriovNetResource{
name: "iperf-nodeport-service-ipv6",
namespace: oc.Namespace(),
tempfile: iperfSvcTmp_v6,
kind: "service",
}
iperfSvcPod := sriovNetResource{
name: "iperf-server-ipv6",
namespace: oc.Namespace(),
tempfile: iperfServerTmp_v6,
kind: "pod",
}
//create iperf server pod on worker0 and create nodeport service
iperfSvcPod.create(oc, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvcPod.namespace, "NETNAME="+defaultOffloadNet, "NETTYPE="+offloadNetType, "NODENAME="+workerNodeList[0])
defer iperfSvcPod.delete(oc)
iperfSvc.create(oc, "SVCTYPE="+"NodePort", "SVCNAME="+iperfSvc.name, "PODNAME="+iperfSvcPod.name, "NAMESPACE="+iperfSvc.namespace)
defer iperfSvc.delete(oc)
errPodRdy1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=iperf-server-ipv6")
exutil.AssertWaitPollNoErr(errPodRdy1, fmt.Sprintf("ipv6 iperf server pod isn't ready"))
iperfSvcIPv6 := getSvcIPv6(oc, oc.Namespace(), iperfSvc.name)
exutil.By("7) ########### Check ipv6 traffic Bandwidth between iperf client and iperf server pods ##########")
//traffic should pass
bandWithStr1 := startIperfTraffic(oc, iperfClientPod1.namespace, iperfClientPod1.name, iperfSvcIPv6, "20s")
bandWidth1, paseFloatErr1 := strconv.ParseFloat(bandWithStr1, 32)
o.Expect(paseFloatErr1).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth1)).Should(o.BeNumerically(">", 0.0))
//traffic should pass
bandWithStr2 := startIperfTraffic(oc, iperfClientPod2.namespace, iperfClientPod2.name, iperfSvcIPv6, "20s")
bandWidth2, paseFloatErr2 := strconv.ParseFloat(bandWithStr2, 32)
o.Expect(paseFloatErr2).NotTo(o.HaveOccurred())
o.Expect(float64(bandWidth2)).Should(o.BeNumerically(">", 0.0))
}
})
| |||||
test
|
openshift/openshift-tests-private
|
00f88ca2-2ef8-4819-97d2-66116ae96615
|
sctp-dualstack
|
import (
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/sctp-dualstack.go
|
package networking
import (
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN sctp", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-sctp", exutil.KubeConfigPath())
// author: [email protected]
g.It("ROSA-OSD_CCS-Longduration-Author:weliang-Medium-28757-Establish pod to pod SCTP connections. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
g.By("create sctpClientPod")
createResourceFromFile(oc, oc.Namespace(), sctpClientPod)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
g.By("create sctpServerPod")
createResourceFromFile(oc, oc.Namespace(), sctpServerPod)
err2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
ipStackType := checkIPStackType(oc)
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP := getPodIPv4(oc, oc.Namespace(), sctpServerPodName)
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
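// give the background ncat listener a moment to start before probing for it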
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP := getPodIPv6(oc, oc.Namespace(), sctpServerPodName, ipStackType)
g.By("sctpserver pod start to wait for sctp traffic")
oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after getting sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
})
// author: [email protected]
g.It("ROSA-OSD_CCS-Longduration-NonPreRelease-Author:weliang-Medium-28758-Expose SCTP ClusterIP Services. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
sctpServicev4 = filepath.Join(buildPruningBaseDir, "sctpservicev4.yaml")
sctpServicev6 = filepath.Join(buildPruningBaseDir, "sctpservicev6.yaml")
sctpServiceDualstack = filepath.Join(buildPruningBaseDir, "sctpservicedualstack.yaml")
)
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
g.By("create sctpClientPod")
createResourceFromFile(oc, oc.Namespace(), sctpClientPod)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
g.By("create sctpServerPod")
createResourceFromFile(oc, oc.Namespace(), sctpServerPod)
err2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
ipStackType := checkIPStackType(oc)
if ipStackType == "ipv4single" {
g.By("test ipv4 singlestack cluster")
g.By("create sctpServiceIPv4")
createResourceFromFile(oc, oc.Namespace(), sctpServicev4)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("sctpservice-v4"))
g.By("get service ipv4 address")
sctpServiceIPv4 := getSvcIPv4(oc, oc.Namespace(), "sctpservice-v4")
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err1 := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err1).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err2 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv4+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err4 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err4).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
if ipStackType == "ipv6single" {
g.By("test ipv6 singlestack cluster")
g.By("create sctpServiceIPv4")
createResourceFromFile(oc, oc.Namespace(), sctpServicev6)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("sctpservice-v6"))
g.By("get service ipv6 address")
sctpServiceIPv6, _ := getSvcIP(oc, oc.Namespace(), "sctpservice-v6")
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err1 := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err1).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err2 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv6+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err4 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err4).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
if ipStackType == "dualstack" {
g.By("test ip dualstack cluster")
g.By("create sctpservicedualstack")
createResourceFromFile(oc, oc.Namespace(), sctpServiceDualstack)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("sctpservice-dualstack"))
g.By("get service ipv4 and ipv6 address")
sctpServiceIPv4, sctpServiceIPv6 := getSvcIPdualstack(oc, oc.Namespace(), "sctpservice-dualstack")
g.By("test ipv4 in dualstack cluster")
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err1 := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err1).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err2 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv4+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err4 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err4).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
g.By("test ipv6 in dualstack cluster")
g.By("sctpserver pod start to wait for sctp traffic")
oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err5 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err5).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv6+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after getting sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err6 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err6).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
})
// author: [email protected]
g.It("NonPreRelease-PreChkUpgrade-Author:huirwang-Medium-44765-Check the sctp works well after upgrade. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient-upgrade.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver-upgrade.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
ns = "44765-upgrade-ns"
)
g.By("Enable sctp module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
sctpClientPodname := getPodName(oc, ns, "name=sctpclient")[0]
g.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
sctpServerPodName := getPodName(oc, ns, "name=sctpserver")[0]
ipStackType := checkIPStackType(oc)
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP := getPodIPv4(oc, ns, sctpServerPodName)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
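// make sure the background ncat listener is cleaned up even if the test exits early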
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP := getPodIPv6(oc, ns, sctpServerPodName, ipStackType)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
})
// author: [email protected]
g.It("NonPreRelease-PstChkUpgrade-Author:huirwang-Medium-44765-Check the sctp works well after upgrade. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
ns = "44765-upgrade-ns"
)
g.By("Check if sctp upgrade namespace existed")
//Skip if namespace 44765-upgrade-ns doesn't exist, which means the pre-upgrade preparation didn't run or failed
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", ns).Execute()
if nsErr != nil {
g.Skip("Skip for no namespace 44765-upgrade-ns in post upgrade.")
}
g.By("Get sctp upgrade setup info")
e2e.Logf("The sctp upgrade namespace is %s ", ns)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("namespace", ns, "--ignore-not-found").Execute()
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
sctpClientPodname := getPodName(oc, ns, "name=sctpclient")[0]
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
sctpServerPodName := getPodName(oc, ns, "name=sctpserver")[0]
g.By("Enable sctp module on all workers")
prepareSCTPModule(oc, sctpModule)
ipStackType := checkIPStackType(oc)
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP := getPodIPv4(oc, ns, sctpServerPodName)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP := getPodIPv6(oc, ns, sctpServerPodName, ipStackType)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
})
// author: [email protected]
g.It("ROSA-OSD_CCS-Longduration-NonPreRelease-Author:huirwang-Medium-28759-Expose SCTP NodePort Services. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
sctpServicev4 = filepath.Join(buildPruningBaseDir, "sctpservicev4.yaml")
sctpServicev6 = filepath.Join(buildPruningBaseDir, "sctpservicev6.yaml")
sctpServiceDualstack = filepath.Join(buildPruningBaseDir, "sctpservicedualstack.yaml")
)
exutil.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
exutil.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
exutil.By("Get sctpServerPod node ")
nodeName, err3 := exutil.GetPodNodeName(oc, ns, "sctpserver")
exutil.AssertWaitPollNoErr(err3, "Cannot get sctpSeverpod node name")
ipStackType := checkIPStackType(oc)
var sctpService string
var expectedSctpService string
switch ipStackType {
case "ipv4single":
sctpService = sctpServicev4
expectedSctpService = "sctpservice-v4"
case "ipv6single":
sctpService = sctpServicev6
expectedSctpService = "sctpservice-v6"
case "dualstack":
sctpService = sctpServiceDualstack
expectedSctpService = "sctpservice-dualstack"
}
exutil.By("create sctp service")
createResourceFromFile(oc, oc.Namespace(), sctpService)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(expectedSctpService))
exutil.By("get node port and node ip")
sctpNodePort := getLoadBalancerSvcNodePort(oc, oc.Namespace(), expectedSctpService)
nodeIP1, nodeIP2 := getNodeIP(oc, nodeName)
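// judging by the checks below, getNodeIP returns the IPv6 address first and the IPv4 address second on dual-stack clusters; on single-stack clusters the second value carries the node address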
exutil.By("Verify sctp nodeport service can be accessed")
checkSCTPResultPASS(oc, ns, sctpServerPodName, sctpClientPodname, nodeIP2, sctpNodePort)
if ipStackType == "dualstack" {
exutil.By("Verify sctp nodeport service can be accessed on IPv6")
checkSCTPResultPASS(oc, ns, sctpServerPodName, sctpClientPodname, nodeIP1, sctpNodePort)
}
})
// author: [email protected]
g.It("ConnectedOnly-Author:huirwang-Medium-29645-Networkpolicy allow SCTP Client. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctp/sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctp/sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml")
defaultDenyPolicy = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
allowSCTPPolicy = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-sctpclient.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
exutil.By("Preparing the nodes for SCTP")
prepareSCTPModule(oc, sctpModule)
exutil.By("Setting privileges on the namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
exutil.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
ipStackType := checkIPStackType(oc)
exutil.By("Verify sctp server pod can be accessed")
var sctpServerIPv6, sctpServerIPv4, sctpServerIP string
if ipStackType == "dualstack" {
sctpServerIPv6, sctpServerIPv4 = getPodIP(oc, ns, sctpServerPodName)
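// on dual-stack clusters getPodIP returns the IPv6 address first and the IPv4 address second, which the assignment order here relies on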
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true)
} else {
sctpServerIP, _ = getPodIP(oc, ns, sctpServerPodName)
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true)
}
exutil.By("create default deny ingress type networkpolicy")
createResourceFromFile(oc, ns, defaultDenyPolicy)
output, err := oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
exutil.By("Verify sctp server pod was blocked")
if ipStackType == "dualstack" {
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, false)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, false)
} else {
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, false)
}
exutil.By("Create allow deny sctp client networkpolicy")
createResourceFromFile(oc, ns, allowSCTPPolicy)
output, err = oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allowsctpclient"))
exutil.By("Verify sctp server pod can be accessed again")
if ipStackType == "dualstack" {
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true)
} else {
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true)
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
ac45bd90-d297-4108-afd0-f64075d3223b
|
ROSA-OSD_CCS-Longduration-Author:weliang-Medium-28757-Establish pod to pod SCTP connections. [Disruptive]
|
['"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sctp-dualstack.go
|
g.It("ROSA-OSD_CCS-Longduration-Author:weliang-Medium-28757-Establish pod to pod SCTP connections. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
g.By("create sctpClientPod")
createResourceFromFile(oc, oc.Namespace(), sctpClientPod)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
g.By("create sctpServerPod")
createResourceFromFile(oc, oc.Namespace(), sctpServerPod)
err2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
ipStackType := checkIPStackType(oc)
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP := getPodIPv4(oc, oc.Namespace(), sctpServerPodName)
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP := getPodIPv6(oc, oc.Namespace(), sctpServerPodName, ipStackType)
g.By("sctpserver pod start to wait for sctp traffic")
oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after getting sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err1 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err1).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
92b8aa64-335d-4485-8722-235f73b106ec
|
ROSA-OSD_CCS-Longduration-NonPreRelease-Author:weliang-Medium-28758-Expose SCTP ClusterIP Services. [Disruptive]
|
['"path/filepath"', '"strings"', '"time"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sctp-dualstack.go
|
g.It("ROSA-OSD_CCS-Longduration-NonPreRelease-Author:weliang-Medium-28758-Expose SCTP ClusterIP Services. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
sctpServicev4 = filepath.Join(buildPruningBaseDir, "sctpservicev4.yaml")
sctpServicev6 = filepath.Join(buildPruningBaseDir, "sctpservicev6.yaml")
sctpServiceDualstack = filepath.Join(buildPruningBaseDir, "sctpservicedualstack.yaml")
)
g.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
oc.SetupProject()
defer exutil.RecoverNamespaceRestricted(oc, oc.Namespace())
exutil.SetNamespacePrivileged(oc, oc.Namespace())
g.By("create sctpClientPod")
createResourceFromFile(oc, oc.Namespace(), sctpClientPod)
err1 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
g.By("create sctpServerPod")
createResourceFromFile(oc, oc.Namespace(), sctpServerPod)
err2 := waitForPodWithLabelReady(oc, oc.Namespace(), "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
ipStackType := checkIPStackType(oc)
if ipStackType == "ipv4single" {
g.By("test ipv4 singlestack cluster")
g.By("create sctpServiceIPv4")
createResourceFromFile(oc, oc.Namespace(), sctpServicev4)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("sctpservice-v4"))
g.By("get service ipv4 address")
sctpServiceIPv4 := getSvcIPv4(oc, oc.Namespace(), "sctpservice-v4")
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err1 := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err1).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err2 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv4+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err4 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err4).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
if ipStackType == "ipv6single" {
g.By("test ipv6 singlestack cluster")
g.By("create sctpServiceIPv4")
createResourceFromFile(oc, oc.Namespace(), sctpServicev6)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("sctpservice-v6"))
g.By("get service ipv6 address")
sctpServiceIPv6, _ := getSvcIP(oc, oc.Namespace(), "sctpservice-v6")
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err1 := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err1).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err2 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv6+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err4 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err4).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
if ipStackType == "dualstack" {
g.By("test ip dualstack cluster")
g.By("create sctpservicedualstack")
createResourceFromFile(oc, oc.Namespace(), sctpServiceDualstack)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("sctpservice-dualstack"))
g.By("get service ipv4 and ipv6 address")
sctpServiceIPv4, sctpServiceIPv6 := getSvcIPdualstack(oc, oc.Namespace(), "sctpservice-dualstack")
g.By("test ipv4 in dualstack cluster")
g.By("sctpserver pod start to wait for sctp traffic")
_, _, _, err1 := oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
o.Expect(err1).NotTo(o.HaveOccurred())
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err2 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 := e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv4+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err4 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err4).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
g.By("test ipv6 in dualstack cluster")
g.By("sctpserver pod start to wait for sctp traffic")
oc.Run("exec").Args("-n", oc.Namespace(), sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
time.Sleep(5 * time.Second)
g.By("check sctp process enabled in the sctp server pod")
msg, err5 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err5).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(msg, "/usr/bin/ncat -l 30102 --sctp")).To(o.BeTrue())
g.By("sctpclient pod start to send sctp traffic")
_, err3 = e2eoutput.RunHostCmd(oc.Namespace(), sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServiceIPv6+" 30102 --sctp; }")
o.Expect(err3).NotTo(o.HaveOccurred())
g.By("server sctp process will end after getting sctp traffic from sctp client")
time.Sleep(5 * time.Second)
msg1, err6 := e2eoutput.RunHostCmd(oc.Namespace(), sctpServerPodName, "ps aux | grep sctp")
o.Expect(err6).NotTo(o.HaveOccurred())
o.Expect(msg1).NotTo(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
2651640c-d7ad-4417-93f8-e0a6bb642e39
|
NonPreRelease-PreChkUpgrade-Author:huirwang-Medium-44765-Check the sctp works well after upgrade. [Disruptive]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sctp-dualstack.go
|
g.It("NonPreRelease-PreChkUpgrade-Author:huirwang-Medium-44765-Check the sctp works well after upgrade. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient-upgrade.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver-upgrade.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
ns = "44765-upgrade-ns"
)
g.By("Enable sctp module in all workers")
prepareSCTPModule(oc, sctpModule)
g.By("create new namespace")
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
sctpClientPodname := getPodName(oc, ns, "name=sctpclient")[0]
g.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
sctpServerPodName := getPodName(oc, ns, "name=sctpserver")[0]
ipStackType := checkIPStackType(oc)
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP := getPodIPv4(oc, ns, sctpServerPodName)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP := getPodIPv6(oc, ns, sctpServerPodName, ipStackType)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
4871ef6b-28de-45c9-9e7c-7007a938fc77
|
NonPreRelease-PstChkUpgrade-Author:huirwang-Medium-44765-Check the sctp works well after upgrade. [Disruptive]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sctp-dualstack.go
|
g.It("NonPreRelease-PstChkUpgrade-Author:huirwang-Medium-44765-Check the sctp works well after upgrade. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
ns = "44765-upgrade-ns"
)
g.By("Check if sctp upgrade namespace existed")
//Skip if namespace 44765-upgrade-ns doesn't exist, which means the pre-upgrade preparation didn't run or failed
nsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", ns).Execute()
if nsErr != nil {
g.Skip("Skip for no namespace 44765-upgrade-ns in post upgrade.")
}
g.By("Get sctp upgrade setup info")
e2e.Logf("The sctp upgrade namespace is %s ", ns)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("namespace", ns, "--ignore-not-found").Execute()
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
sctpClientPodname := getPodName(oc, ns, "name=sctpclient")[0]
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
sctpServerPodName := getPodName(oc, ns, "name=sctpserver")[0]
g.By("Enable sctp module on all workers")
prepareSCTPModule(oc, sctpModule)
ipStackType := checkIPStackType(oc)
g.By("test ipv4 in ipv4 cluster or dualstack cluster")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
g.By("get ipv4 address from the sctpServerPod")
sctpServerPodIP := getPodIPv4(oc, ns, sctpServerPodName)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
g.By("test ipv6 in ipv6 cluster or dualstack cluster")
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
g.By("get ipv6 address from the sctpServerPod")
sctpServerPodIP := getPodIPv6(oc, ns, sctpServerPodName, ipStackType)
g.By("sctpserver pod start to wait for sctp traffic")
cmdNcat, _, _, err := oc.AsAdmin().Run("exec").Args("-n", ns, sctpServerPodName, "--", "/usr/bin/ncat", "-l", "30102", "--sctp").Background()
defer cmdNcat.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check sctp process enabled in the sctp server pod")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").Should(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "No sctp process running on sctp server pod")
g.By("sctpclient pod start to send sctp traffic")
_, err1 := e2eoutput.RunHostCmd(ns, sctpClientPodname, "echo 'Test traffic using sctp port from sctpclient to sctpserver' | { ncat -v "+sctpServerPodIP+" 30102 --sctp; }")
o.Expect(err1).NotTo(o.HaveOccurred())
g.By("server sctp process will end after get sctp traffic from sctp client")
o.Eventually(func() string {
msg, err := e2eoutput.RunHostCmd(ns, sctpServerPodName, "ps aux | grep sctp")
o.Expect(err).NotTo(o.HaveOccurred())
return msg
}, "10s", "5s").ShouldNot(o.ContainSubstring("/usr/bin/ncat -l 30102 --sctp"), "Sctp process didn't end after get sctp traffic from sctp client")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
c90b2ed3-6d85-451e-bfcf-a45b0136e456
|
ROSA-OSD_CCS-Longduration-NonPreRelease-Author:huirwang-Medium-28759-Expose SCTP NodePort Services. [Disruptive]
|
['"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sctp-dualstack.go
|
g.It("ROSA-OSD_CCS-Longduration-NonPreRelease-Author:huirwang-Medium-28759-Expose SCTP NodePort Services. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sctp")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "load-sctp-module.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
sctpServicev4 = filepath.Join(buildPruningBaseDir, "sctpservicev4.yaml")
sctpServicev6 = filepath.Join(buildPruningBaseDir, "sctpservicev6.yaml")
sctpServiceDualstack = filepath.Join(buildPruningBaseDir, "sctpservicedualstack.yaml")
)
exutil.By("install load-sctp-module in all workers")
prepareSCTPModule(oc, sctpModule)
exutil.By("create new namespace")
oc.SetupProject()
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
exutil.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
exutil.By("Get sctpServerPod node ")
nodeName, err3 := exutil.GetPodNodeName(oc, ns, "sctpserver")
exutil.AssertWaitPollNoErr(err3, "Cannot get sctpSeverpod node name")
ipStackType := checkIPStackType(oc)
var sctpService string
var expectedSctpService string
switch ipStackType {
case "ipv4single":
sctpService = sctpServicev4
expectedSctpService = "sctpservice-v4"
case "ipv6single":
sctpService = sctpServicev6
expectedSctpService = "sctpservice-v6"
case "dualstack":
sctpService = sctpServiceDualstack
expectedSctpService = "sctpservice-dualstack"
}
exutil.By("create sctp service")
createResourceFromFile(oc, oc.Namespace(), sctpService)
output, err := oc.WithoutNamespace().Run("get").Args("service").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(expectedSctpService))
exutil.By("get node port and node ip")
sctpNodePort := getLoadBalancerSvcNodePort(oc, oc.Namespace(), expectedSctpService)
nodeIP1, nodeIP2 := getNodeIP(oc, nodeName)
exutil.By("Verify sctp nodeport service can be accessed")
checkSCTPResultPASS(oc, ns, sctpServerPodName, sctpClientPodname, nodeIP2, sctpNodePort)
if ipStackType == "dualstack" {
exutil.By("Verify sctp nodeport service can be accessed on IPv6")
checkSCTPResultPASS(oc, ns, sctpServerPodName, sctpClientPodname, nodeIP1, sctpNodePort)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
7c2b3850-e1bf-4e9f-b207-a4e47c011187
|
ConnectedOnly-Author:huirwang-Medium-29645-Networkpolicy allow SCTP Client. [Disruptive]
|
['"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sctp-dualstack.go
|
g.It("ConnectedOnly-Author:huirwang-Medium-29645-Networkpolicy allow SCTP Client. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
sctpClientPod = filepath.Join(buildPruningBaseDir, "sctp/sctpclient.yaml")
sctpServerPod = filepath.Join(buildPruningBaseDir, "sctp/sctpserver.yaml")
sctpModule = filepath.Join(buildPruningBaseDir, "sctp/load-sctp-module.yaml")
defaultDenyPolicy = filepath.Join(buildPruningBaseDir, "networkpolicy/default-deny-ingress.yaml")
allowSCTPPolicy = filepath.Join(buildPruningBaseDir, "networkpolicy/allow-sctpclient.yaml")
sctpServerPodName = "sctpserver"
sctpClientPodname = "sctpclient"
)
exutil.By("Preparing the nodes for SCTP")
prepareSCTPModule(oc, sctpModule)
exutil.By("Setting privileges on the namespace")
ns := oc.Namespace()
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create sctpClientPod")
createResourceFromFile(oc, ns, sctpClientPod)
err1 := waitForPodWithLabelReady(oc, ns, "name=sctpclient")
exutil.AssertWaitPollNoErr(err1, "sctpClientPod is not running")
exutil.By("create sctpServerPod")
createResourceFromFile(oc, ns, sctpServerPod)
err2 := waitForPodWithLabelReady(oc, ns, "name=sctpserver")
exutil.AssertWaitPollNoErr(err2, "sctpServerPod is not running")
ipStackType := checkIPStackType(oc)
exutil.By("Verify sctp server pod can be accessed")
var sctpServerIPv6, sctpServerIPv4, sctpServerIP string
if ipStackType == "dualstack" {
sctpServerIPv6, sctpServerIPv4 = getPodIP(oc, ns, sctpServerPodName)
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true)
} else {
sctpServerIP, _ = getPodIP(oc, ns, sctpServerPodName)
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true)
}
exutil.By("create default deny ingress type networkpolicy")
createResourceFromFile(oc, ns, defaultDenyPolicy)
output, err := oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("default-deny-ingress"))
exutil.By("Verify sctp server pod was blocked")
if ipStackType == "dualstack" {
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, false)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, false)
} else {
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, false)
}
exutil.By("Create allow deny sctp client networkpolicy")
createResourceFromFile(oc, ns, allowSCTPPolicy)
output, err = oc.Run("get").Args("networkpolicy", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("allowsctpclient"))
exutil.By("Verify sctp server pod can be accessed again")
if ipStackType == "dualstack" {
verifySctpConnPod2IP(oc, ns, sctpServerIPv4, sctpServerPodName, sctpClientPodname, true)
verifySctpConnPod2IP(oc, ns, sctpServerIPv6, sctpServerPodName, sctpClientPodname, true)
} else {
verifySctpConnPod2IP(oc, ns, sctpServerIP, sctpServerPodName, sctpClientPodname, true)
}
})
| |||||
test
|
openshift/openshift-tests-private
|
383b7ea0-0473-468c-85c1-de128dfadc64
|
sriov_basic
|
import (
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
package networking
import (
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN sriov-legacy", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("sriov-"+getRandomString(), exutil.KubeConfigPath())
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml")
sriovOpNs = "openshift-sriov-network-operator"
vfNum = 2
)
testData := []struct {
Name string
DeviceID string
Vendor string
InterfaceName string
}{
{"e810xxv", "159b", "8086", "ens2f0"},
{"e810c", "1593", "8086", "ens2f2"},
{"x710", "1572", "8086", "ens5f0"}, //NO-CARRIER
{"bcm57414", "16d7", "14e4", "ens4f1np1"},
{"bcm57508", "1750", "14e4", "ens3f0np0"}, //NO-CARRIER
{"e810back", "1591", "8086", "ens4f2"},
}
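// each row maps a NIC model to its PCI device ID, vendor ID and the physical interface
// used for VF creation; the NO-CARRIER notes mark interfaces without link in the lab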
g.BeforeEach(func() {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
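// the console route hostname identifies the RDU lab clusters that carry the SR-IOV test hardware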
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1/rdu2 cluster. , skip for other envrionment!!!")
}
exutil.By("check the sriov operator is running")
chkSriovOperatorStatus(oc, sriovOpNs)
})
g.AfterEach(func() {
//after each case finishes testing, remove the SriovNetworkNodePolicy CRs
var policys []string
for _, items := range testData {
policys = append(policys, items.Name)
}
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(append([]string{"SriovNetworkNodePolicy", "-n", sriovOpNs, "--ignore-not-found"}, policys...)...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("remove sriovnetworknodepolicy %s", strings.Join(policys, " "))
waitForSriovPolicyReady(oc, sriovOpNs)
})
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25959-Test container with spoofchk is on [Disruptive]", func() {
var caseID = "25959-"
for _, data := range testData {
data := data
// Create VF on with given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the device ID does not exist on any worker, skip this device
if !result {
continue
}
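// run each device in its own closure so the deferred cleanup fires per device instead of at the end of the whole case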
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "off",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "spoof checking on")
}()
}
})
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-70820-Test container with spoofchk is off [Disruptive]", func() {
var caseID = "70820-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "off",
trust: "on",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "spoof checking off")
}()
}
})
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25960-Test container with trust is off [Disruptive]", func() {
var caseID = "25960-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "off",
trust: "off",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "trust off")
}()
}
})
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-70821-Test container with trust is on [Disruptive]", func() {
var caseID = "70821-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "trust on")
}()
}
})
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25963-Test container with VF and set vlan minTxRate maxTxRate [Disruptive]", func() {
var caseID = "25963-"
for _, data := range testData {
data := data
// x710 and the bcm NICs do not support minTxRate for now
if data.Name == "x710" || data.Name == "bcm57414" || data.Name == "bcm57508" {
continue
}
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
vlanId: 100,
vlanQoS: 2,
minTxRate: 40,
maxTxRate: 100,
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
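// the expected substring lists the VF link attributes (vlan, qos, tx rates) that chkVFStatusWithPassTraffic expects to find in the VF status output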
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "vlan 100, qos 2, tx rate 100 (Mbps), max_tx_rate 100Mbps, min_tx_rate 40Mbps")
}()
}
})
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25961-Test container with VF and set linkState is auto [Disruptive]", func() {
var caseID = "25961-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
linkState: "auto",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "link-state auto")
}()
}
})
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-71006-Test container with VF and set linkState is enable [Disruptive]", func() {
var caseID = "71006-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
linkState: "enable",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "link-state enable")
}()
}
})
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-69646-mtu testing for sriov policy [Disruptive]", func() {
var caseID = "69646-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
// configure MTU in the SriovNetworkNodePolicy
mtuValue := 1800
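// JSON patch that adds spec.mtu to the SriovNetworkNodePolicy; the literal 1800 must stay in sync with mtuValue above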
patchYamlToRestore := `[{"op":"add","path":"/spec/mtu","value":1800}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovnetworknodepolicies.sriovnetwork.openshift.io", data.Name, "-n", sriovOpNs,
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := data.Name + " patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
waitForSriovPolicyReady(oc, sriovOpNs)
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "mtu "+strconv.Itoa(mtuValue))
}()
}
})
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-69582-dpdk for sriov vf can be worked well [Disruptive]", func() {
var caseID = "69582-"
for _, data := range testData {
data := data
// skip bcm nics: OCPBUGS-30909
if strings.Contains(data.Name, "bcm") {
continue
}
// Create VFs on the given device
policyName := data.Name
networkName := data.Name + "dpdk" + "net"
result := initDpdkVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml")
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: policyName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
// create the DPDK test pod
sriovTestPodDpdkTemplate := filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml")
sriovTestPod := sriovTestPod{
name: "sriovdpdk",
namespace: ns1,
networkName: sriovnetwork.name,
template: sriovTestPodDpdkTemplate,
}
sriovTestPod.createSriovTestPod(oc)
err1 := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk")
exutil.AssertWaitPollNoErr(err1, "this pod with label name=sriov-dpdk not ready")
g.By("Check testpmd running well")
pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, policyName)
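// testpmd flags: -l 2-3 pins two lcores, -w whitelists the VF PCI address, --socket-mem reserves hugepage memory, and the app options start a single MAC-forwarding stream on one rx/tx queue pair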
command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac"
testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams"))
sriovTestPod.deleteSriovTestPod(oc)
}()
}
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
c038ae56-e27e-4def-babc-231f5ca16f2b
|
Author:zzhao-Medium-NonPreRelease-Longduration-25959-Test container with spoofchk is on [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25959-Test container with spoofchk is on [Disruptive]", func() {
var caseID = "25959-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "off",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "spoof checking on")
}()
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
cd85eebd-3a77-43b8-bb91-51e7dc6d2d42
|
Author:zzhao-Medium-NonPreRelease-Longduration-70820-Test container with spoofchk is off [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-70820-Test container with spoofchk is off [Disruptive]", func() {
var caseID = "70820-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "off",
trust: "on",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "spoof checking off")
}()
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
e8b5d959-48d6-4ea8-b569-dbdbdd3c6dec
|
Author:zzhao-Medium-NonPreRelease-Longduration-25960-Test container with trust is off [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25960-Test container with trust is off [Disruptive]", func() {
var caseID = "25960-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "off",
trust: "off",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "trust off")
}()
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
c2ecd643-4bf5-4aa9-ab4b-99e116f69442
|
Author:zzhao-Medium-NonPreRelease-Longduration-70821-Test container with trust is on [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-70821-Test container with trust is on [Disruptive]", func() {
var caseID = "70821-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "trust on")
}()
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
8aa5c22f-b787-4cac-88f5-ec903f9124d3
|
Author:zzhao-Medium-NonPreRelease-Longduration-25963-Test container with VF and set vlan minTxRate maxTxRate [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25963-Test container with VF and set vlan minTxRate maxTxRate [Disruptive]", func() {
var caseID = "25963-"
for _, data := range testData {
data := data
// x710 and the bcm NICs do not support minTxRate for now
if data.Name == "x710" || data.Name == "bcm57414" || data.Name == "bcm57508" {
continue
}
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
vlanId: 100,
vlanQoS: 2,
minTxRate: 40,
maxTxRate: 100,
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
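// the expected substring lists the VF link attributes (vlan, qos, tx rates) that chkVFStatusWithPassTraffic expects to find in the VF status output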
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "vlan 100, qos 2, tx rate 100 (Mbps), max_tx_rate 100Mbps, min_tx_rate 40Mbps")
}()
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
8e7d9549-a329-4bae-8c9d-7ab76a8e47e7
|
Author:zzhao-Medium-NonPreRelease-Longduration-25961-Test container with VF and set linkState is auto [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-25961-Test container with VF and set linkState is auto [Disruptive]", func() {
var caseID = "25961-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
linkState: "auto",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "link-state auto")
}()
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
99b85bac-3ab5-42df-87db-3c7ba1c425aa
|
Author:zzhao-Medium-NonPreRelease-Longduration-71006-Test container with VF and set linkState is enable [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:zzhao-Medium-NonPreRelease-Longduration-71006-Test container with VF and set linkState is enable [Disruptive]", func() {
var caseID = "71006-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
linkState: "enable",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "link-state enable")
}()
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
11020e2e-c09e-4ad5-810c-69e825c0bdfc
|
Author:yingwang-Medium-NonPreRelease-Longduration-69646-mtu testing for sriov policy [Disruptive]
|
['"strconv"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-69646-mtu testing for sriov policy [Disruptive]", func() {
var caseID = "69646-"
for _, data := range testData {
data := data
// Create VFs on the given device
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
// configure MTU in the SriovNetworkNodePolicy
mtuValue := 1800
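// JSON patch that adds spec.mtu to the SriovNetworkNodePolicy; the literal 1800 must stay in sync with mtuValue above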
patchYamlToRestore := `[{"op":"add","path":"/spec/mtu","value":1800}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovnetworknodepolicies.sriovnetwork.openshift.io", data.Name, "-n", sriovOpNs,
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := data.Name + " patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
waitForSriovPolicyReady(oc, sriovOpNs)
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: data.Name,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
chkVFStatusWithPassTraffic(oc, sriovnetwork.name, data.InterfaceName, ns1, "mtu "+strconv.Itoa(mtuValue))
}()
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
eac7c168-7f22-4377-a840-fbfbe82bbb38
|
Author:yingwang-Medium-NonPreRelease-Longduration-69582-dpdk for sriov vf can be worked well [Disruptive]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_basic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-69582-dpdk for sriov vf can be worked well [Disruptive]", func() {
var caseID = "69582-"
for _, data := range testData {
data := data
// skip bcm nics: OCPBUGS-30909
if strings.Contains(data.Name, "bcm") {
continue
}
// Create VFs on the given device
policyName := data.Name
networkName := data.Name + "dpdk" + "net"
result := initDpdkVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
continue
}
func() {
ns1 := "e2e-" + caseID + data.Name
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-template.yaml")
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: policyName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
//defer
defer func() {
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
}()
sriovnetwork.createSriovNetwork(oc)
// create the DPDK test pod
sriovTestPodDpdkTemplate := filepath.Join(buildPruningBaseDir, "sriov-dpdk-template.yaml")
sriovTestPod := sriovTestPod{
name: "sriovdpdk",
namespace: ns1,
networkName: sriovnetwork.name,
template: sriovTestPodDpdkTemplate,
}
sriovTestPod.createSriovTestPod(oc)
err1 := waitForPodWithLabelReady(oc, ns1, "name=sriov-dpdk")
exutil.AssertWaitPollNoErr(err1, "this pod with label name=sriov-dpdk not ready")
g.By("Check testpmd running well")
pciAddress := getPciAddress(sriovTestPod.namespace, sriovTestPod.name, policyName)
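// testpmd flags: -l 2-3 pins two lcores, -w whitelists the VF PCI address, --socket-mem reserves hugepage memory, and the app options start a single MAC-forwarding stream on one rx/tx queue pair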
command := "testpmd -l 2-3 --in-memory -w " + pciAddress + " --socket-mem 1024 -n 4 --proc-type auto --file-prefix pg -- --disable-rss --nb-cores=1 --rxq=1 --txq=1 --auto-start --forward-mode=mac"
testpmdOutput, err := e2eoutput.RunHostCmd(sriovTestPod.namespace, sriovTestPod.name, command)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(testpmdOutput).Should(o.MatchRegexp("forwards packets on 1 streams"))
sriovTestPod.deleteSriovTestPod(oc)
}()
}
})
| |||||
test
|
openshift/openshift-tests-private
|
35963fe6-432c-41f0-9122-bc885e8ee777
|
sriov_install
|
import (
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_install.go
|
package networking
import (
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN sriov installation", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("sriov-"+getRandomString(), exutil.KubeConfigPath())
)
g.BeforeEach(func() {
// skip this on FIPS clusters due to bug https://issues.redhat.com/browse/OCPBUGS-22779
if checkFips(oc) {
g.Skip("Skip this case since the sriov pod cannot run on FIPS-enabled clusters")
}
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(msg, "sriov.openshift-qe.sdn.com") {
g.Skip("Skip this case since sriov cluster already setup the operator during deploying the cluster")
}
})
g.It("LEVEL0-Author:zzhao-High-55957-Sriov operator can be setup ", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
namespaceTemplate = filepath.Join(buildPruningBaseDir, "namespace-template.yaml")
operatorGroupTemplate = filepath.Join(buildPruningBaseDir, "operatorgroup-template.yaml")
subscriptionTemplate = filepath.Join(buildPruningBaseDir, "subscription-template.yaml")
sriovOperatorconfig = filepath.Join(buildPruningBaseDir, "sriovoperatorconfig.yaml")
opNamespace = "openshift-sriov-network-operator"
opName = "sriov-network-operators"
)
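// OLM resources (Subscription, Namespace, OperatorGroup) used to install the SR-IOV operator from the catalog source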
sub := subscriptionResource{
name: "sriov-network-operator-subsription",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
e2e.Logf("Operator install check successfull as part of setup !!!!!")
exutil.By("SUCCESS - sriov operator installed")
exutil.By("check sriov version if match the ocp version")
operatorVersion := getOperatorVersion(oc, sub.name, sub.namespace)
ocpversion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(operatorVersion).Should(o.MatchRegexp(ocpversion))
exutil.By("create the default sriovoperatorconfig")
createResourceFromFile(oc, opNamespace, sriovOperatorconfig)
exutil.By("Check all pods in sriov namespace are running")
chkSriovOperatorStatus(oc, sub.namespace)
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
f57d362b-3990-4036-9264-f6aec14525e2
|
LEVEL0-Author:zzhao-High-55957-Sriov operator can be setup
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_install.go
|
g.It("LEVEL0-Author:zzhao-High-55957-Sriov operator can be setup ", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
namespaceTemplate = filepath.Join(buildPruningBaseDir, "namespace-template.yaml")
operatorGroupTemplate = filepath.Join(buildPruningBaseDir, "operatorgroup-template.yaml")
subscriptionTemplate = filepath.Join(buildPruningBaseDir, "subscription-template.yaml")
sriovOperatorconfig = filepath.Join(buildPruningBaseDir, "sriovoperatorconfig.yaml")
opNamespace = "openshift-sriov-network-operator"
opName = "sriov-network-operators"
)
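// OLM resources (Subscription, Namespace, OperatorGroup) used to install the SR-IOV operator from the catalog source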
sub := subscriptionResource{
name: "sriov-network-operator-subsription",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
e2e.Logf("Operator install check successfull as part of setup !!!!!")
exutil.By("SUCCESS - sriov operator installed")
exutil.By("check sriov version if match the ocp version")
operatorVersion := getOperatorVersion(oc, sub.name, sub.namespace)
ocpversion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(operatorVersion).Should(o.MatchRegexp(ocpversion))
exutil.By("create the default sriovoperatorconfig")
createResourceFromFile(oc, opNamespace, sriovOperatorconfig)
exutil.By("Check all pods in sriov namespace are running")
chkSriovOperatorStatus(oc, sub.namespace)
})
| |||||
test
|
openshift/openshift-tests-private
|
8f027fc1-0612-439f-bf01-32234f65f3c2
|
sriov_nic
|
import (
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
package networking
import (
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-networking] SDN sriov-nic", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("sriov-"+getRandomString(), exutil.KubeConfigPath())
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
sriovNeworkTemplate = filepath.Join(buildPruningBaseDir, "sriovnetwork-whereabouts-template.yaml")
sriovOpNs = "openshift-sriov-network-operator"
vfNum = 2
)
type testData = struct {
Name string
DeviceID string
Vendor string
InterfaceName string
}
data := testData{
Name: "x710",
DeviceID: "1572",
Vendor: "8086",
InterfaceName: "ens5f0",
}
g.BeforeEach(func() {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1/rdu2 cluster. , skip for other envrionment!!!")
}
exutil.By("check the sriov operator is running")
chkSriovOperatorStatus(oc, sriovOpNs)
sriovNodeList, nodeErr := exutil.GetClusterNodesBy(oc, "sriov")
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(sriovNodeList) < 1 {
g.Skip("Not enough SR-IOV nodes for this test, skip the test!")
}
})
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-69600-VF use and release testing [Disruptive]", func() {
var caseID = "69600-"
networkName := caseID + "net"
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml")
// Create VFs on the given device
defer rmSriovNetworkPolicy(oc, data.Name, sriovOpNs)
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
g.Skip("This nic which has deviceID is not found on this cluster!!!")
}
e2e.Logf("###############start to test %v sriov on nic %v ################", data.Name, data.InterfaceName)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
//defer
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
// create enough pods to consume all of the VFs
testpodPrex := "testpod"
workerList := getWorkerNodesWithNic(oc, data.DeviceID, data.InterfaceName)
o.Expect(workerList).NotTo(o.BeEmpty())
numWorker := len(workerList)
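// total schedulable VFs across the cluster = VFs per node * number of workers carrying this NIC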
fullVFNum := vfNum * numWorker
createNumPods(oc, sriovnetwork.name, ns1, testpodPrex, fullVFNum)
// creating a new pod will fail because all VFs are in use
sriovTestNewPod := sriovTestPod{
name: "testpodnew",
namespace: ns1,
networkName: sriovnetwork.name,
template: sriovTestPodTemplate,
}
sriovTestNewPod.createSriovTestPod(oc)
e2e.Logf("creating new testpod should fail, because all VFs are used")
o.Eventually(func() string {
podStatus, _ := getPodStatus(oc, ns1, sriovTestNewPod.name)
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", sriovTestNewPod.name))
// delete one pod so that testpodnew becomes ready
testpodName := testpodPrex + "0"
sriovTestRmPod := sriovTestPod{
name: testpodName,
namespace: ns1,
networkName: sriovnetwork.name,
template: sriovTestPodTemplate,
}
sriovTestRmPod.deleteSriovTestPod(oc)
err := waitForPodWithLabelReady(oc, sriovTestNewPod.namespace, "app="+sriovTestNewPod.name)
exutil.AssertWaitPollNoErr(err, "The new created pod is not ready after one VF is released")
})
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-24780-NAD will be deleted too when sriovnetwork is deleted", func() {
var caseID = "24780-"
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
networkName := caseID + "net"
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: "none",
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Can't find NAD after sriovnetwork is created")
//delete sriovnetwork
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
//NAD should be deleted too
errChk2 := chkNAD(oc, ns1, sriovnetwork.name, false)
exutil.AssertWaitPollNoErr(errChk2, "NAD was not removed after sriovnetwork is removed")
})
g.It("Author:yingwang-Medium-NonPreRelease-24713-NAD can be also updated when networknamespace is change", func() {
var caseID = "24713-"
ns1 := "e2e-" + caseID + data.Name
ns2 := "e2e-" + caseID + data.Name + "-new"
networkName := caseID + "net"
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns2, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns2)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: "none",
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, fmt.Sprintf("Can find NAD in ns %v", ns1))
errChk2 := chkNAD(oc, ns2, sriovnetwork.name, true)
exutil.AssertWaitPollWithErr(errChk2, fmt.Sprintf("Can not find NAD in ns %v", ns2))
// change networkNamespace and check the NAD
patchYamlToRestore := `[{"op":"replace","path":"/spec/networkNamespace","value":"` + ns2 + `"}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovnetwork", sriovnetwork.name, "-n", sriovOpNs,
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := sriovnetwork.name + " patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
errChk1 = chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollWithErr(errChk1, fmt.Sprintf("Can not find NAD in ns %v after networknamespace changed", ns1))
errChk2 = chkNAD(oc, ns2, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk2, fmt.Sprintf("Can find NAD in ns %v after networknamespace changed", ns2))
})
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-25287-NAD should be able to restore by sriov operator when it was deleted", func() {
var caseID = "25287-"
networkName := caseID + "net"
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: "nonE",
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, fmt.Sprintf("Can find NAD in ns %v", ns1))
//remove NAD and check again
rmNAD(oc, ns1, sriovnetwork.name)
errChk2 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk2, fmt.Sprintf("Can find NAD in ns %v as expected after NAD is removed", ns1))
})
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-21364-Create pod with sriov-cni plugin and macvlan on the same interface [Disruptive]", func() {
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
var caseID = "21364-"
networkName := caseID + "net"
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-multinet-template.yaml")
netMacvlanTemplate := filepath.Join(buildPruningBaseDir, "nad-macvlan-template.yaml")
netMacVlanName := "macvlannet"
// Create VFs on the given device
defer rmSriovNetworkPolicy(oc, data.Name, sriovOpNs)
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this device
if !result {
g.Skip("This nic which has deviceID is not found on this cluster!!!")
}
e2e.Logf("###############start to test %v sriov on nic %v ################", data.Name, data.InterfaceName)
exutil.By("Create sriovNetwork nad to generate net-attach-def on the target namespace")
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
linkState: "enable",
}
networkMacvlan := sriovNetResource{
name: netMacVlanName,
namespace: ns1,
kind: "NetworkAttachmentDefinition",
tempfile: netMacvlanTemplate,
}
//defer
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
defer networkMacvlan.delete(oc)
networkMacvlan.create(oc, "NADNAME="+networkMacvlan.name, "NAMESPACE="+networkMacvlan.namespace)
//create pods with both sriovnetwork and macvlan network
for i := 0; i < 2; i++ {
sriovTestPod := sriovNetResource{
name: "testpod" + strconv.Itoa(i),
namespace: ns1,
kind: "pod",
tempfile: sriovTestPodTemplate,
}
defer sriovTestPod.delete(oc)
sriovTestPod.create(oc, "PODNAME="+sriovTestPod.name, "NETWORKE1="+sriovnetwork.name, "NETWORKE2="+networkMacvlan.name, "NAMESPACE="+ns1)
err := waitForPodWithLabelReady(oc, sriovTestPod.namespace, "name="+sriovTestPod.name)
exutil.AssertWaitPollNoErr(err, "The new created pod is not ready")
}
chkPodsPassTraffic(oc, "testpod0", "testpod1", "net1", ns1)
chkPodsPassTraffic(oc, "testpod0", "testpod1", "net2", ns1)
})
g.It("Author:yingwang-Medium-NonPreRelease-25847-SR-IOV operator-webhook can be disable by edit SR-IOV Operator Config [Serial]", func() {
// check webhook pods are running
chkPodsStatus(oc, sriovOpNs, "app=operator-webhook")
//disable webhook
defer chkSriovWebhookResource(oc, true)
defer chkPodsStatus(oc, sriovOpNs, "app=operator-webhook")
defer setSriovWebhook(oc, "true")
setSriovWebhook(oc, "false")
// webhook pods should be deleted
o.Eventually(func() string {
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=operator-webhook", "-n", sriovOpNs).Output()
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.ContainSubstring("No resources found"), fmt.Sprintf("sriov webhook pods are removed"))
chkSriovWebhookResource(oc, false)
// set webhook true
setSriovWebhook(oc, "true")
// webhook pods should be recovered
chkPodsStatus(oc, sriovOpNs, "app=operator-webhook")
chkSriovWebhookResource(oc, true)
})
g.It("Author:yingwang-Medium-NonPreRelease-25814-SR-IOV resource injector can be disable by edit SR-IOV Operator Config [Serial]", func() {
// check network-resources-injector pods are running
chkPodsStatus(oc, sriovOpNs, "app=network-resources-injector")
//disable network-resources-injector
defer chkSriovInjectorResource(oc, true)
defer chkPodsStatus(oc, sriovOpNs, "app=network-resources-injector")
defer setSriovInjector(oc, "true")
setSriovInjector(oc, "false")
// network-resources-injector pods should be deleted
o.Eventually(func() string {
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=network-resources-injector", "-n", sriovOpNs).Output()
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.ContainSubstring("No resources found"), fmt.Sprintf("sriov network-resources-injector pods are removed"))
chkSriovInjectorResource(oc, false)
// set network-resources-injector true
setSriovInjector(oc, "true")
// network-resources-injector pods should be recovered
chkPodsStatus(oc, sriovOpNs, "app=network-resources-injector")
chkSriovInjectorResource(oc, true)
})
})
var _ = g.Describe("[sig-networking] SDN sriov externallyManaged", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("sriov-"+getRandomString(), exutil.KubeConfigPath())
testDataDir = exutil.FixturePath("testdata", "networking")
sriovOpNs = "openshift-sriov-network-operator"
)
type testData = struct {
Name string
DeviceID string
Vendor string
InterfaceName string
}
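// default device data; the BeforeEach below replaces it with the RDU1- or RDU2-specific NIC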
data := testData{
Name: "x710",
DeviceID: "1572",
Vendor: "8086",
InterfaceName: "ens5f0",
}
sriovDevices := make(map[string]testData)
var node string
var sriovNodeList []string
var nodeErr error
g.BeforeEach(func() {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1/rdu2 cluster. , skip for other envrionment!!!")
}
exutil.By("check the sriov operator is running")
chkSriovOperatorStatus(oc, sriovOpNs)
sriovNodeList, nodeErr = exutil.GetClusterNodesBy(oc, "sriov")
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if len(sriovNodeList) < 1 {
g.Skip("Not enough SR-IOV nodes for this test, skip the test!")
}
node = sriovNodeList[0]
// Record SRIOV device data on each SR-IOV node of RDUs
if err != nil || strings.Contains(msg, "sriov.openshift-qe.sdn.com") {
e2e.Logf("Running the test on RDU1")
data = testData{
Name: "e810xxv",
DeviceID: "159b",
Vendor: "8086",
InterfaceName: "ens2f0",
}
}
if err != nil || strings.Contains(msg, "offload.openshift-qe.sdn.com") {
e2e.Logf("Running the test on RDU2")
data = testData{
Name: "xl710",
DeviceID: "1583",
Vendor: "8086",
InterfaceName: "ens2f1",
}
}
g.By("0.0 Check if the deviceID exists on the cluster")
if !checkDeviceIDExist(oc, sriovOpNs, data.DeviceID) {
g.Skip("the cluster does not contain the sriov card. skip this testing!")
}
exutil.By("0.1 Get the node's name that has the device")
for _, thisNode := range sriovNodeList {
output, err := exutil.DebugNodeRetryWithOptionsAndChroot(oc, thisNode, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, data.InterfaceName) {
node = thisNode
break
}
}
sriovDevices[node] = data
e2e.Logf("\n what node is used for the test: %s\n", node)
exutil.By("0.2 Check if the interface has carrier")
if checkInterfaceNoCarrier(oc, node, sriovDevices[node].InterfaceName) {
g.Skip("The interface on the device has NO-CARRIER, skip this testing!")
}
})
g.It("Author:jechen-Longduration-NonPreRelease-High-63533-ExternallyManaged: Recreate VFs when SR-IOV policy is applied [Disruptive][Flaky]", func() {
nmstateCRTemplate := filepath.Join(testDataDir, "nmstate", "nmstate-cr-template.yaml")
nncpAddVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-vfs-specific-node-template.yaml")
nncpDelVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-remove-vfs-specific-node-template.yaml")
sriovNodeNetworkPolicyTemplate := filepath.Join(testDataDir, "sriov", "sriovnodepolicy-externallymanaged-template.yaml")
sriovNeworkTemplate := filepath.Join(testDataDir, "sriov", "sriovnetwork2-template.yaml")
sriovTestPodTemplate := filepath.Join(testDataDir, "sriov", "sriovtestpod2-with-mac-template.yaml")
opNamespace := "openshift-nmstate"
exutil.By("\n 1. Install nmstate operator and create nmstate CR \n")
installNMstateOperator(oc)
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("\n 2. Apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy := VFPolicyResource{
name: "vf-policy-63533",
intfname: sriovDevices[node].InterfaceName,
nodename: node,
totalvfs: 2,
template: nncpAddVFTemplate,
}
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 2.1 Verify the policy is applied \n")
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 2.2 Verify the created VFs found in node network state \n")
output, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 3. Create SR-IOV policy on the node with ExternallyManaged set to true \n")
sriovNNPolicy := sriovNetworkNodePolicySpecificNode{
policyName: "sriovnn",
deviceType: "netdevice",
pfName: sriovDevices[node].InterfaceName,
numVfs: 2,
resourceName: "sriovnn",
nodename: node,
namespace: sriovOpNs,
template: sriovNodeNetworkPolicyTemplate,
}
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 4. Create a target namespce, then create sriovNetwork to generate net-attach-def on the target namespace \n")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
sriovnetwork := sriovNetwork{
name: sriovNNPolicy.policyName,
resourceName: sriovNNPolicy.resourceName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
e2e.Logf("\n expect to see NAD of %s in namespace : %s\n", sriovnetwork.name, ns1)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
// exutil.By("\n 5. Create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n Create test pod1 on the target namespace \n")
sriovTestPod1 := sriovTestPodMAC{
name: "sriov-63533-test-pod1",
namespace: ns1,
ipaddr: "192.168.10.1/24",
macaddr: "20:04:0f:f1:88:01",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 5.2 Create test pod2 on the target namespace \n")
sriovTestPod2 := sriovTestPodMAC{
name: "sriov-63533-test-pod2",
namespace: ns1,
ipaddr: "192.168.10.2/24",
macaddr: "",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 5.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
exutil.By("\n 6. Remove SR-IOV policy, wait for nns state to be stable, then verify VFs still remind \n")
removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
waitForSriovPolicyReady(oc, sriovOpNs)
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs reminded!!!", VFPolicy.totalvfs)
exutil.By("\n 7. Apply policy by nmstate to remove VFs then recreate VFs with one extra VF\n")
exutil.By("\n 7.1. Apply policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).ShouldNot(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are deleted correctly.\n", VFPolicy.totalvfs)
exutil.By("\n 7.2. Apply policy by nmstate to add VFs with an extra VF\n")
VFPolicy.template = nncpAddVFTemplate
VFPolicy.totalvfs = 3
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - NNCP policy to recreate VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v2"),
), "Not all %d VFs are added correctly.\n", VFPolicy.totalvfs)
exutil.By("\n 8. Recreate test pods and verify connectivity betwen two pods\n")
sriovTestPod1.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
exutil.By("\n 9. Apply policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).ShouldNot(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v2"),
), "Not all %d VFs are deleted correctly.\n", VFPolicy.totalvfs)
})
g.It("Author:jechen-Longduration-NonPreRelease-Medium-63534-Verify ExternallyManaged SR-IOV network with options [Disruptive][Flaky]", func() {
nmstateCRTemplate := filepath.Join(testDataDir, "nmstate", "nmstate-cr-template.yaml")
nncpAddVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-vfs-opt-specific-node-template.yaml")
nncpDelVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-remove-vfs-specific-node-template.yaml")
sriovNodeNetworkPolicyTemplate := filepath.Join(testDataDir, "sriov", "sriovnodepolicy-externallymanaged-template.yaml")
sriovNeworkTemplate := filepath.Join(testDataDir, "sriov", "sriovnetwork3-options-template.yaml")
sriovTestPodTemplate := filepath.Join(testDataDir, "sriov", "sriovtestpod2-with-mac-template.yaml")
opNamespace := "openshift-nmstate"
exutil.By("\n 1. Install nmstate operator and create nmstate CR \n")
installNMstateOperator(oc)
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("\n 2. Apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy := VFPolicyResource{
name: "vf-policy-63534",
intfname: sriovDevices[node].InterfaceName,
nodename: node,
totalvfs: 2,
template: nncpAddVFTemplate,
}
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to clean up VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 2.1 Verify the policy is applied \n")
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 2.2 Verify the created VFs found in node network state \n")
output, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 3. Create SR-IOV policy on the node with ExternallyManaged set to true \n")
sriovNNPolicy := sriovNetworkNodePolicySpecificNode{
policyName: "sriovnn",
deviceType: "netdevice",
pfName: sriovDevices[node].InterfaceName,
numVfs: 2,
resourceName: "sriovnn",
nodename: node,
namespace: sriovOpNs,
template: sriovNodeNetworkPolicyTemplate,
}
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 4. Create a target namespce, then create sriovNetwork to generate net-attach-def on the target namespace \n")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
sriovnetwork := sriovNetwork{
name: sriovNNPolicy.policyName,
resourceName: sriovNNPolicy.resourceName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
e2e.Logf("\n expect to see NAD of %s in namespace : %s\n", sriovnetwork.name, ns1)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
exutil.By("\n 5. Create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n Test pods with IPv4, IPv6 and dualstack addresses will be tested in 3 iterations\n")
addressPool1 := []string{"192.168.10.1/24", "2001:db8:abcd:0012::1/64", "192.168.10.1/24\", \"2001:db8:abcd:0012::1/64"}
addressPool2 := []string{"192.168.10.2/24", "2001:db8:abcd:0012::2/64", "192.168.10.2/24\", \"2001:db8:abcd:0012::2/64"}
for i := 0; i < 3; i++ {
e2e.Logf("\n ************************* No %d set of test pods ******************\n", i+1)
exutil.By("\n Create test pod1 on the target namespace \n")
sriovTestPod1 := sriovTestPodMAC{
name: "sriov-63534-test-pod1",
namespace: ns1,
ipaddr: addressPool1[i],
macaddr: "20:04:0f:f1:88:01",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 5.2 Create test pod2 on the target namespace \n")
sriovTestPod2 := sriovTestPodMAC{
name: "sriov-63534-test-pod2",
namespace: ns1,
ipaddr: addressPool2[i],
macaddr: "",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 5.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
// wait a little before going to next iteration to recreate test pods with next set of addresses
time.Sleep(3 * time.Second)
}
exutil.By("\n 6. Remove SR-IOV policy, wait for nns state to be stable, then verify VFs still remind \n")
removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
waitForSriovPolicyReady(oc, sriovOpNs)
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs reminded!!!", VFPolicy.totalvfs)
exutil.By("\n 7. Apply policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).ShouldNot(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are deleted correctly.\n", VFPolicy.totalvfs)
})
g.It("Author:jechen-Longduration-NonPreRelease-High-63527-High-63537-High-46528-High-46530-High-46532-High-46533-Verify ExternallyManaged functionality with different IP protocols before and after SRIOV operator removal and re-installation [Disruptive]", func() {
nmstateCRTemplate := filepath.Join(testDataDir, "nmstate", "nmstate-cr-template.yaml")
nncpAddVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-vfs-specific-node-template.yaml")
nncpDelVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-remove-vfs-specific-node-template.yaml")
sriovNodeNetworkPolicyTemplate := filepath.Join(testDataDir, "sriov", "sriovnodepolicy-externallymanaged-template.yaml")
sriovNeworkTemplate := filepath.Join(testDataDir, "sriov", "sriovnetwork2-template.yaml")
sriovTestPodTemplate := filepath.Join(testDataDir, "sriov", "sriovtestpod2-with-mac-template.yaml")
opNamespace := "openshift-nmstate"
exutil.By("\n **************** Before SRIOV un-installation: verify externallyManaged SRIOV functionality ***********************\n")
exutil.By("\n 1. Before SRIOV un-stallation: install nmstate operator and create nmstate CR \n")
installNMstateOperator(oc)
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("\n 2. Before SRIOV un-stallation: Apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy := VFPolicyResource{
name: "vf-policy-63537",
intfname: sriovDevices[node].InterfaceName,
nodename: node,
totalvfs: 2,
template: nncpAddVFTemplate,
}
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to clean up VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 2.1 Verify the policy is applied \n")
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 2.2 Verify the created VFs found in node network state \n")
output, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 3. Before SRIOV un-stallation: create SR-IOV policy on the node with ExternallyManaged set to true \n")
sriovNNPolicy := sriovNetworkNodePolicySpecificNode{
policyName: "sriovnn",
deviceType: "netdevice",
pfName: sriovDevices[node].InterfaceName,
numVfs: 2,
resourceName: "sriovnn",
nodename: node,
namespace: sriovOpNs,
template: sriovNodeNetworkPolicyTemplate,
}
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 4. Before SRIOV un-stallation: create a target namespce, then create sriovNetwork to generate net-attach-def on the target namespace \n")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
sriovnetwork := sriovNetwork{
name: sriovNNPolicy.policyName,
resourceName: sriovNNPolicy.resourceName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
exutil.By("\n 5. Before SRIOV un-stallation: create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n Test pods with IPv4, IPv6 and dualstack addresses will be tested in 3 iterations\n")
addressPool1 := []string{"192.168.10.1/24", "2001:db8:abcd:0012::1/64", "192.168.10.1/24\", \"2001:db8:abcd:0012::1/64"}
addressPool2 := []string{"192.168.10.2/24", "2001:db8:abcd:0012::2/64", "192.168.10.2/24\", \"2001:db8:abcd:0012::2/64"}
var sriovTestPod1, sriovTestPod2 sriovTestPodMAC
for i := 0; i < 3; i++ {
e2e.Logf("\n ************************* No %d set of test pods ******************\n", i+1)
exutil.By("\n 5.1 Create test pod1 on the target namespace \n")
sriovTestPod1 = sriovTestPodMAC{
name: "sriov-test-pod1",
namespace: ns1,
ipaddr: addressPool1[i],
macaddr: "20:04:0f:f1:88:01",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 5.2 Create test pod2 on the target namespace \n")
sriovTestPod2 = sriovTestPodMAC{
name: "sriov-test-pod2",
namespace: ns1,
ipaddr: addressPool2[i],
macaddr: "",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 5.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
// wait a little before going to next iteration to recreate test pods with next set of addresses
time.Sleep(3 * time.Second)
}
exutil.By("\n 6.1 Apply VF removal policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
exutil.By("\n 6.2 Delete the VFPolicy\n")
deleteNNCP(oc, VFPolicy.name)
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("nncp", VFPolicy.name).Output()
o.Expect(strings.Contains(output, "not found")).To(o.BeTrue())
exutil.By("\n . ****************** SRIOV operator un-installation then re-installation ***********************\n")
exutil.By("\n 7. Uninstall SRIOV operator \n")
defer installSriovOperator(oc, sriovOpNs)
uninstallSriovOperator(oc, sriovOpNs)
exutil.By("\n 8. Re-install SRIOV operator")
installSriovOperator(oc, sriovOpNs)
// Due to https://bugzilla.redhat.com/show_bug.cgi?id=2033440, keep the placeholder but comment out the webhook failurePolicy check for now
// exutil.By("\n 3. Check webhook failurePolicy after re-installation \n")
// chkOutput, _ := exec.Command("bash", "-c", "oc get mutatingwebhookconfigurations network-resources-injector-config -oyaml | grep failurePolicy").Output()
// e2e.Logf("\n failurePolicy for mutatingwebhookconfigurations network-resources-injector-config: %s\n", chkOutput)
// o.Expect(strings.Contains(string(chkOutput), "Ignore")).To(o.BeTrue())
// chkOutput, _ = exec.Command("bash", "-c", "oc get mutatingwebhookconfigurations sriov-operator-webhook-config -oyaml | grep failurePolicy").Output()
// e2e.Logf("\n failurePolicy for mutatingwebhookconfigurations sriov-operator-webhook-config: %s\n", chkOutput)
// o.Expect(strings.Contains(string(chkOutput), "Ignore")).To(o.BeTrue())
// chkOutput, _ = exec.Command("bash", "-c", "oc get ValidatingWebhookConfiguration sriov-operator-webhook-config -oyaml | grep failurePolicy").Output()
// e2e.Logf("\n failurePolicy for ValidatingWebhookConfiguration sriov-operator-webhook-config: %s\n", chkOutput)
// o.Expect(strings.Contains(string(chkOutput), "Ignore")).To(o.BeTrue())
exutil.By("\n *********************** Post SRIOV re-installation: verify externallyManaged SRIOV functionality again ***********************\n")
exutil.By("\n 9. Post sriov re-installation: re-apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy.template = nncpAddVFTemplate
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to clean up VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 9.1 Verify the policy is applied \n")
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 9.2 Verify the created VFs found in node network state \n")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 10. Post sriov re-installation: re-create SR-IOV policy on the node with ExternallyManaged set to true \n")
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 11. Post sriov re-installation: re-create sriovNetwork to generate net-attach-def on the target namespace \n")
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 = chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
exutil.By("\n 12. Post sriov re-installation: re-create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n Test pods with IPv4, IPv6 and dualstack addresses will be tested in 3 iterations\n")
for i := 0; i < 3; i++ {
e2e.Logf("\n ************************* No %d set of test pods ******************\n", i+1)
exutil.By("\n 12.1 Create test pod1 on the target namespace \n")
sriovTestPod1.ipaddr = addressPool1[i]
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 12.2 Create test pod2 on the target namespace \n")
sriovTestPod2.ipaddr = addressPool2[i]
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 12.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
// wait a little before going to next iteration to recreate test pods with next set of addresses
time.Sleep(3 * time.Second)
}
})
})
|
package networking
| ||||
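The tests in this file repeatedly verify VF creation by querying the NodeNetworkState (nns) with the same jsonpath expression and checking for VF names such as ens1f0v0. A minimal standalone sketch of that check, assuming oc is on PATH; nodeHasVFs and the example node/interface names are illustrative, not helpers from this suite:

// Sketch of the repeated VF-presence check against NodeNetworkState.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// nodeHasVFs reports whether the node's network state lists all totalvfs
// virtual functions (ifacev0, ifacev1, ...) for the given physical interface.
func nodeHasVFs(node, iface string, totalvfs int) (bool, error) {
	jsonpath := fmt.Sprintf(`{.status.currentState.interfaces[?(@.name=="%s")].ethernet.sr-iov.vfs}`, iface)
	out, err := exec.Command("oc", "get", "nns", node, "-ojsonpath="+jsonpath).CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("oc get nns failed: %v: %s", err, out)
	}
	for i := 0; i < totalvfs; i++ {
		if !strings.Contains(string(out), fmt.Sprintf("%sv%d", iface, i)) {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ok, err := nodeHasVFs("worker-0", "ens1f0", 2) // example node and interface
	fmt.Println(ok, err)
}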
test case
|
openshift/openshift-tests-private
|
8279ad13-c2a2-4e3f-aeaf-690e46c9d49c
|
Author:yingwang-Medium-NonPreRelease-Longduration-69600-VF use and release testing [Disruptive]
|
['"fmt"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-69600-VF use and release testing [Disruptive]", func() {
var caseID = "69600-"
networkName := caseID + "net"
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml")
// Create VFs on the given device
defer rmSriovNetworkPolicy(oc, data.Name, sriovOpNs)
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this test
if !result {
g.Skip("The NIC with this deviceID is not found on this cluster!!!")
}
e2e.Logf("###############start to test %v sriov on nic %v ################", data.Name, data.InterfaceName)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
//defer
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
//create full number pods which use all of the VFs
testpodPrex := "testpod"
workerList := getWorkerNodesWithNic(oc, data.DeviceID, data.InterfaceName)
o.Expect(workerList).NotTo(o.BeEmpty())
numWorker := len(workerList)
fullVFNum := vfNum * numWorker
createNumPods(oc, sriovnetwork.name, ns1, testpodPrex, fullVFNum)
//creating new pods will fail because all VFs are used.
sriovTestNewPod := sriovTestPod{
name: "testpodnew",
namespace: ns1,
networkName: sriovnetwork.name,
template: sriovTestPodTemplate,
}
sriovTestNewPod.createSriovTestPod(oc)
e2e.Logf("creating new testpod should fail, because all VFs are used")
o.Eventually(func() string {
podStatus, _ := getPodStatus(oc, ns1, sriovTestNewPod.name)
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.Equal("Pending"), fmt.Sprintf("Pod: %s should not be in Running state", sriovTestNewPod.name))
//delete one pod and the testpodnew will be ready
testpodName := testpodPrex + "0"
sriovTestRmPod := sriovTestPod{
name: testpodName,
namespace: ns1,
networkName: sriovnetwork.name,
template: sriovTestPodTemplate,
}
sriovTestRmPod.deleteSriovTestPod(oc)
err := waitForPodWithLabelReady(oc, sriovTestNewPod.namespace, "app="+sriovTestNewPod.name)
exutil.AssertWaitPollNoErr(err, "The new created pod is not ready after one VF is released")
})
| |||||
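The 69600 case above leans on a poll-until-stable pattern: after VF exhaustion, the extra pod must settle in Pending rather than Running. A stdlib-only sketch of the same 20s/5s poll, assuming oc is on PATH; waitForPodPhase and the example names are assumptions for illustration:

// Sketch of polling a pod's phase until it matches or a timeout expires.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func waitForPodPhase(ns, pod, phase string, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		out, err := exec.Command("oc", "get", "pod", pod, "-n", ns, "-ojsonpath={.status.phase}").Output()
		if err == nil && strings.TrimSpace(string(out)) == phase {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("pod %s/%s did not reach phase %s within %v", ns, pod, phase, timeout)
}

func main() {
	// After VF exhaustion, a newly created pod should settle in Pending.
	fmt.Println(waitForPodPhase("e2e-ns", "testpodnew", "Pending", 20*time.Second, 5*time.Second))
}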
test case
|
openshift/openshift-tests-private
|
43f96fbd-9225-48e6-ba08-79b3a6924000
|
Author:yingwang-Medium-NonPreRelease-Longduration-24780-NAD will be deleted too when sriovnetwork is deleted
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-24780-NAD will be deleted too when sriovnetwork is deleted", func() {
var caseID = "24780-"
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
networkName := caseID + "net"
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: "none",
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Can't find NAD after sriovnetwork is created")
//delete sriovnetwork
rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
//NAD should be deleted too
errChk2 := chkNAD(oc, ns1, sriovnetwork.name, false)
exutil.AssertWaitPollNoErr(errChk2, "NAD was not removed after sriovnetwork is removed")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
efc922b2-3a33-4493-9a2e-4164129e53fb
|
Author:yingwang-Medium-NonPreRelease-24713-NAD can be also updated when networknamespace is change
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-24713-NAD can be also updated when networknamespace is change", func() {
var caseID = "24713-"
ns1 := "e2e-" + caseID + data.Name
ns2 := "e2e-" + caseID + data.Name + "-new"
networkName := caseID + "net"
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns1, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns1)
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", ns2, "--ignore-not-found").Execute()
exutil.SetNamespacePrivileged(oc, ns2)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: "none",
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, fmt.Sprintf("Can find NAD in ns %v", ns1))
errChk2 := chkNAD(oc, ns2, sriovnetwork.name, true)
exutil.AssertWaitPollWithErr(errChk2, fmt.Sprintf("NAD should not be present in ns %v", ns2))
//change networknamespace and check NAD
patchYamlToRestore := `[{"op":"replace","path":"/spec/networkNamespace","value":"` + ns2 + `"}]`
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovnetwork", sriovnetwork.name, "-n", sriovOpNs,
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := sriovnetwork.name + " patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
errChk1 = chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollWithErr(errChk1, fmt.Sprintf("NAD should be removed from ns %v after networknamespace changed", ns1))
errChk2 = chkNAD(oc, ns2, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk2, fmt.Sprintf("Did not find NAD in ns %v after networknamespace changed", ns2))
})
| |||||
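The 24713 case hinges on a single JSON patch to the SriovNetwork's networkNamespace; the operator then removes the NAD from the old namespace and recreates it in the new one. A sketch of that patch, where patchNetworkNamespace is a hypothetical wrapper and the operator namespace value is an assumption:

// Sketch of moving a SriovNetwork's NAD to a new target namespace.
package main

import (
	"fmt"
	"os/exec"
)

func patchNetworkNamespace(sriovNet, opNs, newNs string) (string, error) {
	patch := fmt.Sprintf(`[{"op":"replace","path":"/spec/networkNamespace","value":"%s"}]`, newNs)
	out, err := exec.Command("oc", "patch", "sriovnetwork", sriovNet, "-n", opNs,
		"--type=json", "-p", patch).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := patchNetworkNamespace("24713-net", "openshift-sriov-network-operator", "e2e-24713-new")
	fmt.Println(out, err) // expect "... patched"; the NAD then moves to the new namespace
}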
test case
|
openshift/openshift-tests-private
|
6e6bc3ac-b8b6-4daf-a229-e160175e3a2a
|
Author:yingwang-Medium-NonPreRelease-Longduration-25287-NAD should be able to restore by sriov operator when it was deleted
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-25287-NAD should be able to restore by sriov operator when it was deleted", func() {
var caseID = "25287-"
networkName := caseID + "net"
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
exutil.By("Create sriovNetwork to generate net-attach-def on the target namespace")
e2e.Logf("device ID is %v", data.DeviceID)
e2e.Logf("device Name is %v", data.Name)
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: "nonE",
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
spoolchk: "on",
trust: "on",
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, fmt.Sprintf("Did not find NAD in ns %v", ns1))
//remove NAD and check again
rmNAD(oc, ns1, sriovnetwork.name)
errChk2 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk2, fmt.Sprintf("NAD was not restored in ns %v after it was removed", ns1))
})
| |||||
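The 25287 case verifies self-healing: after the NAD is deleted, the SR-IOV operator is expected to recreate it. A compact sketch of that delete-then-poll flow, where nadExists is an illustrative stand-in for the suite's chkNAD helper and the names are examples:

// Sketch of the NAD restore check.
package main

import (
	"fmt"
	"os/exec"
	"time"
)

func nadExists(ns, name string) bool {
	return exec.Command("oc", "get", "net-attach-def", name, "-n", ns).Run() == nil
}

func main() {
	ns, name := "e2e-25287", "25287-net"
	_ = exec.Command("oc", "delete", "net-attach-def", name, "-n", ns).Run()
	for i := 0; i < 12; i++ { // poll up to ~1 minute
		if nadExists(ns, name) {
			fmt.Println("NAD restored by the SR-IOV operator")
			return
		}
		time.Sleep(5 * time.Second)
	}
	fmt.Println("NAD was not restored")
}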
test case
|
openshift/openshift-tests-private
|
c5dfea7d-8bd2-45c7-a204-4c74d7bcbc39
|
Author:yingwang-Medium-NonPreRelease-Longduration-21364-Create pod with sriov-cni plugin and macvlan on the same interface [Disruptive]
|
['"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-Longduration-21364-Create pod with sriov-cni plugin and macvlan on the same interface [Disruptive]", func() {
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
var caseID = "21364-"
networkName := caseID + "net"
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-multinet-template.yaml")
netMacvlanTemplate := filepath.Join(buildPruningBaseDir, "nad-macvlan-template.yaml")
netMacVlanName := "macvlannet"
// Create VFs on the given device
defer rmSriovNetworkPolicy(oc, data.Name, sriovOpNs)
result := initVF(oc, data.Name, data.DeviceID, data.InterfaceName, data.Vendor, sriovOpNs, vfNum)
// if the deviceID does not exist on the worker, skip this test
if !result {
g.Skip("The NIC with this deviceID is not found on this cluster!!!")
}
e2e.Logf("###############start to test %v sriov on nic %v ################", data.Name, data.InterfaceName)
exutil.By("Create sriovNetwork nad to generate net-attach-def on the target namespace")
sriovnetwork := sriovNetwork{
name: networkName,
resourceName: data.Name,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
linkState: "enable",
}
networkMacvlan := sriovNetResource{
name: netMacVlanName,
namespace: ns1,
kind: "NetworkAttachmentDefinition",
tempfile: netMacvlanTemplate,
}
//defer
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
defer networkMacvlan.delete(oc)
networkMacvlan.create(oc, "NADNAME="+networkMacvlan.name, "NAMESPACE="+networkMacvlan.namespace)
//create pods with both sriovnetwork and macvlan network
for i := 0; i < 2; i++ {
sriovTestPod := sriovNetResource{
name: "testpod" + strconv.Itoa(i),
namespace: ns1,
kind: "pod",
tempfile: sriovTestPodTemplate,
}
defer sriovTestPod.delete(oc)
sriovTestPod.create(oc, "PODNAME="+sriovTestPod.name, "NETWORKE1="+sriovnetwork.name, "NETWORKE2="+networkMacvlan.name, "NAMESPACE="+ns1)
err := waitForPodWithLabelReady(oc, sriovTestPod.namespace, "name="+sriovTestPod.name)
exutil.AssertWaitPollNoErr(err, "The new created pod is not ready")
}
chkPodsPassTraffic(oc, "testpod0", "testpod1", "net1", ns1)
chkPodsPassTraffic(oc, "testpod0", "testpod1", "net2", ns1)
})
| |||||
test case
|
openshift/openshift-tests-private
|
5e315d49-f6d1-477d-b353-606e09de7f78
|
Author:yingwang-Medium-NonPreRelease-25847-SR-IOV operator-webhook can be disable by edit SR-IOV Operator Config [Serial]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-25847-SR-IOV operator-webhook can be disable by edit SR-IOV Operator Config [Serial]", func() {
// check webhook pods are running
chkPodsStatus(oc, sriovOpNs, "app=operator-webhook")
//disable webhook
defer chkSriovWebhookResource(oc, true)
defer chkPodsStatus(oc, sriovOpNs, "app=operator-webhook")
defer setSriovWebhook(oc, "true")
setSriovWebhook(oc, "false")
// webhook pods should be deleted
o.Eventually(func() string {
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=operator-webhook", "-n", sriovOpNs).Output()
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.ContainSubstring("No resources found"), fmt.Sprintf("sriov webhook pods should be removed"))
chkSriovWebhookResource(oc, false)
// set webhook true
setSriovWebhook(oc, "true")
// webhook pods should be recovered
chkPodsStatus(oc, sriovOpNs, "app=operator-webhook")
chkSriovWebhookResource(oc, true)
})
| |||||
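The setSriovWebhook helper used above is not shown in this excerpt; one plausible implementation (an assumption, not the suite's actual code) is to merge-patch the default SriovOperatorConfig, whose enableOperatorWebhook field controls whether the operator-webhook pods exist:

// Sketch of toggling the SR-IOV operator webhook via SriovOperatorConfig.
package main

import (
	"fmt"
	"os/exec"
)

func setOperatorWebhook(opNs string, enabled bool) (string, error) {
	patch := fmt.Sprintf(`{"spec":{"enableOperatorWebhook":%t}}`, enabled)
	out, err := exec.Command("oc", "patch", "sriovoperatorconfig", "default", "-n", opNs,
		"--type=merge", "-p", patch).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := setOperatorWebhook("openshift-sriov-network-operator", false)
	fmt.Println(out, err)
}

The 25814 injector case that follows works the same way, with the spec's injector flag in place of the webhook flag.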
test case
|
openshift/openshift-tests-private
|
5d306150-f2a1-4f49-989a-cf9b5df25cf0
|
Author:yingwang-Medium-NonPreRelease-25814-SR-IOV resource injector can be disable by edit SR-IOV Operator Config [Serial]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:yingwang-Medium-NonPreRelease-25814-SR-IOV resource injector can be disable by edit SR-IOV Operator Config [Serial]", func() {
// check network-resources-injector pods are running
chkPodsStatus(oc, sriovOpNs, "app=network-resources-injector")
//disable network-resources-injector
defer chkSriovInjectorResource(oc, true)
defer chkPodsStatus(oc, sriovOpNs, "app=network-resources-injector")
defer setSriovInjector(oc, "true")
setSriovInjector(oc, "false")
// network-resources-injector pods should be deleted
o.Eventually(func() string {
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-l", "app=network-resources-injector", "-n", sriovOpNs).Output()
return podStatus
}, 20*time.Second, 5*time.Second).Should(o.ContainSubstring("No resources found"), fmt.Sprintf("sriov network-resources-injector pods are removed"))
chkSriovInjectorResource(oc, false)
// set network-resources-injector true
setSriovInjector(oc, "true")
// network-resources-injector pods should be recovered
chkPodsStatus(oc, sriovOpNs, "app=network-resources-injector")
chkSriovInjectorResource(oc, true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
ee2661fc-9ae9-49a1-8ec5-e412899f27c4
|
Author:jechen-Longduration-NonPreRelease-High-63533-ExternallyManaged: Recreate VFs when SR-IOV policy is applied [Disruptive][Flaky]
|
['"path/filepath"', '"strings"', 'g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:jechen-Longduration-NonPreRelease-High-63533-ExternallyManaged: Recreate VFs when SR-IOV policy is applied [Disruptive][Flaky]", func() {
nmstateCRTemplate := filepath.Join(testDataDir, "nmstate", "nmstate-cr-template.yaml")
nncpAddVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-vfs-specific-node-template.yaml")
nncpDelVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-remove-vfs-specific-node-template.yaml")
sriovNodeNetworkPolicyTemplate := filepath.Join(testDataDir, "sriov", "sriovnodepolicy-externallymanaged-template.yaml")
sriovNeworkTemplate := filepath.Join(testDataDir, "sriov", "sriovnetwork2-template.yaml")
sriovTestPodTemplate := filepath.Join(testDataDir, "sriov", "sriovtestpod2-with-mac-template.yaml")
opNamespace := "openshift-nmstate"
exutil.By("\n 1. Install nmstate operator and create nmstate CR \n")
installNMstateOperator(oc)
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("\n 2. Apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy := VFPolicyResource{
name: "vf-policy-63533",
intfname: sriovDevices[node].InterfaceName,
nodename: node,
totalvfs: 2,
template: nncpAddVFTemplate,
}
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to clean up VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 2.1 Verify the policy is applied \n")
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 2.2 Verify the created VFs found in node network state \n")
output, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 3. Create SR-IOV policy on the node with ExternallyManaged set to true \n")
sriovNNPolicy := sriovNetworkNodePolicySpecificNode{
policyName: "sriovnn",
deviceType: "netdevice",
pfName: sriovDevices[node].InterfaceName,
numVfs: 2,
resourceName: "sriovnn",
nodename: node,
namespace: sriovOpNs,
template: sriovNodeNetworkPolicyTemplate,
}
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 4. Create a target namespce, then create sriovNetwork to generate net-attach-def on the target namespace \n")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
sriovnetwork := sriovNetwork{
name: sriovNNPolicy.policyName,
resourceName: sriovNNPolicy.resourceName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
e2e.Logf("\n expect to see NAD of %s in namespace : %s\n", sriovnetwork.name, ns1)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
exutil.By("\n 5. Create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n 5.1 Create test pod1 on the target namespace \n")
sriovTestPod1 := sriovTestPodMAC{
name: "sriov-63533-test-pod1",
namespace: ns1,
ipaddr: "192.168.10.1/24",
macaddr: "20:04:0f:f1:88:01",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 5.2 Create test pod2 on the target namespace \n")
sriovTestPod2 := sriovTestPodMAC{
name: "sriov-63533-test-pod2",
namespace: ns1,
ipaddr: "192.168.10.2/24",
macaddr: "",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 5.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
exutil.By("\n 6. Remove SR-IOV policy, wait for nns state to be stable, then verify VFs still remind \n")
removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
waitForSriovPolicyReady(oc, sriovOpNs)
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs reminded!!!", VFPolicy.totalvfs)
exutil.By("\n 7. Apply policy by nmstate to remove VFs then recreate VFs with one extra VF\n")
exutil.By("\n 7.1. Apply policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).ShouldNot(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are deleted correctly.\n", VFPolicy.totalvfs)
exutil.By("\n 7.2. Apply policy by nmstate to add VFs with an extra VF\n")
VFPolicy.template = nncpAddVFTemplate
VFPolicy.totalvfs = 3
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to recreate VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v2"),
), "Not all %d VFs are added correctly.\n", VFPolicy.totalvfs)
exutil.By("\n 8. Recreate test pods and verify connectivity betwen two pods\n")
sriovTestPod1.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
exutil.By("\n 9. Apply policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).ShouldNot(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v2"),
), "Not all %d VFs are deleted correctly.\n", VFPolicy.totalvfs)
})
| |||||
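The deferred cleanup in these ExternallyManaged cases relies on Go's LIFO defer order: deleteNNCP is registered first so it runs last, after the deferred function that re-applies the policy with totalvfs=0 has removed the VFs. A self-contained sketch of that ordering:

// Sketch of LIFO defer ordering used for the NNCP cleanup.
package main

import "fmt"

func main() {
	defer fmt.Println("3) delete the NNCP object")     // registered first, runs last
	defer fmt.Println("2) apply NNCP with totalvfs=0") // registered second, runs first
	fmt.Println("1) test body: create VFs, run SR-IOV checks")
}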
test case
|
openshift/openshift-tests-private
|
700d9d9b-dc9b-4129-a1d1-b157cb898148
|
Author:jechen-Longduration-NonPreRelease-Medium-63534-Verify ExternallyManaged SR-IOV network with options [Disruptive][Flaky]
|
['"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:jechen-Longduration-NonPreRelease-Medium-63534-Verify ExternallyManaged SR-IOV network with options [Disruptive][Flaky]", func() {
nmstateCRTemplate := filepath.Join(testDataDir, "nmstate", "nmstate-cr-template.yaml")
nncpAddVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-vfs-opt-specific-node-template.yaml")
nncpDelVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-remove-vfs-specific-node-template.yaml")
sriovNodeNetworkPolicyTemplate := filepath.Join(testDataDir, "sriov", "sriovnodepolicy-externallymanaged-template.yaml")
sriovNeworkTemplate := filepath.Join(testDataDir, "sriov", "sriovnetwork3-options-template.yaml")
sriovTestPodTemplate := filepath.Join(testDataDir, "sriov", "sriovtestpod2-with-mac-template.yaml")
opNamespace := "openshift-nmstate"
exutil.By("\n 1. Install nmstate operator and create nmstate CR \n")
installNMstateOperator(oc)
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("\n 2. Apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy := VFPolicyResource{
name: "vf-policy-63534",
intfname: sriovDevices[node].InterfaceName,
nodename: node,
totalvfs: 2,
template: nncpAddVFTemplate,
}
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to clean up VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 2.1 Verify the policy is applied \n")
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 2.2 Verify the created VFs found in node network state \n")
output, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 3. Create SR-IOV policy on the node with ExternallyManaged set to true \n")
sriovNNPolicy := sriovNetworkNodePolicySpecificNode{
policyName: "sriovnn",
deviceType: "netdevice",
pfName: sriovDevices[node].InterfaceName,
numVfs: 2,
resourceName: "sriovnn",
nodename: node,
namespace: sriovOpNs,
template: sriovNodeNetworkPolicyTemplate,
}
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 4. Create a target namespce, then create sriovNetwork to generate net-attach-def on the target namespace \n")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
sriovnetwork := sriovNetwork{
name: sriovNNPolicy.policyName,
resourceName: sriovNNPolicy.resourceName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
e2e.Logf("\n expect to see NAD of %s in namespace : %s\n", sriovnetwork.name, ns1)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
exutil.By("\n 5. Create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n Test pods with IPv4, IPv6 and dualstack addresses will be tested in 3 iterations\n")
addressPool1 := []string{"192.168.10.1/24", "2001:db8:abcd:0012::1/64", "192.168.10.1/24\", \"2001:db8:abcd:0012::1/64"}
addressPool2 := []string{"192.168.10.2/24", "2001:db8:abcd:0012::2/64", "192.168.10.2/24\", \"2001:db8:abcd:0012::2/64"}
for i := 0; i < 3; i++ {
e2e.Logf("\n ************************* No %d set of test pods ******************\n", i+1)
exutil.By("\n Create test pod1 on the target namespace \n")
sriovTestPod1 := sriovTestPodMAC{
name: "sriov-63534-test-pod1",
namespace: ns1,
ipaddr: addressPool1[i],
macaddr: "20:04:0f:f1:88:01",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 5.2 Create test pod2 on the target namespace \n")
sriovTestPod2 := sriovTestPodMAC{
name: "sriov-63534-test-pod2",
namespace: ns1,
ipaddr: addressPool2[i],
macaddr: "",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 5.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
// wait a little before going to next iteration to recreate test pods with next set of addresses
time.Sleep(3 * time.Second)
}
exutil.By("\n 6. Remove SR-IOV policy, wait for nns state to be stable, then verify VFs still remind \n")
removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
waitForSriovPolicyReady(oc, sriovOpNs)
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs reminded!!!", VFPolicy.totalvfs)
exutil.By("\n 7. Apply policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).ShouldNot(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are deleted correctly.\n", VFPolicy.totalvfs)
})
| |||||
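The dual-stack entries in addressPool1/addressPool2 above embed an escaped ", " so that, once substituted into the pod template's ips field, they render as a two-element JSON array. A quick illustration; the template fragment shown in the comment is an assumption:

// Sketch of how the escaped dual-stack ipaddr string renders.
package main

import "fmt"

func main() {
	ipaddr := "192.168.10.1/24\", \"2001:db8:abcd:0012::1/64"
	// hypothetical template fragment: "ips": ["<IPADDR>"]
	rendered := fmt.Sprintf(`"ips": ["%s"]`, ipaddr)
	fmt.Println(rendered) // "ips": ["192.168.10.1/24", "2001:db8:abcd:0012::1/64"]
}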
test case
|
openshift/openshift-tests-private
|
d4561934-9d6d-4dc0-a4a0-4a9a5c18e843
|
Author:jechen-Longduration-NonPreRelease-High-63527-High-63537-High-46528-High-46530-High-46532-High-46533-Verify ExternallyManaged functionality with different IP protocols before and after SRIOV operator removal and re-installation [Disruptive]
|
['"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_nic.go
|
g.It("Author:jechen-Longduration-NonPreRelease-High-63527-High-63537-High-46528-High-46530-High-46532-High-46533-Verify ExternallyManaged functionality with different IP protocols before and after SRIOV operator removal and re-installation [Disruptive]", func() {
nmstateCRTemplate := filepath.Join(testDataDir, "nmstate", "nmstate-cr-template.yaml")
nncpAddVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-vfs-specific-node-template.yaml")
nncpDelVFTemplate := filepath.Join(testDataDir, "nmstate", "nncp-remove-vfs-specific-node-template.yaml")
sriovNodeNetworkPolicyTemplate := filepath.Join(testDataDir, "sriov", "sriovnodepolicy-externallymanaged-template.yaml")
sriovNeworkTemplate := filepath.Join(testDataDir, "sriov", "sriovnetwork2-template.yaml")
sriovTestPodTemplate := filepath.Join(testDataDir, "sriov", "sriovtestpod2-with-mac-template.yaml")
opNamespace := "openshift-nmstate"
exutil.By("\n **************** Before SRIOV un-installation: verify externallyManaged SRIOV functionality ***********************\n")
exutil.By("\n 1. Before SRIOV un-stallation: install nmstate operator and create nmstate CR \n")
installNMstateOperator(oc)
nmstateCR := nmstateCRResource{
name: "nmstate",
template: nmstateCRTemplate,
}
defer deleteNMStateCR(oc, nmstateCR)
result, crErr := createNMStateCR(oc, nmstateCR, opNamespace)
exutil.AssertWaitPollNoErr(crErr, "create nmstate cr failed")
o.Expect(result).To(o.BeTrue())
e2e.Logf("SUCCESS - NMState CR Created")
exutil.By("\n 2. Before SRIOV un-stallation: Apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy := VFPolicyResource{
name: "vf-policy-63537",
intfname: sriovDevices[node].InterfaceName,
nodename: node,
totalvfs: 2,
template: nncpAddVFTemplate,
}
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to clean up VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 2.1 Verify the policy is applied \n")
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy application failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 2.2 Verify the created VFs found in node network state \n")
output, nnsErr1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 3. Before SRIOV un-stallation: create SR-IOV policy on the node with ExternallyManaged set to true \n")
sriovNNPolicy := sriovNetworkNodePolicySpecificNode{
policyName: "sriovnn",
deviceType: "netdevice",
pfName: sriovDevices[node].InterfaceName,
numVfs: 2,
resourceName: "sriovnn",
nodename: node,
namespace: sriovOpNs,
template: sriovNodeNetworkPolicyTemplate,
}
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 4. Before SRIOV un-stallation: create a target namespce, then create sriovNetwork to generate net-attach-def on the target namespace \n")
ns1 := oc.Namespace()
exutil.SetNamespacePrivileged(oc, ns1)
sriovnetwork := sriovNetwork{
name: sriovNNPolicy.policyName,
resourceName: sriovNNPolicy.resourceName,
networkNamespace: ns1,
template: sriovNeworkTemplate,
namespace: sriovOpNs,
}
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 := chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
exutil.By("\n 5. Before SRIOV un-stallation: create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n Test pods with IPv4, IPv6 and dualstack addresses will be tested in 3 iterations\n")
addressPool1 := []string{"192.168.10.1/24", "2001:db8:abcd:0012::1/64", "192.168.10.1/24\", \"2001:db8:abcd:0012::1/64"}
addressPool2 := []string{"192.168.10.2/24", "2001:db8:abcd:0012::2/64", "192.168.10.2/24\", \"2001:db8:abcd:0012::2/64"}
var sriovTestPod1, sriovTestPod2 sriovTestPodMAC
for i := 0; i < 3; i++ {
e2e.Logf("\n ************************* No %d set of test pods ******************\n", i+1)
exutil.By("\n 5.1 Create test pod1 on the target namespace \n")
sriovTestPod1 = sriovTestPodMAC{
name: "sriov-test-pod1",
namespace: ns1,
ipaddr: addressPool1[i],
macaddr: "20:04:0f:f1:88:01",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 5.2 Create test pod2 on the target namespace \n")
sriovTestPod2 = sriovTestPodMAC{
name: "sriov-test-pod2",
namespace: ns1,
ipaddr: addressPool2[i],
macaddr: "",
sriovnetname: sriovnetwork.name,
tempfile: sriovTestPodTemplate,
}
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 5.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
// wait a little before going to next iteration to recreate test pods with next set of addresses
time.Sleep(3 * time.Second)
}
exutil.By("\n 6.1 Apply VF removal policy by nmstate to remove VFs\n")
VFPolicy.template = nncpDelVFTemplate
VFPolicy.createVFPolicy(oc)
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - NNCP policy to delete VFs applied")
exutil.By("\n 6.2 Delete the VFPolicy\n")
deleteNNCP(oc, VFPolicy.name)
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("nncp", VFPolicy.name).Output()
o.Expect(strings.Contains(output, "not found")).To(o.BeTrue())
exutil.By("\n . ****************** SRIOV operator un-installation then re-installation ***********************\n")
exutil.By("\n 7. Uninstall SRIOV operator \n")
defer installSriovOperator(oc, sriovOpNs)
uninstallSriovOperator(oc, sriovOpNs)
exutil.By("\n 8. Re-install SRIOV operator")
installSriovOperator(oc, sriovOpNs)
// Due to https://bugzilla.redhat.com/show_bug.cgi?id=2033440, keep the placeholder but comment out the webhook failurePolicy check for now
// exutil.By("\n 3. Check webhook failurePolicy after re-installation \n")
// chkOutput, _ := exec.Command("bash", "-c", "oc get mutatingwebhookconfigurations network-resources-injector-config -oyaml | grep failurePolicy").Output()
// e2e.Logf("\n failurePolicy for mutatingwebhookconfigurations network-resources-injector-config: %s\n", chkOutput)
// o.Expect(strings.Contains(string(chkOutput), "Ignore")).To(o.BeTrue())
// chkOutput, _ = exec.Command("bash", "-c", "oc get mutatingwebhookconfigurations sriov-operator-webhook-config -oyaml | grep failurePolicy").Output()
// e2e.Logf("\n failurePolicy for mutatingwebhookconfigurations sriov-operator-webhook-config: %s\n", chkOutput)
// o.Expect(strings.Contains(string(chkOutput), "Ignore")).To(o.BeTrue())
// chkOutput, _ = exec.Command("bash", "-c", "oc get ValidatingWebhookConfiguration sriov-operator-webhook-config -oyaml | grep failurePolicy").Output()
// e2e.Logf("\n failurePolicy for ValidatingWebhookConfiguration sriov-operator-webhook-config: %s\n", chkOutput)
// o.Expect(strings.Contains(string(chkOutput), "Ignore")).To(o.BeTrue())
exutil.By("\n *********************** Post SRIOV re-installation: verify externallyManaged SRIOV functionality again ***********************\n")
exutil.By("\n 9. Post sriov re-installation: re-apply policy to create VFs on SR-IOV node by nmstate \n")
VFPolicy.template = nncpAddVFTemplate
// defer cleanup VFs by recreating VFPolicy with 0 VFs, then defer delete the VFPolicy
defer deleteNNCP(oc, VFPolicy.name)
defer func() {
ifaces, deferErr := exutil.DebugNodeRetryWithOptionsAndChroot(oc, VFPolicy.nodename, []string{"--quiet=true", "--to-namespace=default"}, "bash", "-c", "nmcli", "con", "show")
o.Expect(deferErr).NotTo(o.HaveOccurred())
VFPolicy.totalvfs = 0
if strings.Contains(ifaces, VFPolicy.intfname) {
VFPolicy.createVFPolicy(oc)
nncpErr1 := checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
}
}()
VFPolicy.createVFPolicy(oc)
exutil.By("\n 9.1 Verify the policy is applied \n")
nncpErr1 = checkNNCPStatus(oc, VFPolicy.name, "Available")
exutil.AssertWaitPollNoErr(nncpErr1, "policy applied failed")
e2e.Logf("SUCCESS - NNCP policy to create VFs applied")
exutil.By("\n 9.2 Verify the created VFs found in node network state \n")
output, nnsErr1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("nns", node, "-ojsonpath={.status.currentState.interfaces[?(@.name==\""+sriovDevices[node].InterfaceName+"\")].ethernet.sr-iov.vfs}").Output()
o.Expect(nnsErr1).NotTo(o.HaveOccurred())
e2e.Logf("\n output: %v\n", output)
o.Expect(output).Should(o.And(
o.ContainSubstring(sriovDevices[node].InterfaceName+"v0"),
o.ContainSubstring(sriovDevices[node].InterfaceName+"v1"),
), "Not all %d VFs are created.\n", VFPolicy.totalvfs)
exutil.By("\n 10. Post sriov re-installation: re-create SR-IOV policy on the node with ExternallyManaged set to true \n")
defer removeResource(oc, true, true, "SriovNetworkNodePolicy", sriovNNPolicy.policyName, "-n", sriovOpNs)
sriovNNPolicy.createPolicySpecificNode(oc)
waitForSriovPolicyReady(oc, sriovOpNs)
exutil.By("\n 11. Post sriov re-installation: re-create sriovNetwork to generate net-attach-def on the target namespace \n")
defer rmSriovNetwork(oc, sriovnetwork.name, sriovOpNs)
sriovnetwork.createSriovNetwork(oc)
errChk1 = chkNAD(oc, ns1, sriovnetwork.name, true)
exutil.AssertWaitPollNoErr(errChk1, "Did not find NAD in the namespace")
exutil.By("\n 12. Post sriov re-installation: re-create test pod1 with static MAC and test pod2 with dynamic MAC in target namespace\n")
exutil.By("\n Test pods with IPv4, IPv6 and dualstack addresses will be tested in 3 iterations\n")
for i := 0; i < 3; i++ {
e2e.Logf("\n ************************* No %d set of test pods ******************\n", i+1)
exutil.By("\n 12.1 Create test pod1 on the target namespace \n")
sriovTestPod1.ipaddr = addressPool1[i]
sriovTestPod1.createSriovTestPodMAC(oc)
err := waitForPodWithLabelReady(oc, sriovTestPod1.namespace, "app="+sriovTestPod1.name)
exutil.AssertWaitPollNoErr(err, "SRIOV client test pod is not ready")
exutil.By("\n 12.2 Create test pod2 on the target namespace \n")
sriovTestPod2.ipaddr = addressPool2[i]
sriovTestPod2.createSriovTestPodMAC(oc)
err = waitForPodWithLabelReady(oc, sriovTestPod2.namespace, "app="+sriovTestPod2.name)
exutil.AssertWaitPollNoErr(err, "SRIOV server test pod is not ready")
exutil.By("\n 12.3 Check traffic between two test pods \n")
chkPodsPassTraffic(oc, sriovTestPod1.name, sriovTestPod2.name, "net1", ns1)
chkPodsPassTraffic(oc, sriovTestPod2.name, sriovTestPod1.name, "net1", ns1)
removeResource(oc, true, true, "pod", sriovTestPod1.name, "-n", sriovTestPod1.namespace)
removeResource(oc, true, true, "pod", sriovTestPod2.name, "-n", sriovTestPod2.namespace)
// wait a little before going to next iteration to recreate test pods with next set of addresses
time.Sleep(3 * time.Second)
}
})
| |||||
file
|
openshift/openshift-tests-private
|
6b16e49b-c60f-4ceb-9383-d763868bb775
|
sriov_util
|
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
package networking
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
// struct for sriovnetworknodepolicy and sriovnetwork
type sriovNetResource struct {
name string
namespace string
tempfile string
kind string
ip string
}
type sriovNetworkNodePolicy struct {
policyName string
deviceType string
pfName string
deviceID string
vendor string
numVfs int
resourceName string
template string
namespace string
}
type sriovNetwork struct {
name string
resourceName string
networkNamespace string
template string
namespace string
spoolchk string
trust string
vlanId int
linkState string
minTxRate int
maxTxRate int
vlanQoS int
}
type sriovTestPod struct {
name string
namespace string
networkName string
template string
}
// struct for sriov pod
type sriovPod struct {
name string
tempfile string
namespace string
ipv4addr string
ipv6addr string
intfname string
intfresource string
pingip string
}
// struct for using nncp to create VF on sriov node
type VFPolicyResource struct {
name string
intfname string
nodename string
totalvfs int
template string
}
type sriovNetworkNodePolicySpecificNode struct {
policyName string
deviceType string
pfName string
numVfs int
resourceName string
nodename string
namespace string
template string
}
// struct for sriov pod with static or dynamic MAC address
type sriovTestPodMAC struct {
name string
namespace string
ipaddr string
macaddr string
sriovnetname string
tempfile string
}
// delete sriov resource
func (rs *sriovNetResource) delete(oc *exutil.CLI) {
e2e.Logf("delete %s %s in namespace %s", rs.kind, rs.name, rs.namespace)
oc.AsAdmin().WithoutNamespace().Run("delete").Args(rs.kind, rs.name, "-n", rs.namespace).Execute()
}
// create sriov resource
func (rs *sriovNetResource) create(oc *exutil.CLI, parameters ...string) {
var configFile string
cmd := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
cmd = append(cmd, para)
}
e2e.Logf("parameters list is %s\n", cmd)
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(cmd...).OutputToFile(getRandomString() + "config.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process sriov resource %v", cmd))
e2e.Logf("the file of resource is %s\n", configFile)
_, err1 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile, "-n", rs.namespace).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
}
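// Illustrative usage (a hypothetical sketch, not taken from the test suite; the
// template variable and parameter names are assumptions):
//   res := sriovNetResource{name: "demo-net", namespace: ns, tempfile: tmpl, kind: "SriovNetwork"}
//   res.create(oc, "SRIOVNETNAME="+res.name, "TARGETNS="+ns)
//   defer res.delete(oc)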
// process the sriov pod template and get a configuration file
func (pod *sriovPod) processPodTemplate(oc *exutil.CLI) string {
var configFile string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
parameters := fmt.Sprintf("-p PODNAME=%s, SRIOVNETNAME=%s, IPV4_ADDR=%s, IPV6_ADDR=%s", pod.name, pod.intfresource, pod.ipv4addr, pod.ipv6addr)
if pod.pingip != "" {
parameters += pod.pingip
}
output, err := oc.AsAdmin().Run("process").Args("-f", pod.tempfile, "--ignore-unknown-parameters=true", parameters, "-o=jsonpath={.items[0]}").OutputToFile(getRandomString() + "config.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process pod resource %v", pod.name))
e2e.Logf("the file of resource is %s\n", configFile)
return configFile
}
// create pod
func (pod *sriovPod) createPod(oc *exutil.CLI) string {
configFile := pod.processPodTemplate(oc)
podsLog, err1 := oc.AsAdmin().WithoutNamespace().Run("create").Args("--loglevel=10", "-f", configFile, "-n", pod.namespace).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
return podsLog
}
// delete pod
func (pod *sriovPod) deletePod(oc *exutil.CLI) {
e2e.Logf("delete pod %s in namespace %s", pod.name, pod.namespace)
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod.name, "-n", pod.namespace).Execute()
}
// check pods of openshift-sriov-network-operator are running
func chkSriovOperatorStatus(oc *exutil.CLI, ns string) {
e2e.Logf("check if openshift-sriov-network-operator pods are running properly")
chkPodsStatus(oc, ns, "app=network-resources-injector")
chkPodsStatus(oc, ns, "app=operator-webhook")
chkPodsStatus(oc, ns, "app=sriov-network-config-daemon")
chkPodsStatus(oc, ns, "name=sriov-network-operator")
}
// check specified pods are running
func chkPodsStatus(oc *exutil.CLI, ns, label string) {
err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
podsStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", ns, "-l", label, "-o=jsonpath={.items[*].status.phase}").Output()
if err != nil {
return false, err
}
podsStatus = strings.TrimSpace(podsStatus)
statusList := strings.Split(podsStatus, " ")
for _, podStat := range statusList {
if strings.Compare(podStat, "Running") != 0 {
return false, nil
}
}
e2e.Logf("All pods with lable %s in namespace %s are Running", lable, ns)
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("pod with label %s in namespace %v does not running", lable, ns))
}
// clear specified sriovnetworknodepolicy
func rmSriovNetworkPolicy(oc *exutil.CLI, policyname, ns string) {
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("SriovNetworkNodePolicy", policyname, "-n", ns, "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("remove sriovnetworknodepolicy %s", policyname)
waitForSriovPolicyReady(oc, ns)
}
// clear specified sriovnetwork
func rmSriovNetwork(oc *exutil.CLI, netname, ns string) {
sriovNetList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("SriovNetwork", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(sriovNetList, netname) {
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("SriovNetwork", netname, "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// Wait for Pod ready
func (pod *sriovPod) waitForPodReady(oc *exutil.CLI) {
res := false
err := wait.Poll(5*time.Second, 15*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod.name, "-n", pod.namespace, "-o=jsonpath={.status.phase}").Output()
e2e.Logf("the status of pod is %s", status)
if strings.Contains(status, "NotFound") {
e2e.Logf("the pod was created fail.")
res = false
return true, nil
}
if err != nil {
e2e.Logf("failed to get pod status: %v, retrying...", err)
return false, nil
}
if strings.Contains(status, "Running") {
e2e.Logf("the pod is Ready.")
res = true
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("sriov pod %v is not ready", pod.name))
o.Expect(res).To(o.Equal(true))
}
// Wait for sriov network policy ready
func waitForSriovPolicyReady(oc *exutil.CLI, ns string) {
err := wait.Poll(10*time.Second, 30*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", "-n", ns, "-o=jsonpath={.items[*].status.syncStatus}").Output()
e2e.Logf("the status of sriov policy is %v", status)
if err != nil {
e2e.Logf("failed to get sriov policy status: %v, retrying...", err)
return false, nil
}
nodesStatus := strings.TrimSpace(status)
statusList := strings.Split(nodesStatus, " ")
for _, nodeStat := range statusList {
if nodeStat != "Succeeded" {
e2e.Logf("nodes sync up not ready yet: %v, retrying...", err)
return false, nil
}
}
e2e.Logf("nodes sync up ready now")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "sriovnetworknodestates is not ready")
}
// check interface on pod
func (pod *sriovPod) getSriovIntfonPod(oc *exutil.CLI) string {
msg, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(pod.name, "-n", pod.namespace, "-i", "--", "ip", "address").Output()
if err != nil {
e2e.Logf("Execute ip address command failed with err:%v .", err)
}
e2e.Logf("Get ip address info as:%v", msg)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
return msg
}
// create pod via HTTP request
func (pod *sriovPod) sendHTTPRequest(oc *exutil.CLI, user, cmd string) {
// generate token for the service account
testToken, err := oc.AsAdmin().WithoutNamespace().Run("sa").Args("get-token", user, "-n", pod.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(testToken).NotTo(o.BeEmpty())
configFile := pod.processPodTemplate(oc)
curlCmd := cmd + " -k " + " -H " + fmt.Sprintf("\"Authorization: Bearer %v\"", testToken) + " -d " + "@" + configFile
e2e.Logf("Send curl request to create new pod: %s\n", curlCmd)
res, err := exec.Command("bash", "-c", curlCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(res).NotTo(o.BeEmpty())
}
func (sriovPolicy *sriovNetworkNodePolicy) createPolicy(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovPolicy.template, "-p", "NAMESPACE="+sriovPolicy.namespace, "DEVICEID="+sriovPolicy.deviceID, "SRIOVNETPOLICY="+sriovPolicy.policyName, "DEVICETYPE="+sriovPolicy.deviceType, "PFNAME="+sriovPolicy.pfName, "VENDOR="+sriovPolicy.vendor, "NUMVFS="+strconv.Itoa(sriovPolicy.numVfs), "RESOURCENAME="+sriovPolicy.resourceName)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create sriovnetworknodePolicy %v", sriovPolicy.policyName))
}
func (sriovNetwork *sriovNetwork) createSriovNetwork(oc *exutil.CLI) {
err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
if sriovNetwork.spoolchk == "" {
sriovNetwork.spoolchk = "off"
}
if sriovNetwork.trust == "" {
sriovNetwork.trust = "on"
}
if sriovNetwork.linkState == "" {
sriovNetwork.linkState = "auto"
}
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovNetwork.template, "-p", "NAMESPACE="+sriovNetwork.namespace, "SRIOVNETNAME="+sriovNetwork.name, "TARGETNS="+sriovNetwork.networkNamespace, "SRIOVNETPOLICY="+sriovNetwork.resourceName, "SPOOFCHK="+sriovNetwork.spoolchk, "TRUST="+sriovNetwork.trust, "LINKSTATE="+sriovNetwork.linkState, "MINTXRATE="+strconv.Itoa(sriovNetwork.minTxRate), "MAXTXRATE="+strconv.Itoa(sriovNetwork.maxTxRate), "VLANID="+strconv.Itoa(sriovNetwork.vlanId), "VLANQOS="+strconv.Itoa(sriovNetwork.vlanQoS))
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create sriovnetwork %v", sriovNetwork.name))
}
func (sriovTestPod *sriovTestPod) createSriovTestPod(oc *exutil.CLI) {
err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovTestPod.template, "-p", "PODNAME="+sriovTestPod.name, "SRIOVNETNAME="+sriovTestPod.networkName, "NAMESPACE="+sriovTestPod.namespace)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create test pod %v", sriovTestPod.name))
}
// get the pciAddress the pod is using
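// (Background: the SR-IOV device plugin exposes the PCI addresses allocated to a pod via env vars named PCIDEVICE_OPENSHIFT_IO_<RESOURCENAME> in upper case, which is what printenv reads below.)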
func getPciAddress(namespace string, podName string, policyName string) string {
pciAddress, err := e2eoutput.RunHostCmdWithRetries(namespace, podName, "printenv PCIDEVICE_OPENSHIFT_IO_"+strings.ToUpper(policyName), 3*time.Second, 30*time.Second)
e2e.Logf("Get the pci address env is: %s", pciAddress)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(pciAddress).NotTo(o.BeEmpty())
return strings.TrimSuffix(pciAddress, "\n")
}
// Get the sriov worker node that the policy is applied to
func getSriovNode(oc *exutil.CLI, namespace string, label string) string {
sriovNodeName := ""
nodeNamesAll, err := oc.AsAdmin().Run("get").Args("-n", namespace, "node", "-l", label, "-ojsonpath={.items..metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames := strings.Split(nodeNamesAll, " ")
for _, nodeName := range nodeNames {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", nodeName, "-n", namespace, "-ojsonpath={.spec.interfaces}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output != "" {
sriovNodeName = nodeName
break
}
}
e2e.Logf("The sriov node is %v ", sriovNodeName)
o.Expect(sriovNodeName).NotTo(o.BeEmpty())
return sriovNodeName
}
// checkDeviceIDExist checks whether a worker node contains the network card with the given deviceID
func checkDeviceIDExist(oc *exutil.CLI, namespace string, deviceID string) bool {
allDeviceID, err := oc.AsAdmin().Run("get").Args("sriovnetworknodestates", "-n", namespace, "-ojsonpath={.items[*].status.interfaces[*].deviceID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("tested deviceID is %v and all supported deviceID on node are %v ", deviceID, allDeviceID)
return strings.Contains(allDeviceID, deviceID)
}
// Check whether the sriov network policy exists
func (rs *sriovNetResource) chkSriovPolicy(oc *exutil.CLI) bool {
sriovPolicyList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("SriovNetworkNodePolicy", "-n", rs.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(sriovPolicyList, rs.name) {
return false
}
return true
}
// Wait for nodes to start syncing up the sriov policy
func waitForSriovPolicySyncUpStart(oc *exutil.CLI, ns string) {
err := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", "-n", ns, "-o=jsonpath={.items[*].status.syncStatus}").Output()
e2e.Logf("the status of sriov policy is %s", status)
if err != nil {
e2e.Logf("failed to get sriov policy status: %v, retrying...", err)
return false, nil
}
nodesStatus := strings.TrimSpace(status)
statusList := strings.Split(nodesStatus, " ")
for _, nodeStat := range statusList {
if nodeStat == "InProgress" {
e2e.Logf("nodes start to sync up ...", err)
return true, nil
}
}
e2e.Logf("nodes sync up hasn't started yet ...")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "sriovnetworknodestates sync up is in progress")
}
func getOperatorVersion(oc *exutil.CLI, subname string, ns string) string {
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", subname, "-n", ns, "-o=jsonpath={.status.currentCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
e2e.Logf("operator version is %s", csvName)
return csvName
}
// find the node name which is NotReady or SchedulingDisabled
func findSchedulingDisabledNode(oc *exutil.CLI, interval, timeout time.Duration, label string) string {
scheduleDisableNodeName := ""
errNode := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(ctx context.Context) (bool, error) {
nodeNamesAll, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", label, "-ojsonpath={.items..metadata.name}").Output()
if err != nil {
return false, nil
}
nodeNames := strings.Split(nodeNamesAll, " ")
for _, nodeName := range nodeNames {
nodeOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName).Output()
if err == nil {
if strings.Contains(nodeOutput, "NotReady") || strings.Contains(nodeOutput, "SchedulingDisabled") {
scheduleDisableNodeName = nodeName
break
}
}
}
if scheduleDisableNodeName == "" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errNode, fmt.Sprintf("no node become SchedulingDisabled or Notready!"))
return scheduleDisableNodeName
}
func chkVFStatusMatch(oc *exutil.CLI, nodeName, nicName, macAddress, expectValue string) {
cmd := fmt.Sprintf("ip link show %s | grep %s", nicName, macAddress)
output, debugNodeErr := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd)
e2e.Logf("The ip link show. \n %v", output)
o.Expect(debugNodeErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, expectValue)).To(o.BeTrue())
}
func initVF(oc *exutil.CLI, name, deviceID, interfaceName, vendor, ns string, vfNum int) bool {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovNetworkNodePolicyTemplate := filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml")
sriovPolicy := sriovNetworkNodePolicy{
policyName: name,
deviceType: "netdevice",
deviceID: deviceID,
pfName: interfaceName,
vendor: vendor,
numVfs: vfNum,
resourceName: name,
template: sriovNetworkNodePolicyTemplate,
namespace: ns,
}
exutil.By("Check the deviceID if exist on the cluster worker")
e2e.Logf("Create VF on name: %s, deviceID: %s, interfacename: %s, vendor: %s", name, deviceID, interfaceName, vendor)
if !chkPfExist(oc, deviceID, interfaceName) {
e2e.Logf("the cluster do not contain the sriov card. skip this testing!")
return false
}
//defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs)
sriovPolicy.createPolicy(oc)
waitForSriovPolicyReady(oc, ns)
return true
}
func initDpdkVF(oc *exutil.CLI, name, deviceID, interfaceName, vendor, ns string, vfNum int) bool {
deviceType := "vfio-pci"
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovNetworkNodePolicyTemplate := filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml")
if vendor == "15b3" {
deviceType = "netdevice"
sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-mlx-dpdk-template.yaml")
}
sriovPolicy := sriovNetworkNodePolicy{
policyName: name,
deviceType: deviceType,
deviceID: deviceID,
pfName: interfaceName,
vendor: vendor,
numVfs: vfNum,
resourceName: name,
template: sriovNetworkNodePolicyTemplate,
namespace: ns,
}
exutil.By("Check the deviceID if exist on the cluster worker")
e2e.Logf("Create VF on name: %s, deviceID: %s, interfacename: %s, vendor: %s", name, deviceID, interfaceName, vendor)
if !chkPfExist(oc, deviceID, interfaceName) {
e2e.Logf("the cluster do not contain the sriov card. skip this testing!")
return false
}
// create dpdk policy
sriovPolicy.createPolicy(oc)
waitForSriovPolicyReady(oc, ns)
return true
}
func chkVFStatusWithPassTraffic(oc *exutil.CLI, nadName, nicName, ns, expectValue string) {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml")
exutil.By("Create test pod on the target namespace")
for i := 0; i < 2; i++ {
sriovTestPod := sriovTestPod{
name: "testpod" + strconv.Itoa(i),
namespace: ns,
networkName: nadName,
template: sriovTestPodTemplate,
}
sriovTestPod.createSriovTestPod(oc)
err := waitForPodWithLabelReady(oc, ns, "name=sriov-netdevice")
exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready")
if strings.Contains(expectVaule, "mtu") {
mtucheck, err := e2eoutput.RunHostCmdWithRetries(ns, sriovTestPod.name, "ip addr show net1", 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(mtucheck, expectValue)).To(o.BeTrue())
} else {
nodeName, nodeNameErr := exutil.GetPodNodeName(oc, ns, sriovTestPod.name)
o.Expect(nodeNameErr).NotTo(o.HaveOccurred())
podMac := getInterfaceMac(oc, ns, sriovTestPod.name, "net1")
chkVFStatusMatch(oc, nodeName, nicName, podMac, expectValue)
}
}
chkPodsPassTraffic(oc, "testpod0", "testpod1", "net1", ns)
}
func chkPodsPassTraffic(oc *exutil.CLI, pod1name string, pod2name string, infName string, ns string) {
exutil.By("Check the interface is connected, if not skip the connect testing")
cmd := "ip addr show " + infName
podConnectStatus1, err1 := e2eoutput.RunHostCmdWithRetries(ns, pod1name, cmd, 3*time.Second, 30*time.Second)
o.Expect(err1).NotTo(o.HaveOccurred())
podConnectStatus2, err2 := e2eoutput.RunHostCmdWithRetries(ns, pod2name, cmd, 3*time.Second, 30*time.Second)
o.Expect(err2).NotTo(o.HaveOccurred())
e2e.Logf("The ip connection of %v show: \n %v", pod1name, podConnectStatus1)
e2e.Logf("The ip connection of %v show: \n %v", pod2name, podConnectStatus2)
// if podConnectStatus includes NO-CARRIER, skip the connection testing
if !strings.Contains(podConnectStatus1, "NO-CARRIER") && !strings.Contains(podConnectStatus2, "NO-CARRIER") {
exutil.By("Get destination Pod's IP on secondary interface")
cmd = "ip addr show dev " + infName + " | grep global"
net1Output, _ := e2eoutput.RunHostCmdWithRetries(ns, pod2name, cmd, 3*time.Second, 30*time.Second)
net1Output = strings.TrimSpace(net1Output)
// Match our IPv4 and IPv6 address on net1 ip address output
rev4 := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)
ipv4Addresses := rev4.FindAllString(net1Output, -1)
rev6 := regexp.MustCompile(`inet6\s+([0-9a-fA-F:]{2,39})(?:/\d{1,3})?`)
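// the capture group holds the bare IPv6 address; an optional /prefix suffix is matched but discarded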
ipv6Addresses := rev6.FindAllStringSubmatch(net1Output, -1)
for _, match := range ipv6Addresses {
if len(match) > 1 {
ipv6Address := match[1]
e2e.Logf("\n destination pod %s net1 IPv6 address: %s\n", pod2name, ipv6Address)
CurlMultusPod2PodPass(oc, ns, pod1name, ipv6Address, infName, "Hello")
}
}
e2e.Logf("\n destination pod %s net1 IPv4 address: %s\n", pod2name, ipv4Addresses)
if len(ipv4Addresses) != 0 {
CurlMultusPod2PodPass(oc, ns, pod1name, ipv4Addresses[0], infName, "Hello")
}
}
}
func (sriovTestPod *sriovTestPod) deleteSriovTestPod(oc *exutil.CLI) {
e2e.Logf("delete pod %s in namespace %s", sriovTestPod.name, sriovTestPod.namespace)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", sriovTestPod.name, "-n", sriovTestPod.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func createNumPods(oc *exutil.CLI, nadName, ns, podPrefix string, numPods int) {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml")
exutil.By("Create test pod on the target namespace")
for i := 0; i < numPods; i++ {
sriovTestPod := sriovTestPod{
name: podPrefix + strconv.Itoa(i),
namespace: ns,
networkName: nadName,
template: sriovTestPodTemplate,
}
sriovTestPod.createSriovTestPod(oc)
}
err := waitForPodWithLabelReady(oc, ns, "name=sriov-netdevice")
exutil.AssertWaitPollNoErr(err, "pods with label name=sriov-netdevice not ready")
e2e.Logf("Have successfully created %v pods", numPods)
}
// get worker nodes which have sriov enabled.
func getSriovWokerNodes(oc *exutil.CLI) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "feature.node.kubernetes.io/sriov-capable=true",
"-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
}
// get worker nodes which have the required PF
func getWorkerNodesWithNic(oc *exutil.CLI, deviceid string, pfname string) []string {
workerWithNicList := []string{}
nodeNameList := getSriovWokerNodes(oc)
e2e.Logf("print all worker nodes %v", nodeNameList)
for _, workerNode := range nodeNameList {
output, checkNicErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates.sriovnetwork.openshift.io", workerNode,
"-n", "openshift-sriov-network-operator", "-o=jsonpath={.status.interfaces}").Output()
o.Expect(checkNicErr).NotTo(o.HaveOccurred())
nicList := strings.Split(output, "}")
for _, nicInfo := range nicList {
// at least one worker node should have required PF.
re1 := regexp.MustCompile(`\"` + pfname + `\"`)
re2 := regexp.MustCompile(`\"deviceID\":\"` + deviceid + `\"`)
if re1.MatchString(nicInfo) && re2.MatchString(nicInfo) {
e2e.Logf("on worker node %v, find PF %v!!", workerNode, pfname)
workerWithNicList = append(workerWithNicList, workerNode)
}
}
e2e.Logf("The worker list which has device id %v, pfname %v is %v", deviceid, pfname, workerWithNicList)
}
return workerWithNicList
}
// check if the required PF exists on workers
func chkPfExist(oc *exutil.CLI, deviceid string, pfname string) bool {
res := true
workerList := getWorkerNodesWithNic(oc, deviceid, pfname)
if len(workerList) == 0 {
e2e.Logf("The worker nodes don't have the required PF %v with DeviceID %v", pfname, deviceid)
res = false
}
return res
}
func chkNAD(oc *exutil.CLI, ns string, name string, expected bool) error {
return wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("net-attach-def", "-n", ns, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the NAD list in ns %v is %v", ns, output)
if expected && !strings.Contains(output, name) {
e2e.Logf("Can not get NAD, got err:%v, and try next round", err)
return false, nil
}
if !expected && strings.Contains(output, name) {
e2e.Logf("NAD has not beem removed, try again")
return false, nil
}
return true, nil
})
}
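// Typical usage (sketch, mirroring the ExternallyManaged test above):
//   errChk := chkNAD(oc, ns, sriovnetwork.name, true)
//   exutil.AssertWaitPollNoErr(errChk, "Did not find NAD in the namespace")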
func rmNAD(oc *exutil.CLI, ns string, name string) {
output, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", "-n", ns, name).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("the NAD %v is removed", name)
}
func setSriovWebhook(oc *exutil.CLI, status string) {
//enable webhook
var patchYamlToRestore string
if status == "true" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableOperatorWebhook","value": true}]`
} else if status == "false" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableOperatorWebhook","value": false}]`
}
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator",
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := "sriovoperatorconfig.sriovnetwork.openshift.io/default patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
// check webhook is set correctly
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator", "-o=jsonpath={.spec.enableOperatorWebhook}").Output()
e2e.Logf("the status of sriov webhook is %v", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(status))
}
func chkSriovWebhookResource(oc *exutil.CLI, status bool) {
output1, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ds", "operator-webhook", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output1)
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "operator-webhook-sa", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output2)
output3, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "operator-webhook-service", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output3)
output4, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrole", "operator-webhook", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output4)
output5, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("MutatingWebhookConfiguration", "sriov-operator-webhook-config", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output5)
output6, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrolebinding", "operator-webhook-role-binding", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output6)
if status == true {
o.Expect(output1).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output2).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output3).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output4).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output5).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output6).Should(o.ContainSubstring("operator-webhook"))
} else {
o.Expect(output1).Should(o.ContainSubstring("not found"))
o.Expect(output2).Should(o.ContainSubstring("not found"))
o.Expect(output3).Should(o.ContainSubstring("not found"))
o.Expect(output4).Should(o.ContainSubstring("not found"))
o.Expect(output5).Should(o.ContainSubstring("not found"))
o.Expect(output6).Should(o.ContainSubstring("not found"))
}
}
func setSriovInjector(oc *exutil.CLI, status string) {
//enable sriov resource injector
var patchYamlToRestore string
if status == "true" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableInjector","value": true}]`
} else if status == "false" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableInjector","value": false}]`
}
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator",
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := "sriovoperatorconfig.sriovnetwork.openshift.io/default patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
// check injector is set correctly
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator", "-o=jsonpath={.spec.enableInjector}").Output()
e2e.Logf("the status of sriov resource injector is %v", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(status))
}
func chkSriovInjectorResource(oc *exutil.CLI, status bool) {
output1, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ds", "network-resources-injector", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output1)
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "network-resources-injector-sa", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output2)
output3, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "network-resources-injector-service", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output3)
output4, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrole", "network-resources-injector", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output4)
output5, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("MutatingWebhookConfiguration", "network-resources-injector-config", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output5)
output6, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrolebinding", "network-resources-injector-role-binding", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output6)
if status == true {
o.Expect(output1).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output2).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output3).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output4).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output5).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output6).Should(o.ContainSubstring("network-resources-injector"))
} else {
o.Expect(output1).Should(o.ContainSubstring("not found"))
o.Expect(output2).Should(o.ContainSubstring("not found"))
o.Expect(output3).Should(o.ContainSubstring("not found"))
o.Expect(output4).Should(o.ContainSubstring("not found"))
o.Expect(output5).Should(o.ContainSubstring("not found"))
o.Expect(output6).Should(o.ContainSubstring("not found"))
}
}
func pingPassWithNet1(oc *exutil.CLI, ns1, pod1, pod2 string) {
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1)
e2e.Logf("The second interface v4 address of pod1 is: %v", pod1IPv4)
command := fmt.Sprintf("ping -c 3 %s", pod1IPv4)
pingOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod2, command, 3*time.Second, 12*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ping output: %v", pingOutput)
o.Expect(strings.Count(pingOutput, "3 received")).To(o.Equal(1))
}
// get the Mac address from one pod interface
func getInterfaceMac(oc *exutil.CLI, namespace, podName, interfaceName string) string {
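// awk extracts the second whitespace-separated field of the "link/ether" line, i.e. the interface's MAC address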
command := fmt.Sprintf("ip link show %s | awk '/link\\/ether/ {print $2}'", interfaceName)
podInterfaceMac, err := e2eoutput.RunHostCmdWithRetries(namespace, podName, command, 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
podInterfaceMac = strings.TrimSpace(podInterfaceMac)
return podInterfaceMac
}
// get the catalogsource name
func getOperatorSource(oc *exutil.CLI, namespace string) string {
catalogSourceNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("catalogsource", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(catalogSourceNames, "auto-release-app-registry") {
return "auto-release-app-registry"
} else if strings.Contains(catalogSourceNames, "qe-app-registry") {
return "qe-app-registry"
} else {
return ""
}
}
// check if interface is not connected with CARRIER
func checkInterfaceNoCarrier(oc *exutil.CLI, nodeName string, interfaceName string) bool {
var output string
var err error
checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
output, err = exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", "/usr/sbin/ip address show dev "+interfaceName)
if output == "" || err != nil {
e2e.Logf("Did not get node's management interface, errors: %v, try again", err)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("fail to check if interface %s on node %s has carrier, err: %v", interfaceName, nodeName, checkErr))
return strings.Contains(output, "NO-CARRIER")
}
// Create VF policy through NMstate
func (vfpolicy *VFPolicyResource) createVFPolicy(oc *exutil.CLI) error {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vfpolicy.template, "-p", "NAME="+vfpolicy.name, "INTFNAME="+vfpolicy.intfname, "NODENAME="+vfpolicy.nodename, "TOTALVFS="+strconv.Itoa(int(vfpolicy.totalvfs)))
if err1 != nil {
e2e.Logf("Creating VF on sriov node failed :%v, and try next round", err1)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("fail to VF on sriov node %v", vfpolicy.name)
}
return nil
}
// Create SRIOVNetworkNodePolicy on specific node
func (sriovNNPolicy *sriovNetworkNodePolicySpecificNode) createPolicySpecificNode(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovNNPolicy.template, "-p", "NAMESPACE="+sriovNNPolicy.namespace, "SRIOVNETPOLICY="+sriovNNPolicy.policyName, "DEVICETYPE="+sriovNNPolicy.deviceType, "PFNAME="+sriovNNPolicy.pfName, "NUMVFS="+strconv.Itoa(sriovNNPolicy.numVfs), "RESOURCENAME="+sriovNNPolicy.resourceName, "NODENAME="+sriovNNPolicy.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create sriovnetworknodePolicy %v", sriovNNPolicy.policyName))
}
// Create SRIOV test pod with MAC address
func (sriovTestPod *sriovTestPodMAC) createSriovTestPodMAC(oc *exutil.CLI) {
err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovTestPod.tempfile, "-p", "PODNAME="+sriovTestPod.name, "SRIOVNETNAME="+sriovTestPod.sriovnetname, "TARGETNS="+sriovTestPod.namespace, "IP_ADDR="+sriovTestPod.ipaddr, "MAC_ADDR="+sriovTestPod.macaddr)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create test pod %v", sriovTestPod.name))
}
// uninstall SRIOV operator, but leave SriovNetworkPoolConfig and openshift-sriov-network-operator namespace behind so offload cases are not impacted
func uninstallSriovOperator(oc *exutil.CLI, namespace string) {
// Delete SRIOV network related config except SriovNetworkPoolConfig, subscription, CSV, and DS under openshift-sriov-network-operator namespace
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("sriovnetwork", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting sirovnetwork")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("sriovnetworknodepolicy", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting sirovnetworknodepolicy")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("subscription", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting subscription under openshift-sriov-network-operator namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("csv", "--all", "-n", namespace).Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting CSV under openshift-sriov-network-operator namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ds", "--all", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting DS under openshift-sriov-network-operator namespace")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment", "sriov-network-operator", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting deployment/sriov-network-operator under openshift-sriov-network-operator namespace")
// Verify SRIOV network related config, subscription, CSV, and DS under openshift-sriov-network-operator namespace are removed
sriovconfigs := [6]string{"sriovnetwork", "sriovnetworknodepolicy", "subscription", "csv", "ds", "deployment"}
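// poll each resource type until "oc get" reports nothing left in the namespace, tolerating both "not found" and "No resources found" phrasings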
for _, config := range sriovconfigs {
sriovChkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false, func(cxt context.Context) (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(config, "-n", namespace).Output()
e2e.Logf("\n output after deleting %s: %s\n", config, output)
if strings.Contains(output, "not found") || strings.Contains(output, "No resources found") {
e2e.Logf("mutatingwebhookconfigs %s is delete successfully", config)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(sriovChkErr, fmt.Sprintf("Failed to delete resource %s", config))
}
// Delete SRIOV related CRD under openshift-sriov-network-operator namespace
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("crd", "sriovibnetworks.sriovnetwork.openshift.io "+
"sriovnetworknodepolicies.sriovnetwork.openshift.io "+
"sriovnetworknodestates.sriovnetwork.openshift.io "+
"sriovnetworkpoolconfigs.sriovnetwork.openshift.io "+
"sriovnetworks.sriovnetwork.openshift.io "+
"sriovoperatorconfigs.sriovnetwork.openshift.io", "-n", namespace, "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting SRIOV related CRD")
// Verify SRIOV related CRD under openshift-sriov-network-operator namespace are removed
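// grep prints nothing (and the pipeline output stays empty) only when no sriov CRDs remain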
chkOutput, _ := exec.Command("bash", "-c", "oc crd | grep sriov").Output()
o.Expect(string(chkOutput)).Should(o.BeEmpty(), "Not all SRIOV CRD were removed")
// Delete webhook related configurations
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("mutatingwebhookconfigurations", "network-resources-injector-config", "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting network-resources-injector-config")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("MutatingWebhookConfiguration", "sriov-operator-webhook-config", "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting MutatingWebhookConfiguration sriov-operator-webhook-config")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ValidatingWebhookConfiguration", "sriov-operator-webhook-config", "--ignore-not-found").Execute()
exutil.AssertWaitPollNoErr(err, "Got error when deleting ValidatingWebhookConfiguration sriov-operator-webhook-config")
// Verify webhook related configurations are removed
mutatingwebhookconfigs := [2]string{"network-resources-injector-config", "sriov-operator-webhook-config"}
for _, config := range mutatingwebhookconfigs {
sriovChkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false, func(cxt context.Context) (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mutatingwebhookconfigurations", config).Output()
e2e.Logf("\n output after deleting %s: %s\n", config, output)
if strings.Contains(output, "not found") || strings.Contains(output, "No resources found") {
e2e.Logf("mutatingwebhookconfigs %s is delete successfully", config)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(sriovChkErr, fmt.Sprintf("Failed to delete resource %s", config))
}
sriovChkErr := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 90*time.Second, false, func(cxt context.Context) (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ValidatingWebhookConfiguration", "sriov-operator-webhook-config").Output()
e2e.Logf("\n\n\n output after deleting ValidatingWebhookConfiguration sriov-operator-webhook-config: %s\n\n\n", output)
if strings.Contains(output, "not found") || strings.Contains(output, "No resources found") {
e2e.Logf("ValidatingWebhookConfiguration sriov-operator-webhook-config is delete successfully")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(sriovChkErr, "Failed to delete ValidatingWebhookConfiguration sriov-operator-webhook-config")
}
func installSriovOperator(oc *exutil.CLI, opNamespace string) {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking/sriov")
namespaceTemplate = filepath.Join(buildPruningBaseDir, "namespace-template.yaml")
operatorGroupTemplate = filepath.Join(buildPruningBaseDir, "operatorgroup-template.yaml")
subscriptionTemplate = filepath.Join(buildPruningBaseDir, "subscription-template.yaml")
sriovOperatorconfig = filepath.Join(buildPruningBaseDir, "sriovoperatorconfig.yaml")
opName = "sriov-network-operators"
)
sub := subscriptionResource{
name: "sriov-network-operator-subsription",
namespace: opNamespace,
operatorName: opName,
channel: "stable",
catalog: "qe-app-registry",
catalogNamespace: "openshift-marketplace",
template: subscriptionTemplate,
}
ns := namespaceResource{
name: opNamespace,
template: namespaceTemplate,
}
og := operatorGroupResource{
name: opName,
namespace: opNamespace,
targetNamespaces: opNamespace,
template: operatorGroupTemplate,
}
catalogSource := getOperatorSource(oc, "openshift-marketplace")
if catalogSource == "" {
g.Skip("Skip testing as auto-release-app-registry/qe-app-registry not found")
}
sub.catalog = catalogSource
operatorInstall(oc, sub, ns, og)
e2e.Logf("Operator install check successfull as part of setup !!!!!")
exutil.By("SUCCESS - sriov operator installed")
exutil.By("check sriov version if match the ocp version")
operatorVersion := getOperatorVersion(oc, sub.name, sub.namespace)
ocpversion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(operatorVersion).Should(o.MatchRegexp(ocpversion))
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", sriovOperatorconfig, "-n", opNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check all pods in sriov namespace are running")
chkSriovOperatorStatus(oc, sub.namespace)
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
28342efc-32e1-42e1-8499-62e6ffbc802b
|
delete
|
['sriovNetResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (rs *sriovNetResource) delete(oc *exutil.CLI) {
e2e.Logf("delete %s %s in namespace %s", rs.kind, rs.name, rs.namespace)
oc.AsAdmin().WithoutNamespace().Run("delete").Args(rs.kind, rs.name, "-n", rs.namespace).Execute()
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7c205035-c047-4c83-be5d-e05e9f2a2e03
|
create
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
['sriovNetResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (rs *sriovNetResource) create(oc *exutil.CLI, parameters ...string) {
var configFile string
cmd := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
cmd = append(cmd, para)
}
e2e.Logf("parameters list is %s\n", cmd)
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(cmd...).OutputToFile(getRandomString() + "config.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process sriov resource %v", cmd))
e2e.Logf("the file of resource is %s\n", configFile)
_, err1 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile, "-n", rs.namespace).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
7ddcf7fb-6b26-4a89-a757-092e082b32e0
|
processPodTemplate
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
['sriovPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (pod *sriovPod) processPodTemplate(oc *exutil.CLI) string {
var configFile string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
parameters := fmt.Sprintf("-p PODNAME=%s, SRIOVNETNAME=%s, IPV4_ADDR=%s, IPV6_ADDR=%s", pod.name, pod.intfresource, pod.ipv4addr, pod.ipv6addr)
if pod.pingip != "" {
parameters += pod.pingip
}
output, err := oc.AsAdmin().Run("process").Args("-f", pod.tempfile, "--ignore-unknown-parameters=true", parameters, "-o=jsonpath={.items[0]}").OutputToFile(getRandomString() + "config.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process pod resource %v", pod.name))
e2e.Logf("the file of resource is %s\n", configFile)
return configFile
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
fbd9d7fe-b3c0-402a-9a3b-232a2a76c948
|
createPod
|
['sriovPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (pod *sriovPod) createPod(oc *exutil.CLI) string {
configFile := pod.processPodTemplate(oc)
podsLog, err1 := oc.AsAdmin().WithoutNamespace().Run("create").Args("--loglevel=10", "-f", configFile, "-n", pod.namespace).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
return podsLog
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
0093a1b9-d96e-4819-b4b2-0d973e8afc4b
|
deletePod
|
['sriovPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (pod *sriovPod) deletePod(oc *exutil.CLI) {
e2e.Logf("delete pod %s in namespace %s", pod.name, pod.namespace)
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod.name, "-n", pod.namespace).Execute()
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
e91dcb6c-e202-4974-8ccf-d722b5110ab7
|
chkSriovOperatorStatus
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkSriovOperatorStatus(oc *exutil.CLI, ns string) {
e2e.Logf("check if openshift-sriov-network-operator pods are running properly")
chkPodsStatus(oc, ns, "app=network-resources-injector")
chkPodsStatus(oc, ns, "app=operator-webhook")
chkPodsStatus(oc, ns, "app=sriov-network-config-daemon")
chkPodsStatus(oc, ns, "name=sriov-network-operator")
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
fd5f4238-03ae-4157-8f6f-7f2ae18c38c6
|
chkPodsStatus
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkPodsStatus(oc *exutil.CLI, ns, label string) {
err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
podsStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", ns, "-l", label, "-o=jsonpath={.items[*].status.phase}").Output()
if err != nil {
return false, err
}
podsStatus = strings.TrimSpace(podsStatus)
statusList := strings.Split(podsStatus, " ")
for _, podStat := range statusList {
if strings.Compare(podStat, "Running") != 0 {
return false, nil
}
}
e2e.Logf("All pods with label %s in namespace %s are Running", label, ns)
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("pods with label %s in namespace %s are not running", label, ns))
}
|
networking
| ||||
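A short usage sketch for chkPodsStatus, assuming the operator namespace used elsewhere in this file:
// Block until every config-daemon pod reports phase Running (hypothetical call site).
chkPodsStatus(oc, "openshift-sriov-network-operator", "app=sriov-network-config-daemon")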
function
|
openshift/openshift-tests-private
|
bb6035d3-37a9-4ca4-923e-fbdda477d5f3
|
rmSriovNetworkPolicy
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func rmSriovNetworkPolicy(oc *exutil.CLI, policyname, ns string) {
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("SriovNetworkNodePolicy", policyname, "-n", ns, "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("remove sriovnetworknodepolicy %s", policyname)
waitForSriovPolicyReady(oc, ns)
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
2af5feb4-9558-49c3-b785-57eb988d7a64
|
rmSriovNetwork
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func rmSriovNetwork(oc *exutil.CLI, netname, ns string) {
sriovNetList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("SriovNetwork", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(sriovNetList, netname) {
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("SriovNetwork", netname, "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
ee233441-5cc9-46cb-969c-e84cf9444111
|
waitForPodReady
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['sriovPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (pod *sriovPod) waitForPodReady(oc *exutil.CLI) {
res := false
err := wait.Poll(5*time.Second, 15*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod.name, "-n", pod.namespace, "-o=jsonpath={.status.phase}").Output()
e2e.Logf("the status of pod is %s", status)
if strings.Contains(status, "NotFound") {
e2e.Logf("the pod was created fail.")
res = false
return true, nil
}
if err != nil {
e2e.Logf("failed to get pod status: %v, retrying...", err)
return false, nil
}
if strings.Contains(status, "Running") {
e2e.Logf("the pod is Ready.")
res = true
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("sriov pod %v is not ready", pod.name))
o.Expect(res).To(o.Equal(true))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
43042f37-2cd1-4631-befe-b55c5872b87f
|
waitForSriovPolicyReady
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func waitForSriovPolicyReady(oc *exutil.CLI, ns string) {
err := wait.Poll(10*time.Second, 30*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", "-n", ns, "-o=jsonpath={.items[*].status.syncStatus}").Output()
e2e.Logf("the status of sriov policy is %v", status)
if err != nil {
e2e.Logf("failed to get sriov policy status: %v, retrying...", err)
return false, nil
}
nodesStatus := strings.TrimSpace(status)
statusList := strings.Split(nodesStatus, " ")
for _, nodeStat := range statusList {
if nodeStat != "Succeeded" {
e2e.Logf("nodes sync up not ready yet: %v, retrying...", err)
return false, nil
}
}
e2e.Logf("nodes sync up ready now")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "sriovnetworknodestates is not ready")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
a84ccc4c-b34f-41f9-afb1-8f098637f557
|
getSriovIntfonPod
|
['"os/exec"']
|
['sriovPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (pod *sriovPod) getSriovIntfonPod(oc *exutil.CLI) string {
msg, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(pod.name, "-n", pod.namespace, "-i", "--", "ip", "address").Output()
if err != nil {
e2e.Logf("Execute ip address command failed with err:%v .", err)
}
e2e.Logf("Get ip address info as:%v", msg)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
return msg
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
9f3da315-eb1e-4a0d-b440-a1f4722afaae
|
sendHTTPRequest
|
['"fmt"', '"os/exec"']
|
['sriovPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (pod *sriovPod) sendHTTPRequest(oc *exutil.CLI, user, cmd string) {
//generate token for the service account
testToken, err := oc.AsAdmin().WithoutNamespace().Run("sa").Args("get-token", user, "-n", pod.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(testToken).NotTo(o.BeEmpty())
configFile := pod.processPodTemplate(oc)
curlCmd := cmd + " -k " + " -H " + fmt.Sprintf("\"Authorization: Bearer %v\"", testToken) + " -d " + "@" + configFile
e2e.Logf("Send curl request to create new pod: %s\n", curlCmd)
res, err := exec.Command("bash", "-c", curlCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(res).NotTo(o.BeEmpty())
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
60c7488c-772d-466c-8404-a22c21ef8fcc
|
createPolicy
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['sriovNetworkNodePolicy']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (sriovPolicy *sriovNetworkNodePolicy) createPolicy(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovPolicy.template, "-p", "NAMESPACE="+sriovPolicy.namespace, "DEVICEID="+sriovPolicy.deviceID, "SRIOVNETPOLICY="+sriovPolicy.policyName, "DEVICETYPE="+sriovPolicy.deviceType, "PFNAME="+sriovPolicy.pfName, "VENDOR="+sriovPolicy.vendor, "NUMVFS="+strconv.Itoa(sriovPolicy.numVfs), "RESOURCENAME="+sriovPolicy.resourceName)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create sriovnetworknodePolicy %v", sriovPolicy.policyName))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
d618c300-28b2-4667-89e5-796f3a65b3f0
|
createSriovNetwork
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['sriovNetwork']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (sriovNetwork *sriovNetwork) createSriovNetwork(oc *exutil.CLI) {
err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
if sriovNetwork.spoolchk == "" {
sriovNetwork.spoolchk = "off"
}
if sriovNetwork.trust == "" {
sriovNetwork.trust = "on"
}
if sriovNetwork.linkState == "" {
sriovNetwork.linkState = "auto"
}
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovNetwork.template, "-p", "NAMESPACE="+sriovNetwork.namespace, "SRIOVNETNAME="+sriovNetwork.name, "TARGETNS="+sriovNetwork.networkNamespace, "SRIOVNETPOLICY="+sriovNetwork.resourceName, "SPOOFCHK="+sriovNetwork.spoolchk, "TRUST="+sriovNetwork.trust, "LINKSTATE="+sriovNetwork.linkState, "MINTXRATE="+strconv.Itoa(sriovNetwork.minTxRate), "MAXTXRATE="+strconv.Itoa(sriovNetwork.maxTxRate), "VLANID="+strconv.Itoa(sriovNetwork.vlanId), "VLANQOS="+strconv.Itoa(sriovNetwork.vlanQoS))
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create sriovnetwork %v", sriovNetwork.name))
}
|
networking
| |||
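A minimal sketch of driving createSriovNetwork, assuming a ginkgo test body in this package; the literal names and the template path are hypothetical:
sriovnet := sriovNetwork{
name: "example-sriovnet", // hypothetical
namespace: "openshift-sriov-network-operator",
networkNamespace: "example-ns", // hypothetical target namespace
resourceName: "examplepolicy", // hypothetical resource created by a node policy
template: "testdata/networking/sriov/sriovnetwork-template.yaml", // hypothetical path
}
// Unset spoolchk/trust/linkState default to off/on/auto inside createSriovNetwork.
sriovnet.createSriovNetwork(oc)
defer rmSriovNetwork(oc, sriovnet.name, sriovnet.namespace)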
function
|
openshift/openshift-tests-private
|
75159221-3f4d-4f76-b60c-a9fecd92a806
|
createSriovTestPod
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['sriovTestPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (sriovTestPod *sriovTestPod) createSriovTestPod(oc *exutil.CLI) {
err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sriovTestPod.template, "-p", "PODNAME="+sriovTestPod.name, "SRIOVNETNAME="+sriovTestPod.networkName, "NAMESPACE="+sriovTestPod.namespace)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create test pod %v", sriovTestPod.name))
}
|
networking
| |||
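A sketch of attaching a test pod to an existing SR-IOV network with createSriovTestPod; all names and the template path are hypothetical:
testPod := sriovTestPod{
name: "testpod0", // hypothetical
namespace: "example-ns", // hypothetical
networkName: "example-sriovnet", // must match an existing SriovNetwork/NAD
template: "testdata/networking/sriov/sriov-netdevice-template.yaml", // hypothetical path
}
testPod.createSriovTestPod(oc)
defer testPod.deleteSriovTestPod(oc)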
function
|
openshift/openshift-tests-private
|
bc738d39-80ea-429e-b430-ba39959a9de3
|
getPciAddress
|
['"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func getPciAddress(namespace string, podName string, policyName string) string {
pciAddress, err := e2eoutput.RunHostCmdWithRetries(namespace, podName, "printenv PCIDEVICE_OPENSHIFT_IO_"+strings.ToUpper(policyName), 3*time.Second, 30*time.Second)
e2e.Logf("Get the pci address env is: %s", pciAddress)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(pciAddress).NotTo(o.BeEmpty())
return strings.TrimSuffix(pciAddress, "\n")
}
|
networking
| ||||
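A usage sketch for getPciAddress, assuming a running test pod that was scheduled with the named SR-IOV resource; the namespace, pod, and policy names are hypothetical:
// Reads PCIDEVICE_OPENSHIFT_IO_EXAMPLEPOLICY from the pod's environment.
pciAddr := getPciAddress("example-ns", "testpod0", "examplepolicy")
e2e.Logf("the VF PCI address is %s", pciAddr) // e.g. 0000:3b:02.2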
function
|
openshift/openshift-tests-private
|
6993c3cc-e69e-4676-b8be-75971d68714c
|
getSriovNode
|
['"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func getSriovNode(oc *exutil.CLI, namespace string, label string) string {
sriovNodeName := ""
nodeNamesAll, err := oc.AsAdmin().Run("get").Args("-n", namespace, "node", "-l", label, "-ojsonpath={.items..metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames := strings.Split(nodeNamesAll, " ")
for _, nodeName := range nodeNames {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", nodeName, "-n", namespace, "-ojsonpath={.spec.interfaces}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output != "" {
sriovNodeName = nodeName
break
}
}
e2e.Logf("The sriov node is %v ", sriovNodeName)
o.Expect(sriovNodeName).NotTo(o.BeEmpty())
return sriovNodeName
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
0ea0250d-3d43-44ac-ad66-3bf68f13f2ad
|
checkDeviceIDExist
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func checkDeviceIDExist(oc *exutil.CLI, namespace string, deviceID string) bool {
allDeviceID, err := oc.AsAdmin().Run("get").Args("sriovnetworknodestates", "-n", namespace, "-ojsonpath={.items[*].status.interfaces[*].deviceID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("tested deviceID is %v and all supported deviceID on node are %v ", deviceID, allDeviceID)
return strings.Contains(allDeviceID, deviceID)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
22e5927d-8815-464a-ba65-9b5b7ed7f3b7
|
chkSriovPolicy
|
['"strings"']
|
['sriovNetResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (rs *sriovNetResource) chkSriovPolicy(oc *exutil.CLI) bool {
sriovPolicyList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("SriovNetworkNodePolicy", "-n", rs.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(sriovPolicyList, rs.name) {
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
fd56a77a-1487-4796-947d-da86a3999db2
|
waitForSriovPolicySyncUpStart
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func waitForSriovPolicySyncUpStart(oc *exutil.CLI, ns string) {
err := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates", "-n", ns, "-o=jsonpath={.items[*].status.syncStatus}").Output()
e2e.Logf("the status of sriov policy is %s", status)
if err != nil {
e2e.Logf("failed to get sriov policy status: %v, retrying...", err)
return false, nil
}
nodesStatus := strings.TrimSpace(status)
statusList := strings.Split(nodesStatus, " ")
for _, nodeStat := range statusList {
if nodeStat == "InProgress" {
e2e.Logf("nodes start to sync up ...", err)
return true, nil
}
}
e2e.Logf("nodes sync up hasn't started yet ...")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "sriovnetworknodestates sync up is in progress")
}
|
networking
| ||||
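The two wait helpers above are typically paired after a node policy change; a minimal sketch, assuming the default operator namespace:
// First wait for at least one node to report InProgress, then for all nodes
// to settle back to Succeeded before the test continues.
waitForSriovPolicySyncUpStart(oc, "openshift-sriov-network-operator")
waitForSriovPolicyReady(oc, "openshift-sriov-network-operator")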
function
|
openshift/openshift-tests-private
|
5f505862-5051-400f-a6e7-152f4278de3a
|
getOperatorVersion
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func getOperatorVersion(oc *exutil.CLI, subname string, ns string) string {
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", subname, "-n", ns, "-o=jsonpath={.status.currentCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
e2e.Logf("operator version is %s", csvName)
return csvName
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
672dc173-8b99-4a41-aee4-f00ca9944e68
|
findSchedulingDisabledNode
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func findSchedulingDisabledNode(oc *exutil.CLI, interval, timeout time.Duration, label string) string {
scheduleDisableNodeName := ""
errNode := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(ctx context.Context) (bool, error) {
nodeNamesAll, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", label, "-ojsonpath={.items..metadata.name}").Output()
if err != nil {
return false, nil
}
nodeNames := strings.Split(nodeNamesAll, " ")
for _, nodeName := range nodeNames {
nodeOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName).Output()
if err == nil {
if strings.Contains(nodeOutput, "NotReady") || strings.Contains(nodeOutput, "SchedulingDisabled") {
scheduleDisableNodeName = nodeName
break
}
}
}
if scheduleDisableNodeName == "" {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errNode, "no node became SchedulingDisabled or NotReady!")
return scheduleDisableNodeName
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4d65e576-b932-4011-a798-da658d52cf89
|
chkVFStatusMatch
|
['"fmt"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkVFStatusMatch(oc *exutil.CLI, nodeName, nicName, macAddress, expectValue string) {
cmd := fmt.Sprintf("ip link show %s | grep %s", nicName, macAddress)
output, debugNodeErr := exutil.DebugNode(oc, nodeName, "bash", "-c", cmd)
e2e.Logf("The ip link show output: \n %v", output)
o.Expect(debugNodeErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, expectValue)).To(o.BeTrue())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7cf53ad5-90a6-4c47-a9d9-e05be29fe0d3
|
initVF
|
['"path/filepath"']
|
['sriovNetworkNodePolicy']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func initVF(oc *exutil.CLI, name, deviceID, interfaceName, vendor, ns string, vfNum int) bool {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovNetworkNodePolicyTemplate := filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml")
sriovPolicy := sriovNetworkNodePolicy{
policyName: name,
deviceType: "netdevice",
deviceID: deviceID,
pfName: interfaceName,
vendor: vendor,
numVfs: vfNum,
resourceName: name,
template: sriovNetworkNodePolicyTemplate,
namespace: ns,
}
exutil.By("Check the deviceID if exist on the cluster worker")
e2e.Logf("Create VF on name: %s, deviceID: %s, interfacename: %s, vendor: %s", name, deviceID, interfaceName, vendor)
if !chkPfExist(oc, deviceID, interfaceName) {
e2e.Logf("the cluster do not contain the sriov card. skip this testing!")
return false
}
//defer rmSriovNetworkPolicy(oc, sriovPolicy.policyName, sriovOpNs)
sriovPolicy.createPolicy(oc)
waitForSriovPolicyReady(oc, ns)
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
9c7f8ec6-e479-46c6-8a65-f81c1e82304b
|
initDpdkVF
|
['"path/filepath"']
|
['sriovNetworkNodePolicy']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func initDpdkVF(oc *exutil.CLI, name, deviceID, interfaceName, vendor, ns string, vfNum int) bool {
deviceType := "vfio-pci"
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovNetworkNodePolicyTemplate := filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-template.yaml")
if vendor == "15b3" {
deviceType = "netdevice"
sriovNetworkNodePolicyTemplate = filepath.Join(buildPruningBaseDir, "sriovnetworkpolicy-mlx-dpdk-template.yaml")
}
sriovPolicy := sriovNetworkNodePolicy{
policyName: name,
deviceType: deviceType,
deviceID: deviceID,
pfName: interfaceName,
vendor: vendor,
numVfs: vfNum,
resourceName: name,
template: sriovNetworkNodePolicyTemplate,
namespace: ns,
}
exutil.By("Check the deviceID if exist on the cluster worker")
e2e.Logf("Create VF on name: %s, deviceID: %s, interfacename: %s, vendor: %s", name, deviceID, interfaceName, vendor)
if !chkPfExist(oc, deviceID, interfaceName) {
e2e.Logf("the cluster do not contain the sriov card. skip this testing!")
return false
}
// create dpdk policy
sriovPolicy.createPolicy(oc)
waitForSriovPolicyReady(oc, ns)
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
07433e5e-72ad-4f48-8fed-f42009a713e7
|
chkVFStatusWithPassTraffic
|
['"path/filepath"', '"strconv"', '"strings"', '"time"']
|
['sriovTestPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkVFStatusWithPassTraffic(oc *exutil.CLI, nadName, nicName, ns, expectValue string) {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml")
exutil.By("Create test pod on the target namespace")
for i := 0; i < 2; i++ {
sriovTestPod := sriovTestPod{
name: "testpod" + strconv.Itoa(i),
namespace: ns,
networkName: nadName,
template: sriovTestPodTemplate,
}
sriovTestPod.createSriovTestPod(oc)
err := waitForPodWithLabelReady(oc, ns, "name=sriov-netdevice")
exutil.AssertWaitPollNoErr(err, "this pod with label name=sriov-netdevice not ready")
if strings.Contains(expectValue, "mtu") {
mtucheck, err := e2eoutput.RunHostCmdWithRetries(ns, sriovTestPod.name, "ip addr show net1", 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(mtucheck, expectValue)).To(o.BeTrue())
} else {
nodeName, nodeNameErr := exutil.GetPodNodeName(oc, ns, sriovTestPod.name)
o.Expect(nodeNameErr).NotTo(o.HaveOccurred())
podMac := getInterfaceMac(oc, ns, sriovTestPod.name, "net1")
chkVFStatusMatch(oc, nodeName, nicName, podMac, expectValue)
}
}
chkPodsPassTraffic(oc, "testpod0", "testpod1", "net1", ns)
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
a1794127-6d63-42e0-a5c8-3cdc4ce0d35f
|
chkPodsPassTraffic
|
['"regexp"', '"strings"', '"time"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkPodsPassTraffic(oc *exutil.CLI, pod1name string, pod2name string, infName string, ns string) {
exutil.By("Check the interface is connected, if not skip the connect testing")
cmd := "ip addr show " + infName
podConnectStatus1, err1 := e2eoutput.RunHostCmdWithRetries(ns, pod1name, cmd, 3*time.Second, 30*time.Second)
o.Expect(err1).NotTo(o.HaveOccurred())
podConnectStatus2, err2 := e2eoutput.RunHostCmdWithRetries(ns, pod2name, cmd, 3*time.Second, 30*time.Second)
o.Expect(err2).NotTo(o.HaveOccurred())
e2e.Logf("The ip connection of %v show: \n %v", pod1name, podConnectStatus1)
e2e.Logf("The ip connection of %v show: \n %v", pod2name, podConnectStatus2)
//if either pod's status includes NO-CARRIER, skip the connectivity testing
if !strings.Contains(podConnectStatus1, "NO-CARRIER") && !strings.Contains(podConnectStatus2, "NO-CARRIER") {
exutil.By("Get destination Pod's IP on secondary interface")
cmd = "ip addr show dev " + infName + " | grep global"
net1Output, _ := e2eoutput.RunHostCmdWithRetries(ns, pod2name, cmd, 3*time.Second, 30*time.Second)
net1Output = strings.TrimSpace(net1Output)
// Match our IPv4 and IPv6 address on net1 ip address output
rev4 := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)
ipv4Addresses := rev4.FindAllString(net1Output, -1)
rev6 := regexp.MustCompile(`inet6\s+([0-9a-fA-F:]{2,39})(?:/\d{1,3})?`)
ipv6Addresses := rev6.FindAllStringSubmatch(net1Output, -1)
for _, match := range ipv6Addresses {
if len(match) > 1 {
ipv6Address := match[1]
e2e.Logf("\n destination pod %s net1 IPv6 address: %s\n", pod2name, ipv6Address)
CurlMultusPod2PodPass(oc, ns, pod1name, ipv6Address, infName, "Hello")
}
}
e2e.Logf("\n destination pod %s net1 IPv4 address: %s\n", pod2name, ipv4Addresses)
if len(ipv4Addresses) != 0 {
CurlMultusPod2PodPass(oc, ns, pod1name, ipv4Addresses[0], infName, "Hello")
}
}
}
|
networking
| ||||
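The address-extraction step in chkPodsPassTraffic can be exercised standalone; a self-contained sketch against canned `ip addr` output (the sample addresses are made up):
package main

import (
"fmt"
"regexp"
)

func main() {
// Canned output of `ip addr show dev net1 | grep global` (hypothetical addresses).
net1Output := "inet 192.168.10.5/24 brd 192.168.10.255 scope global net1\ninet6 fd00:10:1::5/64 scope global"
// Same IPv4/IPv6 patterns as chkPodsPassTraffic.
rev4 := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)
fmt.Println(rev4.FindAllString(net1Output, -1)) // [192.168.10.5 192.168.10.255]
rev6 := regexp.MustCompile(`inet6\s+([0-9a-fA-F:]{2,39})(?:/\d{1,3})?`)
for _, m := range rev6.FindAllStringSubmatch(net1Output, -1) {
fmt.Println(m[1]) // fd00:10:1::5
}
}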
function
|
openshift/openshift-tests-private
|
4d57be36-1a0a-4206-be1d-468b61d42377
|
deleteSriovTestPod
|
['sriovTestPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func (sriovTestPod *sriovTestPod) deleteSriovTestPod(oc *exutil.CLI) {
e2e.Logf("delete pod %s in namespace %s", sriovTestPod.name, sriovTestPod.namespace)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", sriovTestPod.name, "-n", sriovTestPod.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
54d09f44-dd8c-4cf0-8426-44fa62ccacb6
|
createNumPods
|
['"path/filepath"', '"strconv"']
|
['sriovTestPod']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func createNumPods(oc *exutil.CLI, nadName, ns, podPrex string, numPods int) {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/sriov")
sriovTestPodTemplate := filepath.Join(buildPruningBaseDir, "sriov-netdevice-template.yaml")
exutil.By("Create test pod on the target namespace")
for i := 0; i < numPods; i++ {
sriovTestPod := sriovTestPod{
name: podPrex + strconv.Itoa(i),
namespace: ns,
networkName: nadName,
template: sriovTestPodTemplate,
}
sriovTestPod.createSriovTestPod(oc)
}
err := waitForPodWithLabelReady(oc, ns, "name=sriov-netdevice")
exutil.AssertWaitPollNoErr(err, "pods with label name=sriov-netdevice not ready")
e2e.Logf("Have successfully created %v pods", numPods)
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
100adac7-c1dc-4c0f-9a91-58b6dacd227d
|
getSriovWokerNodes
|
['"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func getSriovWokerNodes(oc *exutil.CLI) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "feature.node.kubernetes.io/sriov-capable=true",
"-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(output)
return nodeNameList
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c37adb6d-adf0-405b-b457-207442fe51b2
|
getWorkerNodesWithNic
|
['"regexp"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func getWorkerNodesWithNic(oc *exutil.CLI, deviceid string, pfname string) []string {
workerWithNicList := []string{}
nodeNameList := getSriovWokerNodes(oc)
e2e.Logf("print all worker nodes %v", nodeNameList)
for _, workerNode := range nodeNameList {
output, checkNicErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovnetworknodestates.sriovnetwork.openshift.io", workerNode,
"-n", "openshift-sriov-network-operator", "-o=jsonpath={.status.interfaces}").Output()
o.Expect(checkNicErr).NotTo(o.HaveOccurred())
nicList := strings.Split(output, "}")
for _, nicInfo := range nicList {
// at least one worker node should have required PF.
re1 := regexp.MustCompile(`\"` + pfname + `\"`)
re2 := regexp.MustCompile(`\"deviceID\":\"` + deviceid + `\"`)
if re1.MatchString(nicInfo) && re2.MatchString(nicInfo) {
e2e.Logf("on worker node %v, find PF %v!!", workerNode, pfname)
workerWithNicList = append(workerWithNicList, workerNode)
}
}
e2e.Logf("The worker list which has device id %v, pfname %v is %v", deviceid, pfname, workerWithNicList)
}
return workerWithNicList
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d87ce153-1030-4c7a-8a06-2755e808963a
|
chkPfExist
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkPfExist(oc *exutil.CLI, deviceid string, pfname string) bool {
res := true
workerList := getWorkerNodesWithNic(oc, deviceid, pfname)
if len(workerList) == 0 {
e2e.Logf("The worker nodes don't have the required PF %v with DeviceID %v", pfname, deviceid)
res = false
}
return res
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
ed701990-f35d-491e-a997-809660e37c8e
|
chkNAD
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkNAD(oc *exutil.CLI, ns string, name string, expected bool) error {
return wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("net-attach-def", "-n", ns, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the NAD list in ns %v is %v", ns, output)
if expected && !strings.Contains(output, name) {
e2e.Logf("Can not get NAD, got err:%v, and try next round", err)
return false, nil
}
if !expected && strings.Contains(output, name) {
e2e.Logf("NAD has not beem removed, try again")
return false, nil
}
return true, nil
})
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
e052204e-8c64-4d14-9bcd-8aba88ab1d36
|
rmNAD
|
['e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func rmNAD(oc *exutil.CLI, ns string, name string) {
output, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("net-attach-def", "-n", ns, name).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("the NAD %v is removed", name)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c02d4610-f525-4f69-a29d-0308d898114f
|
setSriovWebhook
|
['e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func setSriovWebhook(oc *exutil.CLI, status string) {
//enable or disable the operator webhook per the requested status
var patchYamlToRestore string
if status == "true" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableOperatorWebhook","value": true}]`
} else if status == "false" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableOperatorWebhook","value": false}]`
}
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator",
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := "sriovoperatorconfig.sriovnetwork.openshift.io/default patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
// check webhook is set correctly
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator", "-o=jsonpath={.spec.enableOperatorWebhook}").Output()
e2e.Logf("the status of sriov webhook is %v", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(status))
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
6b326c70-d061-4a34-a401-42cb50116cff
|
chkSriovWebhookResource
|
['e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkSriovWebhookResource(oc *exutil.CLI, status bool) {
output1, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ds", "operator-webhook", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output1)
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "operator-webhook-sa", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output2)
output3, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "operator-webhook-service", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output3)
output4, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrole", "operator-webhook", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output4)
output5, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("MutatingWebhookConfiguration", "sriov-operator-webhook-config", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output5)
output6, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrolebinding", "operator-webhook-role-binding", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output6)
if status == true {
o.Expect(output1).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output2).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output3).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output4).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output5).Should(o.ContainSubstring("operator-webhook"))
o.Expect(output6).Should(o.ContainSubstring("operator-webhook"))
} else {
o.Expect(output1).Should(o.ContainSubstring("not found"))
o.Expect(output2).Should(o.ContainSubstring("not found"))
o.Expect(output3).Should(o.ContainSubstring("not found"))
o.Expect(output4).Should(o.ContainSubstring("not found"))
o.Expect(output5).Should(o.ContainSubstring("not found"))
o.Expect(output6).Should(o.ContainSubstring("not found"))
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
ee25bd1a-e4b4-42ca-aff0-89b289c04878
|
setSriovInjector
|
['e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func setSriovInjector(oc *exutil.CLI, status string) {
//enable or disable the sriov resource injector per the requested status
var patchYamlToRestore string
if status == "true" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableInjector","value": true}]`
} else if status == "false" {
patchYamlToRestore = `[{"op":"replace","path":"/spec/enableInjector","value": false}]`
}
output, err1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator",
"--type=json", "-p", patchYamlToRestore).Output()
e2e.Logf("patch result is %v", output)
o.Expect(err1).NotTo(o.HaveOccurred())
matchStr := "sriovoperatorconfig.sriovnetwork.openshift.io/default patched"
o.Expect(output).Should(o.ContainSubstring(matchStr))
// check injector is set correctly
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sriovoperatorconfigs", "default", "-n", "openshift-sriov-network-operator", "-o=jsonpath={.spec.enableInjector}").Output()
e2e.Logf("the status of sriov resource injector is %v", output)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(status))
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
f5be8941-308e-419d-8d04-a2ace5e23b40
|
chkSriovInjectorResource
|
['e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func chkSriovInjectorResource(oc *exutil.CLI, status bool) {
output1, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ds", "network-resources-injector", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output1)
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "network-resources-injector-sa", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output2)
output3, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "network-resources-injector-service", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output3)
output4, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrole", "network-resources-injector", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output4)
output5, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("MutatingWebhookConfiguration", "network-resources-injector-config", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output5)
output6, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrolebinding", "network-resources-injector-role-binding", "-n", "openshift-sriov-network-operator").Output()
e2e.Logf("the result of output is %v", output6)
if status == true {
o.Expect(output1).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output2).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output3).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output4).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output5).Should(o.ContainSubstring("network-resources-injector"))
o.Expect(output6).Should(o.ContainSubstring("network-resources-injector"))
} else {
o.Expect(output1).Should(o.ContainSubstring("not found"))
o.Expect(output2).Should(o.ContainSubstring("not found"))
o.Expect(output3).Should(o.ContainSubstring("not found"))
o.Expect(output4).Should(o.ContainSubstring("not found"))
o.Expect(output5).Should(o.ContainSubstring("not found"))
o.Expect(output6).Should(o.ContainSubstring("not found"))
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
bd321862-c7ed-4855-8490-d748118c8a2b
|
pingPassWithNet1
|
['"fmt"', '"strings"', '"time"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func pingPassWithNet1(oc *exutil.CLI, ns1, pod1, pod2 string) {
pod1IPv4, _ := getPodMultiNetwork(oc, ns1, pod1)
e2e.Logf("The second interface v4 address of pod1 is: %v", pod1IPv4)
command := fmt.Sprintf("ping -c 3 %s", pod1IPv4)
pingOutput, err := e2eoutput.RunHostCmdWithRetries(ns1, pod2, command, 3*time.Second, 12*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ping output: %v", pingOutput)
o.Expect(strings.Count(pingOutput, "3 received")).To(o.Equal(1))
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9c0023f7-eaf2-40e2-9f5f-215e6db0c7b8
|
getInterfaceMac
|
['"fmt"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func getInterfaceMac(oc *exutil.CLI, namespace, podName, interfaceName string) string {
command := fmt.Sprintf("ip link show %s | awk '/link\\/ether/ {print $2}'", interfaceName)
podInterfaceMac, err := e2eoutput.RunHostCmdWithRetries(namespace, podName, command, 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
podInterfaceMac = strings.TrimSpace(podInterfaceMac)
return podInterfaceMac
}
|
networking
| ||||
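A sketch combining getInterfaceMac with chkVFStatusMatch from earlier in this file, assuming a running test pod; the namespace, pod, and PF names are hypothetical:
// Look up the MAC of the pod's net1 VF, find which node hosts the pod,
// then assert the matching VF line on the node's PF shows "trust on".
podMac := getInterfaceMac(oc, "example-ns", "testpod0", "net1")
nodeName, nodeNameErr := exutil.GetPodNodeName(oc, "example-ns", "testpod0")
o.Expect(nodeNameErr).NotTo(o.HaveOccurred())
chkVFStatusMatch(oc, nodeName, "ens1f0np0", podMac, "trust on")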
function
|
openshift/openshift-tests-private
|
00edcdd1-2b34-4a61-b37f-216a7c496916
|
getOperatorSource
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func getOperatorSource(oc *exutil.CLI, namespace string) string {
catalogSourceNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("catalogsource", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(catalogSourceNames, "auto-release-app-registry") {
return "auto-release-app-registry"
} else if strings.Contains(catalogSourceNames, "qe-app-registry") {
return "qe-app-registry"
} else {
return ""
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
90697923-29e3-41f1-abd9-fc658d51aa67
|
checkInterfaceNoCarrier
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/sriov_util.go
|
func checkInterfaceNoCarrier(oc *exutil.CLI, nodeName string, interfaceName string) bool {
var output string
var err error
checkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
output, err = exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", "/usr/sbin/ip address show dev "+interfaceName)
if output == "" || err != nil {
e2e.Logf("Did not get node's management interface, errors: %v, try again", err)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("fail to check if interface %s on node %s has carrier, err: %v", interfaceName, nodeName, checkErr))
return strings.Contains(output, "NO-CARRIER")
}
|
networking
|