element_type
stringclasses 4
values | project_name
stringclasses 1
value | uuid
stringlengths 36
36
| name
stringlengths 0
346
| imports
stringlengths 0
2.67k
| structs
stringclasses 761
values | interfaces
stringclasses 22
values | file_location
stringclasses 545
values | code
stringlengths 26
8.07M
| global_vars
stringclasses 7
values | package
stringclasses 124
values | tags
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|
test case
|
openshift/openshift-tests-private
|
de33273a-365f-4065-821f-714b8370f06d
|
Author:asood-NonHyperShiftHOST-Medium-50947-Medium-50948-Verify BGP and L2 Advertisement webhook validation.
|
['"encoding/json"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-NonHyperShiftHOST-Medium-50947-Medium-50948-Verify BGP and L2 Advertisement webhook validation.", func() {
workers := []string{"worker-1", "worker-2", "worker-3"}
bgpCommunties := []string{"65001:65500"}
ipaddrpools := []string{"ipaddresspool-0", "ipaddresspool-1"}
bgpPeers := []string{"peer-64500", "peer-65000"}
interfaces := []string{"br-ex", "eno1", "eno2"}
crMap := make(map[string]string)
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-50948",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
l2advertisement := l2AdvertisementResource{
name: "l2-adv-50947",
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
exutil.By("Create BGP and L2 Advertisement")
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
crMap["bgpadvertisements"] = bgpAdvertisement.name
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
crMap["l2advertisements"] = l2advertisement.name
for crType, crName := range crMap {
exutil.By(fmt.Sprintf("Validate duplicate ip address pool is rejected for %s", crType))
ipaddrpools = append(ipaddrpools, "ipaddresspool-1")
addrPoolList, err := json.Marshal(ipaddrpools)
o.Expect(err).NotTo(o.HaveOccurred())
patchAdvertisement := fmt.Sprintf("{\"spec\":{\"ipAddressPools\": %s}}", string(addrPoolList))
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args(crType, crName, "-n", opNamespace, "--type=merge", "-p", patchAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of ipAddressPools")).To(o.BeTrue())
exutil.By(fmt.Sprintf("Validate duplicate node is rejected for %s", crType))
workers = append(workers, "worker-1")
workerList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
patchAdvertisement = fmt.Sprintf("{\"spec\":{\"nodeSelectors\":[{\"matchExpressions\":[{\"key\":\"kubernetes.io/hostname\",\"operator\":\"In\",\"values\":%s}]}]}}", string(workerList))
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args(crType, crName, "-n", opNamespace, "--type=merge", "-p", patchAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of match expression value in label selector")).To(o.BeTrue())
}
exutil.By("Validate community strings is updated with community object for BGP Advertisements")
bgpCommunties = []string{"65001:65500", "community1"}
bgpCommStrList, err := json.Marshal(bgpCommunties)
o.Expect(err).NotTo(o.HaveOccurred())
patchBgpAdvertisement := fmt.Sprintf("{\"spec\":{\"communities\": %s}}", string(bgpCommStrList))
_, patchErr1 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr1).NotTo(o.HaveOccurred())
exutil.By("Validate duplicate community strings is rejected for BGP Advertisements")
bgpCommunties = append(bgpCommunties, "65001:65500")
bgpCommStrList, err = json.Marshal(bgpCommunties)
o.Expect(err).NotTo(o.HaveOccurred())
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"communities\": %s}}", string(bgpCommStrList))
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of community")).To(o.BeTrue())
exutil.By("Validate duplicate BGP Peer is rejected for BGP Advertisements")
bgpPeers = append(bgpPeers, "peer-64500")
bgpPeersList, err := json.Marshal(bgpPeers)
o.Expect(err).NotTo(o.HaveOccurred())
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"peers\": %s}}", string(bgpPeersList))
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate definition of peers")).To(o.BeTrue())
exutil.By("Validate invalid IPv4 aggregation length is rejected for BGP Advertisements")
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"aggregationLength\": %d}}", 33)
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "invalid aggregation length")).To(o.BeTrue())
exutil.By("Validate invalid IPv6 aggregation length is rejected for BGP Advertisements")
patchBgpAdvertisement = fmt.Sprintf("{\"spec\":{\"aggregationLengthV6\": %d}}", 129)
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgpadvertisement", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace, "--type=merge", "-p", patchBgpAdvertisement).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "invalid aggregation length")).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
f1379ac8-b0cd-44f0-b041-9218cfb30f78
|
Author:qiowang-NonHyperShiftHOST-High-46124-Verify webhook validation for BGP peer
|
['"path/filepath"', '"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:qiowang-NonHyperShiftHOST-High-46124-Verify webhook validation for BGP peer", func() {
exutil.By("1. Create two BGPPeer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
for i := 1; i < 3; i++ {
BGPPeerCR := bgpPeerResource{
name: "peer-46124-" + strconv.Itoa(i),
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: "",
myASN: 65501,
peerASN: 65500 + i,
peerAddress: "10.10.10." + strconv.Itoa(i),
peerPort: 6000,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
}
exutil.By("2. Validate two BGPPeer with same peerASN and peerAddress is invalid")
patchBGPPeer := `{"spec":{"peerASN":65501,"peerAddress": "10.10.10.1"}}`
patchOutput, patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "duplicate BGPPeers")).To(o.BeTrue())
exutil.By("3. Validate two BGPPeer with different peerASN but same peerAddress is invalid")
patchBGPPeer = `{"spec":{"peerAddress": "10.10.10.1"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "already exists")).To(o.BeTrue())
exutil.By("4. Validate two BGPPeer with different myASN is invalid")
patchBGPPeer = `{"spec":{"myASN": 65502}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "all myAsn must be equal for the same VRF")).To(o.BeTrue())
exutil.By("5. Validate BGPPeer with one of the ASN number more than 4294967296 is invalid")
patchBGPPeer = `{"spec":{"myASN": 4294967297}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "spec.myASN in body should be less than or equal to 4294967295")).To(o.BeTrue())
exutil.By("6. Validate BGPPeer with invalid source address is invalid")
patchBGPPeer = `{"spec":{"peerAddress": "10.10.10"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "invalid BGPPeer address")).To(o.BeTrue())
exutil.By("7. Validate BGPPeer with port number greater than 16384 or less than 0 is invalid")
patchBGPPeer = `{"spec":{"peerPort": 16385}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "spec.peerPort in body should be less than or equal to 16384")).To(o.BeTrue())
patchBGPPeer = `{"spec":{"peerPort": -1}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "spec.peerPort in body should be greater than or equal to 0")).To(o.BeTrue())
exutil.By("8. Validate hold timer and keepalive timer without unit is invalid")
patchBGPPeer = `{"spec":{"holdTime": "30", "keepaliveTime": "10"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "missing unit")).To(o.BeTrue())
exutil.By("9. Validate BGPPeer with keepalive timer greater than holdtime is invalid")
patchBGPPeer = `{"spec":{"keepaliveTime": "40s"}}`
patchOutput, patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", "peer-46124-2", "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Output()
o.Expect(patchErr).To(o.HaveOccurred())
o.Expect(strings.Contains(patchOutput, "must be lower than holdTime")).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
646fad0e-7936-48ac-a946-d09b612195f4
|
Author:asood-NonHyperShiftHOST-High-46560-High-50944-MetalLB-CR All Workers Creation and Verify the logging level of MetalLB can be changed for debugging [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-NonHyperShiftHOST-High-46560-High-50944-MetalLB-CR All Workers Creation and Verify the logging level of MetalLB can be changed for debugging [Serial]", func() {
exutil.By("Creating metalLB CR on all the worker nodes in cluster")
metallbCRTemplate := filepath.Join(testDataDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: opNamespace,
nodeSelectorKey: metalLBNodeSelKey,
nodeSelectorVal: metalLBNodeSelVal,
controllerSelectorKey: metalLBControllerSelKey,
controllerSelectorVal: metalLBControllerSelVal,
template: metallbCRTemplate,
}
defer removeResource(oc, true, true, "metallb", metallbCR.name, "-n", metallbCR.namespace)
result := createMetalLBCR(oc, metallbCR, metallbCRTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - MetalLB CR Created")
exutil.By("Validate speaker and frr-k8s pods scheduled on worker nodes")
result = validateAllWorkerNodeMCR(oc, opNamespace)
o.Expect(result).To(o.BeTrue())
exutil.By("50944-Verify the logging level of MetalLB can be changed for debugging")
exutil.By("Validate log level is info")
level := "info"
components := [3]string{"controller", "speaker", "frr-k8s"}
var err string
for _, component := range components {
result, err = checkLogLevelPod(oc, component, opNamespace, level)
o.Expect(result).To(o.BeTrue())
o.Expect(err).Should(o.BeEmpty())
e2e.Logf("%s pod log level is %s", component, level)
}
exutil.By("Change the log level")
//defer not needed because metallb CR is deleted at the end of the test
patchResourceAsAdmin(oc, "metallb/"+metallbCR.name, "{\"spec\":{\"logLevel\": \"debug\"}}", opNamespace)
exutil.By("Verify the deployment and daemon set have rolled out")
dpStatus, dpStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "deployment", "controller", "--timeout", "5m").Output()
o.Expect(dpStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dpStatus, "successfully rolled out")).To(o.BeTrue())
dsSets := [2]string{"speaker", "frr-k8s"}
for _, dsSet := range dsSets {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", dsSet, "--timeout", "5m").Output()
o.Expect(dsStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dsStatus, "successfully rolled out")).To(o.BeTrue())
}
level = "debug"
for _, component := range components {
result, err = checkLogLevelPod(oc, component, opNamespace, level)
o.Expect(result).To(o.BeTrue())
o.Expect(err).Should(o.BeEmpty())
e2e.Logf("%s pod log level is %s", component, level)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
661bd7df-e816-4729-ac2b-5afd2e8e1e21
|
Author:asood-NonHyperShiftHOST-High-54857-Validate controller and pod can be scheduled based on node selectors.[Serial]
|
['"context"', '"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-NonHyperShiftHOST-High-54857-Validate controller and pod can be scheduled based on node selectors.[Serial]", func() {
var nodeSelKey = "kubernetes.io/hostname"
exutil.By("Obtain the worker nodes in cluster")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("This test can only be run for cluster that has atleast two worker nodes.")
}
exutil.By("Creating metalLB CR on specific worker nodes in cluster")
metallbCRTemplate := filepath.Join(testDataDir, "metallb-cr-template.yaml")
metallbCR := metalLBCRResource{
name: "metallb",
namespace: opNamespace,
nodeSelectorKey: nodeSelKey,
nodeSelectorVal: workerList.Items[0].Name,
controllerSelectorKey: nodeSelKey,
controllerSelectorVal: workerList.Items[1].Name,
template: metallbCRTemplate,
}
defer removeResource(oc, true, true, "metallb", metallbCR.name, "-n", metallbCR.namespace)
result := createMetalLBCR(oc, metallbCR, metallbCRTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("Get the pod names for speaker and controller respectively scheduled on %s and %s", workerList.Items[0].Name, workerList.Items[1].Name))
components := []string{"speaker", "controller"}
for i, component := range components {
podName, err := exutil.GetPodName(oc, opNamespace, "component="+component, workerList.Items[i].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d6f10cb9-e4fd-4fc7-a2a0-ab680579c3ae
|
Author:asood-NonHyperShiftHOST-High-54822-Validate controller and speaker pods can be scheduled based on affinity - node affinity, pod affinity and pod anti affinity.[Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-NonHyperShiftHOST-High-54822-Validate controller and speaker pods can be scheduled based on affinity - node affinity, pod affinity and pod anti affinity.[Serial]", func() {
var (
testDataBaseDir = exutil.FixturePath("testdata", "networking")
nodeLabels = []string{"east", "west"}
nodeAffinityFile = filepath.Join(testDataDir, "metallb-cr-node-affinity.yaml")
nodeAffinityTemplate = filepath.Join(testDataDir, "metallb-cr-node-affinity-template.yaml")
podAffinityTemplate = filepath.Join(testDataDir, "metallb-cr-pod-affinity-template.yaml")
podAntiAffinityTemplate = filepath.Join(testDataDir, "metallb-cr-pod-antiaffinity-template.yaml")
pingPodNodeTemplate = filepath.Join(testDataBaseDir, "ping-for-pod-specific-node-template.yaml")
components = []string{"controller", "speaker", "frr-k8s"}
)
exutil.By("Obtain the worker nodes in cluster")
workersList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workersList.Items) < 2 {
g.Skip("This test can only be run for cluster that has atleast two worker nodes.")
}
exutil.By("Label two nodes of the cluster")
for i := 0; i < 2; i++ {
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workersList.Items[i].Name, "zone")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workersList.Items[i].Name, "zone", nodeLabels[i])
}
defer removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
metallbCR := metalLBAffinityCRResource{
name: "metallb",
namespace: opNamespace,
param1: "",
param2: "",
template: "",
}
// Node afinity
for i := 0; i < 2; i++ {
if i == 0 {
exutil.By("Create meatllb CR with Node Affinity using node selector term - matchExpressions")
createResourceFromFile(oc, opNamespace, nodeAffinityFile)
} else {
exutil.By("Create meatllb CR with Node Affinity using node selector term - matchFields")
metallbCR.param1 = workersList.Items[0].Name
metallbCR.param2 = workersList.Items[1].Name
metallbCR.template = nodeAffinityTemplate
o.Expect(createMetalLBAffinityCR(oc, metallbCR)).To(o.BeTrue())
}
exutil.By(fmt.Sprintf("Get the pod names for controller and speaker & frr-k8s respectively scheduled on %s and %s", workersList.Items[0].Name, workersList.Items[1].Name))
expectedPodNodeList := []string{workersList.Items[0].Name, workersList.Items[1].Name, workersList.Items[1].Name}
for j, component := range components {
if j == 0 {
err := waitForPodWithLabelReady(oc, opNamespace, "component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", component, "--timeout", "5m").Output()
o.Expect(dsStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dsStatus, "successfully rolled out")).To(o.BeTrue())
}
podName, err := exutil.GetPodName(oc, opNamespace, "component="+component, expectedPodNodeList[j])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
}
removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
}
// Pod affinity and anti affinity
exutil.By("Create a pod on one of the nodes")
pod := pingPodResourceNode{
name: "hello-pod",
namespace: oc.Namespace(),
nodename: workersList.Items[0].Name,
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, pod.namespace, pod.name)
metallbCR.param1 = pod.namespace
metallbCR.param2 = pod.namespace
metallbCRTemplateList := []string{podAffinityTemplate, podAntiAffinityTemplate}
dsSearchStrList := []string{fmt.Sprintf("1 of %v updated pods are available", len(workersList.Items)), fmt.Sprintf("%v of %v updated pods are available", len(workersList.Items)-1, len(workersList.Items))}
scenarioStrList := []string{"affinity", "anti affinity"}
for index, scenario := range scenarioStrList {
exutil.By(fmt.Sprintf("Create meatllb CR with pod %s", scenario))
metallbCR.template = metallbCRTemplateList[index]
o.Expect(createMetalLBAffinityCR(oc, metallbCR)).To(o.BeTrue())
exutil.By(fmt.Sprintf("Validate roll out status of speaker and frr-k8s for pod %s", scenario))
for i := 1; i < len(components); i++ {
o.Eventually(func() bool {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", components[i], "--timeout", "10s").Output()
o.Expect(dsStatusErr).To(o.HaveOccurred())
return strings.Contains(dsStatus, dsSearchStrList[index])
}, "60s", "10s").Should(o.BeTrue(), "Pods did not reach running status")
}
if index == 0 {
exutil.By(fmt.Sprintf("Validate metallb pods are running only on %s", workersList.Items[0].Name))
for i := 0; i < len(components); i++ {
podName, err := exutil.GetPodName(oc, opNamespace, "component="+components[i], workersList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).NotTo(o.BeEmpty())
}
} else {
exutil.By(fmt.Sprintf("Validate metallb pods are not running on %s", workersList.Items[0].Name))
for i := 0; i < len(components); i++ {
podName, err := exutil.GetPodName(oc, opNamespace, "component="+components[i], workersList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podName).To(o.BeEmpty())
}
}
removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
0805aff4-5bd5-49a0-9f1c-feb659868910
|
Author:asood-NonHyperShiftHOST-High-54823-Validate controller and speaker pods are scheduled on nodes based priority class. [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-NonHyperShiftHOST-High-54823-Validate controller and speaker pods are scheduled on nodes based priority class. [Serial]", func() {
var (
metallbCRPriorityClassFile = filepath.Join(testDataDir, "metallb-cr-priority-class.yaml")
metallbPriorityClassFile = filepath.Join(testDataDir, "metallb-priority-class.yaml")
components = []string{"controller", "speaker", "frr-k8s"}
)
exutil.By("Create meatllb CR with priority class")
createResourceFromFile(oc, opNamespace, metallbCRPriorityClassFile)
defer removeResource(oc, true, true, "metallb", "metallb", "-n", opNamespace)
exutil.By("Validate metallb CR not created as priority class is not yet created")
// just check the daemon sets as pods are not expected to be scheduled
for i := 1; i < len(components); i++ {
o.Eventually(func() bool {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", components[i], "--timeout", "10s").Output()
o.Expect(dsStatusErr).To(o.HaveOccurred())
return strings.Contains(dsStatus, "0 out of")
}, "60s", "10s").Should(o.BeTrue(), "Pods did not reach running status")
}
createResourceFromFile(oc, opNamespace, metallbPriorityClassFile)
defer removeResource(oc, true, true, "priorityclass", "metallb-high-priority")
exutil.By("Validate metallb CR is created after priority class is created")
for j, component := range components {
if j == 0 {
err := waitForPodWithLabelReady(oc, opNamespace, "component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
dsStatus, dsStatusErr := oc.AsAdmin().WithoutNamespace().Run("rollout").Args("status", "-n", opNamespace, "ds", component, "--timeout", "60s").Output()
o.Expect(dsStatusErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(dsStatus, "successfully rolled out")).To(o.BeTrue())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
e2f6b9ec-d8fe-4678-b3a8-c738b5ed25be
|
Author:asood-High-43075-Create L2 LoadBalancer Service [Serial]
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-43075-Create L2 LoadBalancer Service [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "43075"
)
exutil.By("1. Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("2. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("SUCCESS - IP Addresspool")
exutil.By("3. Create L2Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
g.By("4. Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
g.By("4.1 Create a service with ExtenalTrafficPolicy Local")
svc1 := loadBalancerServiceResource{
name: "hello-world-local",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Local",
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc1, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
g.By("4.2 Create a service with ExtenalTrafficPolicy Cluster")
svc2 := loadBalancerServiceResource{
name: "hello-world-cluster",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc2, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - Services created successfully")
exutil.By("4.3 Validate LoadBalancer services")
err = checkLoadBalancerSvcStatus(oc, svc1.namespace, svc1.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc1.namespace, svc1.name)
e2e.Logf("The service %s External IP is %q", svc1.name, svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc2.namespace, svc2.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc2.namespace, svc2.name)
e2e.Logf("The service %s External IP is %q", svc2.name, svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
2293bef1-94b6-4cb7-8d27-5dc692472aa3
|
Author:asood-High-53333-High-49622-Verify for the service IP address of NodePort or LoadBalancer service ARP requests gets response from one interface only and prometheus metrics are updated when service is removed. [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-53333-High-49622-Verify for the service IP address of NodePort or LoadBalancer service ARP requests gets response from one interface only and prometheus metrics are updated when service is removed. [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "53333"
)
exutil.By("Test case for bug ID 2054225")
exutil.By("1.0 Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By(fmt.Sprintf("1.1 Add label to operator namespace %s to enable monitoring", opNamespace))
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", opNamespace, "openshift.io/cluster-monitoring-").Execute()
labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", opNamespace, "openshift.io/cluster-monitoring=true").Execute()
o.Expect(labelErr).NotTo(o.HaveOccurred())
exutil.By("2. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("SUCCESS - IP Addresspool")
exutil.By("3. Create L2Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("4. Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
exutil.By("4.1 Create a service with ExtenalTrafficPolicy Cluster")
svc := loadBalancerServiceResource{
name: "hello-world-cluster",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("SUCCESS - Services created successfully")
exutil.By("4.2 Validate LoadBalancer services")
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("5. Validate MAC Address assigned to service")
exutil.By("5.1 Get the node announcing the service IP")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node announcing the service IP %s ", nodeName)
g.By("5.2 Obtain MAC address for Load Balancer Service IP")
macAddress, result := obtainMACAddressForIP(oc, masterNodeList[0], svcIP, 5)
o.Expect(result).To(o.BeTrue())
o.Expect(macAddress).NotTo(o.BeEmpty())
e2e.Logf("MAC address by ARP Lookup %s ", macAddress)
exutil.By("5.3 Get MAC address configured on the node interface announcing the service IP Address")
macAddress1 := getNodeMacAddress(oc, nodeName)
o.Expect(macAddress1).NotTo(o.BeEmpty())
e2e.Logf("MAC address of announcing node %s ", macAddress1)
o.Expect(strings.ToLower(macAddress)).Should(o.Equal(macAddress1))
exutil.By("OCP-49622 LoadBalancer service prometheus metrics are updated when service is removed")
l2Metrics := "metallb_speaker_announced"
exutil.By(fmt.Sprintf("6.1 Get %s metrics for the service %s at %s IP Address", l2Metrics, svc.name, svcIP))
o.Expect(checkPrometheusMetrics(oc, 10*time.Second, 200*time.Second, false, l2Metrics, true)).To(o.BeTrue())
exutil.By("6.2 Delete the service and check meterics are removed")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
o.Expect(checkPrometheusMetrics(oc, 5*time.Second, 30*time.Second, true, l2Metrics, false)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
ef07d95e-2a5c-4a79-b0ef-c17486405b48
|
Author:asood-High-60182-Verify the nodeport is not allocated to VIP based LoadBalancer service type [Disruptive]
|
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-60182-Verify the nodeport is not allocated to VIP based LoadBalancer service type [Disruptive]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
svc_names = [2]string{"hello-world-cluster", "hello-world-local"}
svc_etp = [2]string{"Cluster", "Local"}
)
exutil.By("1. Determine suitability of worker nodes for the test")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("2. Create two namespace")
for i := 0; i < 2; i++ {
oc.SetupProject()
ns = oc.Namespace()
namespaces = append(namespaces, ns)
g.By("Label the namespace")
_, err := oc.AsAdmin().Run("label").Args("namespace", ns, namespaceLabelKey+"="+namespaceLabelValue[0], "--overwrite").Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("3. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
g.By("SUCCESS - IP Addresspool")
g.By("4. Create L2Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
for i := 0; i < 2; i++ {
g.By("5.1 Create a service with extenaltrafficpolicy " + svc_etp[i])
svc := loadBalancerServiceResource{
name: svc_names[i],
namespace: namespaces[i],
externaltrafficpolicy: svc_etp[i],
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: false,
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
g.By("5.2 LoadBalancer service with name " + svc_names[i])
g.By("5.2.1 Check LoadBalancer service is created")
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("5.2.2 Get LoadBalancer service IP")
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
g.By("5.2.3 Get LoadBalancer service IP announcing node")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
g.By("5.2.4 Validate service")
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
g.By("5.2.5 Check nodePort is not assigned to service")
nodePort := getLoadBalancerSvcNodePort(oc, svc.namespace, svc.name)
o.Expect(nodePort).To(o.BeEmpty())
}
g.By("6. Change the shared gateway mode to local gateway mode")
var desiredMode string
origMode := getOVNGatewayMode(oc)
if origMode == "local" {
desiredMode = "shared"
} else {
desiredMode = "local"
}
e2e.Logf("Cluster is currently on gateway mode %s", origMode)
e2e.Logf("Desired mode is %s", desiredMode)
defer switchOVNGatewayMode(oc, origMode)
switchOVNGatewayMode(oc, desiredMode)
g.By("7. Validate services in modified gateway mode " + desiredMode)
for i := 0; i < 2; i++ {
g.By("7.1 Create a service with extenal traffic policy " + svc_etp[i])
svc_names[i] = svc_names[i] + "-0"
svc := loadBalancerServiceResource{
name: svc_names[i],
namespace: namespaces[i],
externaltrafficpolicy: svc_etp[i],
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: false,
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
g.By("7.2 LoadBalancer service with name " + svc_names[i])
g.By("7.2.1 Check LoadBalancer service is created")
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("7.2.2 Get LoadBalancer service IP")
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
g.By("7.2.3 Get LoadBalancer service IP announcing node")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
g.By("7.2.4 Validate service")
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
g.By("7.2.5 Check nodePort is not assigned to service")
nodePort := getLoadBalancerSvcNodePort(oc, svc.namespace, svc.name)
o.Expect(nodePort).To(o.BeEmpty())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
2e8c4bad-2f5c-4cd9-8002-07a5bae88a30
|
Author:asood-Longduration-NonPreRelease-High-60513-High-60514-High-60515-High-60518-High-60519-Verify L2 service is reachable if service IP is advertised from specific interface on node using one or more L2 advertisements through the updates to L2 advetisements and gets indication if interface is not configured[Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-Longduration-NonPreRelease-High-60513-High-60514-High-60515-High-60518-High-60519-Verify L2 service is reachable if service IP is advertised from specific interface on node using one or more L2 advertisements through the updates to L2 advetisements and gets indication if interface is not configured[Serial]", func() {
var (
ns string
namespaces []string
testID = "60513"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
vmWorkers []string
workers []string
ipaddresspools []string
)
//Two worker nodes needed to create l2advertisement object
exutil.By("0. Determine suitability of worker nodes for the test")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
for i := 0; i < len(workerList.Items); i++ {
if strings.Contains(workerList.Items[i].Name, "worker") {
vmWorkers = append(vmWorkers, workerList.Items[i].Name)
} else {
workers = append(workers, workerList.Items[i].Name)
}
}
e2e.Logf("Virtual Nodes %s", vmWorkers)
e2e.Logf("Real Nodes %s", workers)
if len(workers) < 1 || len(vmWorkers) < 1 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes, virtual and real each.")
}
vmList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("2. Get the master nodes in the cluster for validating service")
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
exutil.By("3. Create IP addresspools")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By(fmt.Sprintf("IP address pool %s created successfully", ipaddresspools[:]))
//Ensure address is not assigned from address pool automatically by setting autoAssign to false
addressList, err := json.Marshal(l2Addresses[1][:])
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo := fmt.Sprintf("{\"spec\":{\"autoAssign\": false, \"addresses\": %s}}", string(addressList))
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[1], patchInfo, "metallb-system")
exutil.By("4. Create L2 Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
//Just assign one of the addresspool, use the second one for later
ipaddrpools := []string{ipaddresspools[0], ""}
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("5.0 60513 Verify L2 service with ETP Local or Cluster is reachable if service IP is advertised from specific interface on node.")
exutil.By(fmt.Sprintf("5.1 Patch L2 Advertisement to ensure one interface that allows functionl services for test case %s", testID))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
exutil.By("5.2 Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-0",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
exutil.By(fmt.Sprintf("5.3. Create a service with ETP cluster with name %s", svc.name))
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
exutil.By("5.4 Validate LoadBalancer services")
svcErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr := wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
svc.name = "hello-world-" + testID + "-1"
svc.externaltrafficpolicy = "Local"
exutil.By(fmt.Sprintf("5.5 Create a service with ETP %s with name %s", svc.externaltrafficpolicy, svc.name))
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
exutil.By("5.6 Validate LoadBalancer services")
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, masterNodeList[0], svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
testID = "60514"
exutil.By("6.0 60514 Verify user is given indication if specified interface does not exist on any of the selected node in L2 advertisement")
exutil.By(fmt.Sprint("6.1 Patch L2 Advertisement to use interface that does not exist on nodes for test case", testID))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"eno1\"]}}", "metallb-system")
exutil.By(fmt.Sprintf("6.2 Create service for test case %s", testID))
svc.name = "hello-world-" + testID
svc.externaltrafficpolicy = "Cluster"
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("6.3 Check the event is generated for the interface")
isEvent, _ := checkServiceEvents(oc, svc.name, svc.namespace, "announceFailed")
o.Expect(isEvent).To(o.BeTrue())
exutil.By("6.4 Validate LoadBalancer service is not reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
//There should not be any MAC address associated with service IP.
_, macAddressResult := obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
exutil.By("6.5 Validate LoadBalancer service is reachable after L2 Advertisement is updated")
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
testID = "60515"
exutil.By("7.0 60515 Verify service IP from IP addresspool for set of worker nodes is announced from a specific interface")
exutil.By(fmt.Sprintf("8.1 Update interfaces and nodeSelector of %s", l2advertisement.name))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"eno1\", \"eno2\"]}}", "metallb-system")
patchNodeSelector := fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(vmList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, patchNodeSelector, "metallb-system")
exutil.By("7.2 Create L2 service that is unreachable")
svc.name = "hello-world-" + testID
svc.externaltrafficpolicy = "Cluster"
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("7.3 Validate LoadBalancer service is not reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
_, macAddressResult = obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
exutil.By("7.4 Create another l2advertisement CR with same ip addresspool but different set of nodes and interface")
l2advertisement1 := l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement1.name, "-n", l2advertisement1.namespace)
result = createL2AdvertisementCR(oc, l2advertisement1, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
patchNodeSelector = fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(vmList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, patchNodeSelector, "metallb-system")
exutil.By("7.5 Check the event is not generated for the interface")
isEvent, _ = checkServiceEvents(oc, svc.name, svc.namespace, "announceFailed")
o.Expect(isEvent).To(o.BeFalse())
exutil.By("7.6 Get LoadBalancer service IP announcing node")
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
exutil.By("7.7 Verify the service is functional as the another L2 advertisement is used for the ip addresspool")
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
testID = "60518"
i := 0
var svcIPs []string
exutil.By("8.0 60518 Verify configuration changes like updating the L2 advertisement to add interface, removing L2advertisement and updating addresspool works.")
removeResource(oc, true, true, "l2advertisements", l2advertisement1.name, "-n", l2advertisement1.namespace)
exutil.By(fmt.Sprintf("8.1 Update interfaces and nodeSelector of %s", l2advertisement.name))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"br-ex\", \"eno2\"]}}", "metallb-system")
patchNodeSelector = fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(vmList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, patchNodeSelector, "metallb-system")
exutil.By("8.2 Create L2 service")
svc.name = "hello-world-" + testID + "-" + strconv.Itoa(i)
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("8.3 Validate LoadBalancer service is reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
exutil.By(fmt.Sprintf("8.4 Delete the L2 advertisement resource named %s", l2advertisement.name))
removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
exutil.By(fmt.Sprintf("8.5 Validate service with name %s is unreachable", svc.name))
nodeName = getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("%s is announcing the service %s with IP %s ", nodeName, svc.name, svcIP)
_, macAddressResult = obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
svcIPs = append(svcIPs, svcIP)
exutil.By("8.6 Create another service request IP address from second IP addresspool, so see it is unreachable")
i = i + 1
loadBalancerServiceAnnotatedTemplate := filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
annotatedSvc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-" + strconv.Itoa(i),
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: serviceSelectorValue[0],
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddresspools[1],
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The %s service created successfully with %s with annotation %s:%s", annotatedSvc.name, svcIP, annotatedSvc.annotationKey, annotatedSvc.annotationValue)
svcIPs = append(svcIPs, svcIP)
_, macAddressResult = obtainMACAddressForIP(oc, masterNodeList[1], svcIP, 5)
o.Expect(macAddressResult).To(o.BeFalse())
exutil.By("8.7 Create L2 Advertisements with both ip address pools")
l2advertisement = l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
addrPoolList, err := json.Marshal(ipaddresspools)
o.Expect(err).NotTo(o.HaveOccurred())
patchIPAddresspools := fmt.Sprintf("{\"spec\":{\"ipAddressPools\": %s}}", string(addrPoolList))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, patchIPAddresspools, "metallb-system")
exutil.By("8.8 Both services are functional")
for i = 0; i < 2; i++ {
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIPs[i])
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service at %s to be reachable but was unreachable", svcIPs[i]))
}
testID = "60519"
exutil.By("9.0 60519 Verify interface can be selected across l2advertisements.")
exutil.By(fmt.Sprintf("9.1 Update interface list of %s L2 Advertisement object to non functional", l2advertisement.name))
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement.name, "{\"spec\":{\"interfaces\": [\"eno1\", \"eno2\"]}}", "metallb-system")
exutil.By("9.2 Create another L2 Advertisement")
l2advertisement1 = l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement1.name, "-n", l2advertisement1.namespace)
result = createL2AdvertisementCR(oc, l2advertisement1, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, "{\"spec\":{\"interfaces\": [\"br-ex\"]}}", "metallb-system")
patchResourceAsAdmin(oc, "l2advertisements/"+l2advertisement1.name, "{\"spec\":{\"nodeSelectors\": []}}", "metallb-system")
exutil.By("9.3 Create L2 Service")
svc.name = "hello-world-" + testID
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
e2e.Logf("The %s service created successfully", svc.name)
svcErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
exutil.By("9.4 Validate LoadBalancer service is reachable")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %s", svc.name, svcIP)
checkSvcErr = wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", svc.name, svcIP))
})
| |||||
test case
|
openshift/openshift-tests-private
|
6b64b0d4-9f2e-4280-9975-d32c43ab50a2
|
Author:asood-High-43155-High-43156-High-43313-Verify static address is associated with LoadBalancer service specified in YAML, approriate messages are logged if it cannot be and services can share IP [Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-43155-High-43156-High-43313-Verify static address is associated with LoadBalancer service specified in YAML, approriate messages are logged if it cannot be and services can share IP [Serial]", func() {
var (
ns string
namespaces []string
testID = "43155"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
vmWorkers []string
ipaddresspools []string
requestedIp = "192.168.111.65"
)
//Two worker nodes needed to create l2advertisement object
exutil.By("1. Determine suitability of worker nodes for the test")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes, virtual and real each.")
}
for i := 0; i < 2; i++ {
vmWorkers = append(vmWorkers, workerList.Items[i].Name)
}
exutil.By("2. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("3. Create IP addresspools")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By(fmt.Sprintf("IP address pool %s created successfully", ipaddresspools[:]))
//Ensure address is not assigned from address pool automatically by setting autoAssign to false
addressList, err := json.Marshal(l2Addresses[1][:])
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo := fmt.Sprintf("{\"spec\":{\"autoAssign\": false, \"addresses\": %s, \"serviceAllocation\":{\"serviceSelectors\":[], \"namespaces\":[\"%s\"], \"namespaceSelectors\":[] }}}", string(addressList), "test-"+testID)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[1], patchInfo, "metallb-system")
exutil.By("4. Create L2 Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
//Just assign one of the addresspool, use the second one later
ipaddrpools := []string{ipaddresspools[0], ""}
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddrpools[:],
interfaces: interfaces[:],
nodeSelectorValues: vmWorkers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("5.0 %s Verify L2 service requesting specific IP %s.", testID, requestedIp))
exutil.By("5.1 Create L2 LoadBalancer service with annotated IP address")
loadBalancerServiceAnnotatedTemplate := filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
annotatedSvc := loadBalancerServiceResource{
name: "hello-world-" + testID,
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: serviceSelectorValue[0],
annotationKey: "metallb.universe.tf/loadBalancerIPs",
annotationValue: requestedIp,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
exutil.By(fmt.Sprintf("5.2. Create a service with ETP Cluster with name %s", annotatedSvc.name))
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("5.3 Validate LoadBalancer service")
svcErr := checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The service %s External IP is %s", annotatedSvc.name, svcIP)
checkSvcErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 4*time.Minute, false, func(ctx context.Context) (bool, error) {
result := validateService(oc, proxyHost, svcIP)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", annotatedSvc.name, svcIP))
testID = "43156"
exutil.By(fmt.Sprintf("6.0 %s Verify L2 service requesting IP from pool %s for AllocationFailed.", testID, ipaddresspools[1]))
exutil.By("6.1 Create L2 LoadBalancer service with annotated IP address pool")
annotatedSvc.name = "hello-world-" + testID + "-0"
annotatedSvc.annotationKey = "metallb.universe.tf/address-pool"
annotatedSvc.annotationValue = ipaddresspools[1]
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("6.2 Validate LoadBalancer service")
//Use interval and timeout as it is expected IP assignment will fail
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name, 5*time.Second, 30*time.Second)
o.Expect(svcErr).To(o.HaveOccurred())
exutil.By("6.3 Validate allocation failure reason")
isEvent, msg := checkServiceEvents(oc, annotatedSvc.name, annotatedSvc.namespace, "AllocationFailed")
o.Expect(isEvent).To(o.BeTrue())
o.Expect(strings.Contains(msg, fmt.Sprintf("pool %s not compatible for ip assignment", ipaddresspools[1]))).To(o.BeTrue())
exutil.By("6.4 Update IP address pool %s address range for already used IP address")
patchInfo = fmt.Sprintf("{\"spec\":{\"addresses\":[\"%s-%s\"]}}", requestedIp, requestedIp)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[0], patchInfo, "metallb-system")
exutil.By("6.5 Create another service AllocationFailed reason ")
annotatedSvc.name = "hello-world-" + testID + "-1"
annotatedSvc.annotationKey = "metallb.universe.tf/address-pool"
annotatedSvc.annotationValue = ipaddresspools[0]
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("6.6 Validate LoadBalancer service")
//Use interval and timeout as it is expected IP assignment will fail
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name, 5*time.Second, 30*time.Second)
o.Expect(svcErr).To(o.HaveOccurred())
exutil.By("6.7 Validate allocation failure reason")
isEvent, msg = checkServiceEvents(oc, annotatedSvc.name, annotatedSvc.namespace, "AllocationFailed")
o.Expect(isEvent).To(o.BeTrue())
o.Expect(strings.Contains(msg, fmt.Sprintf("no available IPs in pool \"%s\"", ipaddresspools[0]))).To(o.BeTrue())
testID = "43313"
exutil.By(fmt.Sprintf("7.0 %s Verify one address can be associated with more than one service using annotation metallb.universe.tf/allow-shared-ip", testID))
exutil.By(fmt.Sprintf("7.1 Patch IP addresspool pool %s address range to original range", ipaddresspools[0]))
addressList, err = json.Marshal(l2Addresses[0][:])
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo = fmt.Sprintf("{\"spec\":{\"addresses\": %s}}", string(addressList))
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddresspools[0], patchInfo, "metallb-system")
annotationForSvc := fmt.Sprintf("\"shared-ip-%s-svc\"", testID)
exutil.By("7.2 Create first L2 LoadBalancer service with annotation")
annotatedSvc.name = "hello-world-" + testID + "-tcp"
annotatedSvc.annotationKey = "metallb.universe.tf/allow-shared-ip"
annotatedSvc.annotationValue = annotationForSvc
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("7.3 Validate LoadBalancer service is assigned an IP")
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP1 := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The service %s External IP is %s", annotatedSvc.name, svcIP1)
exutil.By("7.4 Create second L2 LoadBalancer service with annotation")
annotatedSvc.name = "hello-world-" + testID + "-udp"
annotatedSvc.annotationKey = "metallb.universe.tf/allow-shared-ip"
annotatedSvc.annotationValue = annotationForSvc
annotatedSvc.protocol = "UDP"
defer removeResource(oc, true, true, "service", annotatedSvc.name, "-n", annotatedSvc.namespace)
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
exutil.By("7.5 Validate LoadBalancer service is assigned an IP")
svcErr = checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(svcErr).NotTo(o.HaveOccurred())
svcIP2 := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The service %s External IP is %s", annotatedSvc.name, svcIP2)
o.Expect(svcIP1).To(o.BeEquivalentTo(svcIP2))
exutil.By(fmt.Sprintf("7.6 Validate LoadBalancer services sharing the IP address %s", svcIP1))
exutil.By(fmt.Sprintf("7.6.1 LoadBalancer service at IP address %s configured with TCP", svcIP1))
checkSvcErr = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 4*time.Minute, false, func(ctx context.Context) (bool, error) {
result := validateService(oc, proxyHost, svcIP1)
if result {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(checkSvcErr, fmt.Sprintf("Expected service %s at %s to be reachable but was unreachable", annotatedSvc.name, svcIP))
exutil.By(fmt.Sprintf("7.6.2 LoadBalancer service at IP address %s configured with UDP", svcIP2))
allUdpSvcPods, getPodsErr := exutil.GetAllPodsWithLabel(oc, ns, "name="+annotatedSvc.name)
o.Expect(getPodsErr).NotTo(o.HaveOccurred())
exutil.By("Listen on port 80 on a backend pod of UDP service")
e2e.Logf("Listening on pod %s", allUdpSvcPods[0])
cmdNcat, cmdOutput, _, ncatCmdErr := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", ns, allUdpSvcPods[0], "bash", "-c", `timeout --preserve-status 60 ncat -u -l 8080`).Background()
defer cmdNcat.Process.Kill()
o.Expect(ncatCmdErr).NotTo(o.HaveOccurred())
allTcpSvcPods, getPodsErr := exutil.GetAllPodsWithLabel(oc, ns, "name=hello-world-"+testID+"-tcp")
o.Expect(getPodsErr).NotTo(o.HaveOccurred())
e2e.Logf("Sending UDP packets from pod %s to service %s", allTcpSvcPods[0], annotatedSvc.name)
cmd := fmt.Sprintf("echo hello | ncat -v -u %s 80", svcIP2)
for i := 0; i < 5; i++ {
output, ncatCmdErr := execCommandInSpecificPod(oc, ns, allTcpSvcPods[0], cmd)
o.Expect(ncatCmdErr).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(output), "bytes sent")).To(o.BeTrue())
}
e2e.Logf("UDP pod server output %s", cmdOutput)
o.Expect(strings.Contains(cmdOutput.String(), "hello")).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
10e0a223-395c-41a7-a2f5-dfb926324072
|
Author:asood-High-64809-ovnkube-node sends netlink delete request deleting conntrack entries for API redirect iptables rule [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-64809-ovnkube-node sends netlink delete request deleting conntrack entries for API redirect iptables rule [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "64809"
)
exutil.By("1. Get API VIP for cluster and Node hosting the VIP")
apiVIP := GetAPIVIPOnCluster(oc)
if apiVIP == "" {
g.Skip("This case requires API VIP to configured on the cluster")
}
apiVIPNode := FindVIPNode(oc, apiVIP)
if apiVIPNode == "" {
g.Skip("This case requires API VIP to configured on the cluster on one of nodes, found none")
}
e2e.Logf("API VIP %s on the cluster is configured on %s", apiVIP, apiVIPNode)
exutil.By("2. Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("3. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("4. Create L2 Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result = createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
conntrackRulesCmd := fmt.Sprintf("conntrack -E -o timestamp | grep %s | grep DESTROY | grep -v CLOSE | grep 6443 | grep ESTABL", apiVIP)
cmdContrackRulesdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("node/"+apiVIPNode, "--", "chroot", "/host", "bash", "-c", conntrackRulesCmd).Background()
defer cmdContrackRulesdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5. Create LoadBalancer services using Layer 2 addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: false,
externaltrafficpolicy: "Cluster",
template: loadBalancerServiceTemplate,
}
for i := 0; i < 10; i++ {
svc.name = "hello-world-" + testID + "-" + strconv.Itoa(i)
exutil.By(fmt.Sprintf("Create a service %s with ExtenalTrafficPolicy Cluster", svc.name))
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("Validate LoadBalancer service %s", svc.name))
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("LB service created with IP %s", svcIP)
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By(fmt.Sprintf("DeleteLoadBalancer service %s", svc.name))
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
}
e2e.Logf("Conntrack rules output \n%s", cmdOutput.String())
o.Expect(strings.Contains(cmdOutput.String(), "")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
7170d93e-3b26-491e-9833-f0fa1fbb3e7a
|
Author:qiowang-High-51186-High-54819-Validate ipAddressPoolSelector, ipAddressPool and nodeSelector are honored when advertising service IP address with L2 advertisement [Serial]
|
['"context"', '"encoding/json"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:qiowang-High-51186-High-54819-Validate ipAddressPoolSelector, ipAddressPool and nodeSelector are honored when advertising service IP address with L2 advertisement [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
ipAddressPoolSelectorsKey = "zone"
ipAddressPoolSelectorsValues = [2][2]string{{"east"}, {"west"}}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
ipaddresspools []string
testID = "51186"
expectedAddress1 = "192.168.111.65"
expectedAddress2 = "192.168.111.75"
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
loadBalancerServiceAnnotatedTemplate = filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
)
exutil.By("1. Obtain the masters, workers and namespace")
//Two worker nodes needed to create l2advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
masterNodeList, err1 := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err1).NotTo(o.HaveOccurred())
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("2. Create two IP addresspools with different labels")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + testID + "-" + strconv.Itoa(i),
namespace: opNamespace,
addresses: l2Addresses[i][:],
namespaces: namespaces,
label1: ipAddressPoolSelectorsKey,
value1: ipAddressPoolSelectorsValues[i][0],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By("3. Create L2Advertisement with ipAddressPool and nodeSelectors")
l2advertisement := l2AdvertisementResource{
name: "l2-adv" + testID,
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
exutil.By("4. Create LoadBalancer services using Layer 2 addresses")
svc := loadBalancerServiceResource{
name: "hello-world-cluster",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddresspools[0],
allocateLoadBalancerNodePorts: serviceNodePortAllocation,
template: loadBalancerServiceAnnotatedTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
exutil.By("5. Check IP address assigned from addresspool, and advertised only on one of the node listed in l2advertisements")
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
nodeName := getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node %s announcing the service IP", nodeName)
o.Expect(nodeName).Should(o.Or(o.Equal(workers[0]), o.Equal(workers[1])))
exutil.By("6. Remove the previously created services")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
removeResource(oc, true, true, "replicationcontroller", svc.name, "-n", svc.namespace)
exutil.By("7. Update L2Advertisement, update ipAddressPool and nodeSelectors, add ipAddressPoolSelectors")
patchL2Advertisement := `[{"op": "replace", "path": "/spec/ipAddressPools", "value": [""]}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["` + workers[1] + `"]}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "l2advertisement", l2advertisement.name, "--type=json", "-p", patchL2Advertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
patchIPAddrPoolSelectors := `{"spec":{"ipAddressPoolSelectors":[{"matchExpressions": [{"key": "` + ipAddressPoolSelectorsKey + `","operator": "In","values": ["` + ipAddressPoolSelectorsValues[1][0] + `"]}]}]}}`
patchResourceAsAdmin(oc, "l2advertisement/"+l2advertisement.name, patchIPAddrPoolSelectors, "metallb-system")
exutil.By("8. Create LoadBalancer services requesting address from the second ipaddresspools")
svc.annotationValue = ipaddresspools[1]
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
exutil.By("9. Check IP address assigned from the second addresspool, and advertised only on one of the node listed in l2advertisements")
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress2)).To(o.BeTrue())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
nodeName = getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node %s announcing the service IP", nodeName)
o.Expect(nodeName).Should(o.Equal(workers[1]))
exutil.By("10. OCP-54819-Add label to the first worker node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[0], "zone")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[0], "zone", "east")
exutil.By("11. OCP-54819-Edit the l2advertisement to modify the node selection")
patchL2Advertisement = `[{"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/key", "value":"zone"}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["east"]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "l2advertisement", l2advertisement.name, "--type=json", "-p", patchL2Advertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("12. OCP-54819-Check the changes to nodeSelector in L2advertisements are reflected where the service IP is announced")
nodeName = getNodeAnnouncingL2Service(oc, svc.name, svc.namespace)
e2e.Logf("Node %s announcing the service IP", nodeName)
o.Expect(nodeName).Should(o.Equal(workers[0]))
})
| |||||
test case
|
openshift/openshift-tests-private
|
b0cd7e43-155a-4259-84a8-4d0e0b968108
|
Author:meinli-High-43243-The L2 service with externalTrafficPolicy Local continues to service requests even when node announcing the service goes down. [Disruptive]
|
['"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:meinli-High-43243-The L2 service with externalTrafficPolicy Local continues to service requests even when node announcing the service goes down. [Disruptive]", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
ipAddresspoolFile = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
pingPodNodeTemplate = filepath.Join(buildPruningBaseDir, "ping-for-pod-specific-node-template.yaml")
genericServiceTemplate = filepath.Join(buildPruningBaseDir, "service-generic-template.yaml")
ipaddresspools []string
namespaces []string
serviceSelectorKey = "name"
serviceSelectorValue = [1]string{"test-service"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
)
exutil.By("1. Get the namespace, masters and workers")
workerList := excludeSriovNodes(oc)
if len(workerList) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than three nodes")
}
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test43243")
exutil.By("2. create address pool with addresses from worker nodes")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolFile,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolFile)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
e2e.Logf("IP address pools %s ", ipaddresspools)
exutil.By("3. create a L2 advertisement using the above addresspool")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
nodeSelectorValues: workerList[:],
interfaces: interfaces[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("4. create a service with externalTrafficPolicy Local")
for i := 0; i < 2; i++ {
pod := pingPodResourceNode{
name: "hello-pod-" + strconv.Itoa(i),
namespace: ns,
nodename: workerList[i],
template: pingPodNodeTemplate,
}
pod.createPingPodNode(oc)
waitPodReady(oc, ns, pod.name)
}
svc := genericServiceResource{
servicename: "test-service",
namespace: ns,
protocol: "TCP",
selector: "hello-pod",
serviceType: "LoadBalancer",
ipFamilyPolicy: "SingleStack",
internalTrafficPolicy: "Cluster",
externalTrafficPolicy: "Local",
template: genericServiceTemplate,
}
svc.createServiceFromParams(oc)
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.servicename)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.servicename)
e2e.Logf("The service %s External IP is %q", svc.servicename, svcIP)
result = validateService(oc, proxyHost, svcIP+":27017")
o.Expect(result).To(o.BeTrue())
exutil.By("5. Validate service IP announcement being taken over by another node")
nodeName1 := getNodeAnnouncingL2Service(oc, svc.servicename, ns)
defer checkNodeStatus(oc, nodeName1, "Ready")
rebootNode(oc, nodeName1)
checkNodeStatus(oc, nodeName1, "NotReady")
nodeName2 := getNodeAnnouncingL2Service(oc, svc.servicename, ns)
o.Expect(strings.Join(workerList, ",")).Should(o.ContainSubstring(nodeName2))
if nodeName2 != nodeName1 {
e2e.Logf("%s worker node taken over the service successfully!!!", nodeName2)
} else {
e2e.Fail("No worker node taken over the service after reboot")
}
// verify the service request after another worker nodeAssigned
for i := 0; i < 2; i++ {
o.Expect(validateService(oc, proxyHost, svcIP+":27017")).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d10d3375-2027-4631-a25b-fe5526700796
|
Author:meinli-High-43242-The L2 service with externalTrafficPolicy Cluster continues to service requests even when node announcing the service goes down. [Disruptive]
|
['"fmt"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:meinli-High-43242-The L2 service with externalTrafficPolicy Cluster continues to service requests even when node announcing the service goes down. [Disruptive]", func() {
var (
ipAddresspoolFile = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
ipaddresspools []string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
)
exutil.By("1. Get the namespace, masters and workers")
workerList := excludeSriovNodes(oc)
if len(workerList) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test43242")
exutil.By("2. create address pool with addresses from worker nodes")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[i][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolFile,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolFile)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
e2e.Logf("IP address pools %s ", ipaddresspools)
exutil.By("3. create a L2 advertisement using the above addresspool")
l2advertisement := l2AdvertisementResource{
name: "l2-adv",
namespace: opNamespace,
ipAddressPools: ipaddresspools[:],
nodeSelectorValues: workerList[:],
interfaces: interfaces[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
result := createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)
o.Expect(result).To(o.BeTrue())
exutil.By("4. create a service with externalTrafficPolicy Cluster")
svc := loadBalancerServiceResource{
name: "test-rc",
namespace: ns,
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
externaltrafficpolicy: "Cluster",
allocateLoadBalancerNodePorts: false,
template: loadBalancerServiceTemplate,
}
result = createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)
o.Expect(result).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("rc", "test-rc", "--replicas=10", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, ns, "name="+svc.name)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("this pod with label name=%s not ready", svc.name))
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
result = validateService(oc, proxyHost, svcIP+":80")
o.Expect(result).To(o.BeTrue())
exutil.By("5. Validate service IP announcement being taken over by another node")
nodeName1 := getNodeAnnouncingL2Service(oc, svc.name, ns)
defer checkNodeStatus(oc, nodeName1, "Ready")
rebootNode(oc, nodeName1)
checkNodeStatus(oc, nodeName1, "NotReady")
nodeName2 := getNodeAnnouncingL2Service(oc, svc.name, ns)
o.Expect(strings.Join(workerList, ",")).Should(o.ContainSubstring(nodeName2))
if nodeName2 != nodeName1 {
e2e.Logf("%s worker node taker over the service successfully!!!", nodeName2)
} else {
e2e.Fail("No worker node taker over the service after reboot")
}
// verify the service request after another worker nodeAssigned
for i := 0; i < 2; i++ {
o.Expect(validateService(oc, proxyHost, svcIP+":80")).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
e8bbc82b-f0c8-4c0c-bfbc-9209c1c557eb
|
Author:asood-High-60097-High-60098-High-60099-High-60159-Verify ip address is assigned from the ip address pool that has higher priority (lower value), matches namespace, service name or the annotated IP pool in service [Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-60097-High-60098-High-60099-High-60159-Verify ip address is assigned from the ip address pool that has higher priority (lower value), matches namespace, service name or the annotated IP pool in service [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
bgpPassword string
expectedAddress1 = "10.10.10.1"
expectedAddress2 = "10.10.12.1"
)
//Two worker nodes needed to create BGP Advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test60097")
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspools with different priority")
priority_val := 10
for i := 0; i < 2; i++ {
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + strconv.Itoa(i),
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[i][:],
namespaces: namespaces,
priority: priority_val,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
priority_val = priority_val + 10
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
}
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
addrPoolList, err := json.Marshal(ipaddrpools)
o.Expect(err).NotTo(o.HaveOccurred())
patchIPAddresspools := fmt.Sprintf("{\"spec\":{\"ipAddressPools\": %s}}", string(addrPoolList))
patchResourceAsAdmin(oc, "bgpadvertisements/"+bgpAdvertisement.name, patchIPAddresspools, "metallb-system")
exutil.By("7. Create a service to verify it is assigned address from the pool that has higher priority")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-60097",
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60097 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
exutil.By("OCP-60098 Verify ip address from pool is assigned only to the service in project matching namespace or namespaceSelector in ip address pool.")
exutil.By("8.0 Update first ipaddress pool's the match label and match expression for the namespace property")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[0], "{\"spec\":{\"serviceAllocation\": {\"namespaceSelectors\": [{\"matchExpressions\": [{\"key\": \"region\", \"operator\": \"In\", \"values\": [\"SA\"]}]}, {\"matchLabels\": {\"environ\": \"Dev\"}}]}}}", "metallb-system")
exutil.By("8.1 Update first ipaddress pool's priority")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[0], "{\"spec\":{\"serviceAllocation\": {\"priority\": 20}}}", "metallb-system")
exutil.By("8.2 Update first ipaddress pool's namespaces property")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[0], "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", "metallb-system")
exutil.By("9. Label the namespace")
_, errNs := oc.AsAdmin().Run("label").Args("namespace", ns, "environ=Test", "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
_, errNs = oc.AsAdmin().Run("label").Args("namespace", ns, "region=NA").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("10. Delete the service in namespace and recreate it to see the address assigned from the pool that matches namespace selector")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
svc.name = "hello-world-60098"
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60098 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress2)).To(o.BeTrue())
exutil.By("OCP-60099 Verify ip address from pool is assigned only to the service matching serviceSelector in ip address pool")
exutil.By("11.0 Update second ipaddress pool's the match label and match expression for the namespace property")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"namespaceSelectors\": [{\"matchExpressions\": [{\"key\": \"region\", \"operator\": \"In\", \"values\": [\"SA\"]}]}, {\"matchLabels\": {\"environ\": \"Dev\"}}]}}}", "metallb-system")
exutil.By("11.1 Update second ipaddress pool's namesapces")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", "metallb-system")
exutil.By("11.2 Update second ipaddress pool's service selector")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"serviceSelectors\": [{\"matchExpressions\": [{\"key\": \"environ\", \"operator\": \"In\", \"values\": [\"Dev\"]}]}]}}}", "metallb-system")
exutil.By("12. Delete the service in namespace and recreate it to see the address assigned from the pool that matches namespace selector")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
svc.name = "hello-world-60099"
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60099 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
exutil.By("OCP-60159 Verify the ip address annotation in service metallb.universe.tf/address-pool in namepace overrides the priority and service selectors in ip address pool.")
exutil.By("13. Delete the service created in namespace to ensure eligible IP address is released")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
exutil.By("14. Update the priority on second address to be eligible for address assignment")
patchResourceAsAdmin(oc, "ipaddresspools/"+ipaddrpools[1], "{\"spec\":{\"serviceAllocation\": {\"priority\": 10}}}", "metallb-system")
exutil.By("15. Label the namespace to ensure the both addresspools are eligible for address assignment")
_, errNs = oc.AsAdmin().Run("label").Args("namespace", ns, "environ=Dev", "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
_, errNs = oc.AsAdmin().Run("label").Args("namespace", ns, "region=SA", "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("16. Create a service with annotation to obtain IP from first addresspool")
loadBalancerServiceAnnotatedTemplate := filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
svc = loadBalancerServiceResource{
name: "hello-world-60159",
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: serviceSelectorValue[0],
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddrpools[0],
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-60159 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
f3f96cbf-cd06-4e53-8de8-167fbc5d81e3
|
Author:asood-High-50946-Medium-69612-Verify .0 and .255 addresses in IPAddressPool are handled with avoidBuggIPs and MetalLB exposes password in clear text [Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"os/exec"', '"path/filepath"', '"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-50946-Medium-69612-Verify .0 and .255 addresses in IPAddressPool are handled with avoidBuggIPs and MetalLB exposes password in clear text [Serial]", func() {
var (
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
testID = "50946"
ipAddressList = [3]string{"10.10.10.0-10.10.10.0", "10.10.10.255-10.10.10.255", "10.10.10.1-10.10.10.1"}
expectedIPAddressList = [3]string{"10.10.10.0", "10.10.10.255", "10.10.10.1"}
bgpPassword string
)
//Two worker nodes needed to create BGP Advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("Label the namespace")
_, errNs := oc.AsAdmin().Run("label").Args("namespace", ns, namespaceLabelKey+"="+namespaceLabelValue[0], "--overwrite").Output()
o.Expect(errNs).NotTo(o.HaveOccurred())
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = "bgp-test"
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspools with three addresses, including two buggy ones")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: ipAddressList[:],
namespaces: namespaces[:],
priority: 0,
avoidBuggyIPs: false,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Create services to verify it is assigned buggy IP addresses")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
for i := 0; i < 2; i++ {
svc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-" + strconv.Itoa(i),
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedIPAddressList[i])).To(o.BeTrue())
}
exutil.By("8. Delete the previously created services and set avoidBuggyIP to true in ip address pool")
for i := 0; i < 2; i++ {
removeResource(oc, true, true, "service", "hello-world-"+testID+"-"+strconv.Itoa(i), "-n", namespaces[0])
}
addressList, err := json.Marshal(ipAddressList)
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo := fmt.Sprintf("{\"spec\":{\"avoidBuggyIPs\": true, \"addresses\": %s}}", string(addressList))
patchResourceAsAdmin(oc, "ipaddresspools/"+ipAddresspool.name, patchInfo, "metallb-system")
exutil.By("9. Verify the service is created with ip address that is not a buggy")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID + "-3",
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s External IP is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedIPAddressList[2])).To(o.BeTrue())
exutil.By("10. OCPBUGS-3825 Check BGP password is not in clear text")
//https://issues.redhat.com/browse/OCPBUGS-3825
podList, podListErr := exutil.GetAllPodsWithLabel(oc, opNamespace, "component=frr-k8s")
o.Expect(podListErr).NotTo(o.HaveOccurred())
o.Expect(len(podList)).NotTo(o.Equal(0))
searchString := fmt.Sprintf("neighbor '%s' password <retracted>", peerIPAddress)
for _, pod := range podList {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", opNamespace, pod, "-c", "reloader").OutputToFile("podlog")
o.Expect(err).NotTo(o.HaveOccurred())
grepOutput, err := exec.Command("bash", "-c", "cat "+output+" | grep -i '"+searchString+"' | wc -l").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Found %s occurences in logs of %s pod", grepOutput, pod)
o.Expect(grepOutput).NotTo(o.Equal(0))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
1ba10268-16f6-4dee-b019-2549b1d5ca81
|
Author:qiowang-High-46652-Verify LoadBalancer service can be created running at Layer 3 using BGP peering with BFD profile [Serial]
|
['"context"', '"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:qiowang-High-46652-Verify LoadBalancer service can be created running at Layer 3 using BGP peering with BFD profile [Serial]", func() {
var (
workers []string
ipaddresspools []string
bgpPeers []string
namespaces []string
expectedHostPrefixes []string
bgpPassword string
expectedAddress1 = "10.10.10.1"
bfdEnabled = "yes"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
BFDProfileTemplate = filepath.Join(testDataDir, "bfdprofile-template.yaml")
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
BGPPeerTemplate = filepath.Join(testDataDir, "bgppeer-template.yaml")
bgpAdvertisementTemplate = filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
)
//Two worker nodes needed to create BGP Advertisement object
workerList, getWorkersErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getWorkersErr).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run on cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Create BFD profile")
BFDProfileCR := bfdProfileResource{
name: "bfd-profile-46652",
namespace: opNamespace,
detectMultiplier: 37,
echoMode: true,
echoReceiveInterval: 38,
echoTransmitInterval: 39,
minimumTtl: 10,
passiveMode: true,
receiveInterval: 35,
transmitInterval: 35,
template: BFDProfileTemplate,
}
defer removeResource(oc, true, true, "bfdprofile", BFDProfileCR.name, "-n", BFDProfileCR.namespace)
o.Expect(createBFDProfileCR(oc, BFDProfileCR)).To(o.BeTrue())
exutil.By("2. Set up upstream/external BGP router, enable BFD")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword, bfdEnabled, BFDProfileCR.name)).To(o.BeTrue())
exutil.By("3. Create IP addresspool")
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test46652")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-bgp-46652",
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
exutil.By("4. Create BGP Peer")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: "",
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
bgpPeers = append(bgpPeers, BGPPeerCR.name)
exutil.By("5. Patch the BGPPeer with BFD Profile")
patchBFDProfile := fmt.Sprintf("{\"spec\":{\"bfdProfile\": \"%s\"}}", BFDProfileCR.name)
patchResourceAsAdmin(oc, "bgppeer/"+BGPPeerCR.name, patchBFDProfile, "metallb-system")
exutil.By("6. Create BGP Advertisement")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-46652",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddresspools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("8. Check BFD Session is up")
o.Expect(checkBFDSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("9. Create a service")
svc := loadBalancerServiceResource{
name: "hello-world-46652",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
statusErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-46652 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("10. Verify route is advertised")
expectedHostPrefixes = append(expectedHostPrefixes, expectedAddress1+"/32")
o.Expect(verifyHostPrefixAdvertised(oc, bgpRouterNamespaceWithSuffix, expectedHostPrefixes)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
1a6c6f3b-231f-4ac3-acad-829bdb80ac1d
|
Author:asood-High-50945-Verify the L2 and L3 IP address can be assigned to services respectively from the IP address pool based on the advertisement.[Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-50945-Verify the L2 and L3 IP address can be assigned to services respectively from the IP address pool based on the advertisement.[Serial]", func() {
var (
testID = "50945"
workers []string
bgpPeers []string
namespaces []string
ipaddresspools = make(map[int][]string)
expectedHostPrefixes []string
bgpPassword string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
BGPPeerTemplate = filepath.Join(testDataDir, "bgppeer-template.yaml")
bgpAdvertisementTemplate = filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
l2AdvertisementTemplate = filepath.Join(testDataDir, "l2advertisement-template.yaml")
loadBalancerServiceAnnotatedTemplate = filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
loadBalancerServiceTemplate = filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
l2Addresses = [2][2]string{{"192.168.111.65-192.168.111.69", "192.168.111.70-192.168.111.74"}, {"192.168.111.75-192.168.111.79", "192.168.111.80-192.168.111.85"}}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
expectedAddressList = [2]string{"10.10.10.1", "192.168.111.65"}
)
//Two worker nodes needed to create BGP Advertisement object
workerList, getWorkersErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getWorkersErr).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run on cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("1. Get the namespace")
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test"+testID)
exutil.By("Label the namespace")
_, errNsLabel := oc.AsAdmin().Run("label").Args("namespace", ns, namespaceLabelKey+"="+namespaceLabelValue[0], "--overwrite").Output()
o.Expect(errNsLabel).NotTo(o.HaveOccurred())
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = "bgp-test"
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create L3 and L2 IP addresspools")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-bgp-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools[0] = append(ipaddresspools[0], ipAddresspool.name)
ipAddresspool.name = "ipaddresspool-l2-" + testID
ipAddresspool.addresses = l2Addresses[0][:]
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools[1] = append(ipaddresspools[1], ipAddresspool.name)
exutil.By("6. Create BGP and L2 Advertisements")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-" + testID,
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddresspools[0],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
l2advertisement := l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: ipaddresspools[1],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
svcList := [2]string{"-l3-", "-l2-"}
exutil.By("7. Create L2 and L3 service")
annotatedSvc := loadBalancerServiceResource{
name: "",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: "",
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
for i := 0; i < 2; i++ {
annotatedSvc.name = "hello-world" + svcList[i] + testID
annotatedSvc.annotationValue = ipaddresspools[i][0]
o.Expect(createLoadBalancerService(oc, annotatedSvc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, annotatedSvc.namespace, annotatedSvc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, annotatedSvc.namespace, annotatedSvc.name)
e2e.Logf("The %s service with annotation %s:%s created successfully, and assigned %s", annotatedSvc.name, annotatedSvc.annotationKey, annotatedSvc.annotationValue, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddressList[i])).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
}
exutil.By("8. Verify route is advertised")
expectedHostPrefixes = append(expectedHostPrefixes, expectedAddressList[0]+"/32")
o.Expect(verifyHostPrefixAdvertised(oc, bgpRouterNamespaceWithSuffix, expectedHostPrefixes)).To(o.BeTrue())
exutil.By(fmt.Sprintf("9. Update the L2 IP Addresspool %s", ipaddresspools[1][0]))
patchL2AddressPool := `[{"op": "replace", "path": "/spec/serviceAllocation/serviceSelectors/0/matchLabels", "value": {"environ": "Dev"}}, {"op": "replace", "path": "/spec/serviceAllocation/serviceSelectors/0/matchExpressions", "value":[{"key":"environ", "operator":"In", "values":["Dev"]}]} ]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "ipaddresspools", ipaddresspools[1][0], "--type=json", "-p", patchL2AddressPool).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("10. Delete previously created services and create new ones without ip address pool annotation")
for i := 0; i < 2; i++ {
svcName := "hello-world" + svcList[i] + testID
removeResource(oc, true, true, "service", svcName, "-n", ns)
}
svcLabelValList := [2]string{"Test", "Dev"}
svc := loadBalancerServiceResource{
name: "",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: "",
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
exutil.By("11. Create L3 and L2 services")
for i := 0; i < 2; i++ {
svc.name = "hello-world" + svcList[i] + testID
svc.labelValue = svcLabelValList[i]
defer removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The %s service created successfully IP %s assigned to it", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddressList[i])).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
340cb55e-8824-4bdd-87cc-e564776ed83e
|
Author:qiowang-High-51187-High-54820-Validate ipAddressPoolSelector, ipAddressPool and nodeSelector are honored when advertising service IP address via BGP advertisement [Serial]
|
['"context"', '"encoding/json"', '"path/filepath"', '"strconv"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:qiowang-High-51187-High-54820-Validate ipAddressPoolSelector, ipAddressPool and nodeSelector are honored when advertising service IP address via BGP advertisement [Serial]", func() {
var (
workers []string
nodeIPs []string
ipaddresspools []string
bgpPeers []string
namespaces []string
expectedPaths1 []string
expectedPaths2 []string
expectedPaths3 []string
bgpPassword string
expectedAddress1 = "10.10.10.1"
expectedAddress2 = "10.10.12.1"
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
ipAddressPoolSelectorsKey = "zone"
ipAddressPoolSelectorsValues = [2][2]string{{"east"}, {"west"}}
ipAddresspoolTemplate = filepath.Join(testDataDir, "ipaddresspool-template.yaml")
BGPPeerTemplate = filepath.Join(testDataDir, "bgppeer-template.yaml")
bgpAdvertisementTemplate = filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
loadBalancerServiceAnnotatedTemplate = filepath.Join(testDataDir, "loadbalancer-svc-annotated-template.yaml")
)
ns := oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test51187")
//Two worker nodes needed to create BGP Advertisement object
workerList, getWorkersErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(getWorkersErr).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run on cluster that has at least two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
nodeIP := getNodeIPv4(oc, ns, workerList.Items[i].Name)
nodeIPs = append(nodeIPs, nodeIP)
}
exutil.By("1. Set up upstream/external BGP router, enable BFD")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix, "--ignore-not-found").Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("2. Create two IP addresspools with different labels")
for i := 0; i < 2; i++ {
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-bgp-51187-" + strconv.Itoa(i),
namespace: opNamespace,
addresses: bgpAddresses[i][:],
namespaces: namespaces,
label1: ipAddressPoolSelectorsKey,
value1: ipAddressPoolSelectorsValues[i][0],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddresspools = append(ipaddresspools, ipAddresspool.name)
}
exutil.By("3. Create BGP Peer")
BGPPeerCR := bgpPeerResource{
name: "peer-64500",
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: "",
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
bgpPeers = append(bgpPeers, BGPPeerCR.name)
exutil.By("4. Create BGP Advertisement with ipAddressPool and nodeSelectors")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-51187",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddresspools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("5. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("6. Create a service requesting address from the first ipaddresspools")
svc := loadBalancerServiceResource{
name: "hello-world-51187",
namespace: ns,
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
annotationKey: "metallb.universe.tf/address-pool",
annotationValue: ipaddresspools[0],
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceAnnotatedTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The service %s 's External IP for OCP-51187 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress1)).To(o.BeTrue())
masterNodeList, getMastersErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(getMastersErr).NotTo(o.HaveOccurred())
result := validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("7. Verify route is advertised")
expectedPaths1 = append(expectedPaths1, "2 available", nodeIPs[0], nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, expectedAddress1, expectedPaths1)).To(o.BeTrue())
exutil.By("8. Remove the previously created services")
removeResource(oc, true, true, "service", svc.name, "-n", svc.namespace)
removeResource(oc, true, true, "replicationcontroller", svc.name, "-n", svc.namespace)
exutil.By("9. Update BGP Advertisement, update ipAddressPool and nodeSelectors, add ipAddressPoolSelectors")
patchBgpAdvertisement := `[{"op": "replace", "path": "/spec/ipAddressPools", "value": [""]}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["` + workers[0] + `"]}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "bgpadvertisement", bgpAdvertisement.name, "--type=json", "-p", patchBgpAdvertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
patchIPAddrPoolSelectors := `{"spec":{"ipAddressPoolSelectors":[{"matchExpressions": [{"key": "` + ipAddressPoolSelectorsKey + `","operator": "In","values": ["` + ipAddressPoolSelectorsValues[1][0] + `"]}]}]}}`
patchResourceAsAdmin(oc, "bgpadvertisement/"+bgpAdvertisement.name, patchIPAddrPoolSelectors, "metallb-system")
exutil.By("10. Check BGP Session between speakers and Router")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("11. Create a service requesting address from the second ipaddresspools")
svc.annotationValue = ipaddresspools[1]
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceAnnotatedTemplate)).To(o.BeTrue())
statusErr = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(statusErr).NotTo(o.HaveOccurred())
svcIP = getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
e2e.Logf("The recreated service %s 's External IP for OCP-51187 test case is %q", svc.name, svcIP)
o.Expect(strings.Contains(svcIP, expectedAddress2)).To(o.BeTrue())
result = validateService(oc, masterNodeList[0], svcIP)
o.Expect(result).To(o.BeTrue())
exutil.By("12. Verify route is advertised")
expectedPaths2 = append(expectedPaths2, "1 available", nodeIPs[0])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, expectedAddress2, expectedPaths2)).To(o.BeTrue())
exutil.By("13. OCP-54820-Add label to the second worker node")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workers[1], "zone")
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workers[1], "zone", "east")
exutil.By("14. OCP-54820-Edit the BGPadvertisement to modify the node selection")
patchBgpAdvertisement = `[{"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/key", "value":"zone"}, {"op": "replace", "path": "/spec/nodeSelectors/0/matchExpressions/0/values", "value":["east"]}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "bgpadvertisement", bgpAdvertisement.name, "--type=json", "-p", patchBgpAdvertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("15. OCP-54820-Check the changes to nodeSelector in BGPadvertisements are reflected which node advertises the host prefix for service")
expectedPaths3 = append(expectedPaths3, "1 available", nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, expectedAddress2, expectedPaths3)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
90d95af2-e89d-4f01-b2b4-00dee7a54946
|
Author:asood-Longduration-NonPreRelease-High-46110-Verify service is functional if BGP peer is modified to cause session to re establish. [Serial]
|
['"context"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-Longduration-NonPreRelease-High-46110-Verify service is functional if BGP peer is modified to cause session to re establish. [Serial]", func() {
var (
testID = "46110"
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
bgpPassword string
nodeIPs []string
expectedPath []string
)
//Two worker nodes needed to create BGP Advertisement object
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList.Items[i].Name)
nodeIP := getNodeIPv4(oc, ns, workerList.Items[i].Name)
nodeIPs = append(nodeIPs, nodeIP)
}
masterNodeList, masterNodeErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(masterNodeErr).NotTo(o.HaveOccurred())
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test46110")
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500-" + testID,
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router is established")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Create a LB service and verify it is accessible ")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID,
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
exutil.By("8. Verify route is advertised")
expectedPath = append(expectedPath, "2 available", nodeIPs[0], nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, expectedPath)).To(o.BeTrue())
exutil.By("9. Verify by setting password for BGP peer the session is no longer established")
patchBGPPeer := `{"spec":{"password":"bgp-test"}}`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", BGPPeerCR.name, "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix, 60*time.Second)).To(o.BeFalse())
exutil.By("10. Verify by unsetting password for BGP peer the session is re established")
patchBGPPeer = `{"spec":{"password":""}}`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bgppeer", BGPPeerCR.name, "-n", opNamespace, "--type=merge", "-p", patchBGPPeer).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("11. Verify route is advertised after the BGP session is re established")
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, expectedPath)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
1c06b981-600f-4145-9261-2b46c9a03144
|
Author:asood-Longduration-NonPreRelease-High-46105-Verify only the specified node BGP peered advertise network prefixes. [Serial]
|
['"encoding/json"', '"fmt"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-Longduration-NonPreRelease-High-46105-Verify only the specified node BGP peered advertise network prefixes. [Serial]", func() {
var (
testID = "46105"
ns string
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [1]string{"Test"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
workers []string
ipaddrpools []string
bgpPeers []string
bgpPassword string
nodeIPs []string
expectedPath []string
newExpectedPath []string
)
//Two worker nodes needed to create BGP Advertisement object
workerList := excludeSriovNodes(oc)
if len(workerList) < 2 {
g.Skip("This case requires 2 nodes, but the cluster has less than two nodes")
}
for i := 0; i < 2; i++ {
workers = append(workers, workerList[i])
nodeIP := getNodeIPv4(oc, ns, workerList[i])
nodeIPs = append(nodeIPs, nodeIP)
}
masterNodeList, masterNodeErr := exutil.GetClusterNodesBy(oc, "master")
o.Expect(masterNodeErr).NotTo(o.HaveOccurred())
exutil.By("1. Get the namespace")
ns = oc.Namespace()
namespaces = append(namespaces, ns)
namespaces = append(namespaces, "test46110")
exutil.By("2. Set up upstream/external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
bgpPassword = ""
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3. Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500-" + testID,
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("4. Check BGP Session between speakers and Router is established")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
exutil.By("5. Create IP addresspool")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l3-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: bgpAddresses[0][:],
namespaces: namespaces,
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: serviceSelectorValue[:],
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
ipaddrpools = append(ipaddrpools, ipAddresspool.name)
exutil.By("6. Create BGP Advertisement")
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv",
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: ipaddrpools[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("7. Update the BGP Peer with selected nodes ")
bgppeerWorkersList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
patchBGPPeer := fmt.Sprintf("{\"spec\":{\"nodeSelectors\": [{\"matchExpressions\": [{\"key\":\"kubernetes.io/hostname\", \"operator\": \"In\", \"values\": %s}]}]}}", string(bgppeerWorkersList))
patchResourceAsAdmin(oc, "bgppeer/"+BGPPeerCR.name, patchBGPPeer, opNamespace)
exutil.By("8. Create a LB service and verify it is accessible ")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "hello-world-" + testID,
namespace: namespaces[0],
externaltrafficpolicy: "Cluster",
labelKey: serviceLabelKey,
labelValue: serviceLabelValue,
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
o.Expect(createLoadBalancerService(oc, svc, loadBalancerServiceTemplate)).To(o.BeTrue())
err = checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
o.Expect(validateService(oc, masterNodeList[0], svcIP)).To(o.BeTrue())
exutil.By("9. Verify route is advertised")
expectedPath = append(expectedPath, "2 available", nodeIPs[0], nodeIPs[1])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, expectedPath)).To(o.BeTrue())
exutil.By("10. Label one of the nodes")
metalLBLabel := "feature.node.kubernetes.io/bgp.capable"
e2enode.AddOrUpdateLabelOnNode(oc.KubeFramework().ClientSet, workerList[0], metalLBLabel, "true")
defer e2enode.RemoveLabelOffNode(oc.KubeFramework().ClientSet, workerList[0], metalLBLabel)
exutil.By("11. Update BGP peer node selector with node that is labelled")
patchBGPPeer = `[{"op": "replace", "path": "/spec/nodeSelectors", "value":[{"matchExpressions": [{"key": "` + metalLBLabel + `", "operator": "Exists"}]}]}]`
patchReplaceResourceAsAdmin(oc, "bgppeer/"+BGPPeerCR.name, patchBGPPeer, opNamespace)
exutil.By("12. Verify the advertised routes")
newExpectedPath = append(newExpectedPath, "1 available", nodeIPs[0])
o.Expect(checkBGPv4RouteTableEntry(oc, bgpRouterNamespaceWithSuffix, svcIP, newExpectedPath)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
7bd5c4ee-4b4e-4b14-94eb-9e8e9c13ec86
|
Author:asood-High-76801-Validate LB services can be created in UDN with MetalLB operator on non cloud platform. [Serial]
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb.go
|
g.It("Author:asood-High-76801-Validate LB services can be created in UDN with MetalLB operator on non cloud platform. [Serial]", func() {
var (
namespaces []string
serviceSelectorKey = "environ"
serviceSelectorValue = [2]string{"Test", "Dev"}
namespaceLabelKey = "region"
namespaceLabelValue = [1]string{"NA"}
interfaces = [3]string{"br-ex", "eno1", "eno2"}
workers []string
l2IPAddressPool []string
l3IPAddressPool []string
bgpPeers []string
bgpPassword = ""
bgpCommunties = []string{"65001:65500"}
cidr = []string{"10.150.0.0/16", "10.151.0.0/16"}
mtu int32 = 1300
prefix int32 = 24
testID = "76801"
proxyHost = "10.8.1.181"
routerNS = ""
udnTestDataDir = exutil.FixturePath("testdata", "networking")
udnCRDL2SingleStack = filepath.Join(udnTestDataDir, "udn/udn_crd_layer2_singlestack_template.yaml")
udnCRDL3SingleStack = filepath.Join(udnTestDataDir, "udn/udn_crd_singlestack_template.yaml")
udnNADTemplate = filepath.Join(udnTestDataDir, "udn/udn_nad_template.yaml")
)
exutil.By("1. Obtain the workers")
workerList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
if len(workerList.Items) < 2 {
g.Skip("These cases can only be run for cluster that has atleast two worker nodes")
}
for i := 0; i < len(workerList.Items); i++ {
workers = append(workers, workerList.Items[i].Name)
}
exutil.By("2. Set up user defined network namespaces")
for i := 0; i < 4; i++ {
oc.CreateNamespaceUDN()
namespaces = append(namespaces, oc.Namespace())
}
exutil.By("2.1. Create CRD for UDN in first two namespaces")
udnResourceName := []string{"l2-network-udn", "l3-network-udn"}
udnTemplate := []string{udnCRDL2SingleStack, udnCRDL3SingleStack}
udnCRD := make([]udnCRDResource, 2)
for i := 0; i < 2; i++ {
udnCRD[i] = udnCRDResource{
crdname: udnResourceName[i],
namespace: namespaces[i],
role: "Primary",
mtu: mtu,
cidr: cidr[i],
prefix: prefix,
template: udnTemplate[i],
}
switch i {
case 0:
udnCRD[0].createLayer2SingleStackUDNCRD(oc)
case 1:
udnCRD[1].createUdnCRDSingleStack(oc)
default:
// Do nothing
}
err := waitUDNCRDApplied(oc, namespaces[i], udnCRD[i].crdname)
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2.2 Create NAD for UDN in last two namespaces")
udnNADResourceName := []string{"l2-network-nad", "l3-network-nad"}
topology := []string{"layer2", "layer3"}
udnNAD := make([]udnNetDefResource, 2)
for i := 0; i < 2; i++ {
udnNAD[i] = udnNetDefResource{
nadname: udnNADResourceName[i],
namespace: namespaces[i+2],
nad_network_name: udnNADResourceName[i],
topology: topology[i],
subnet: "",
mtu: mtu,
net_attach_def_name: fmt.Sprintf("%s/%s", namespaces[i+2], udnNADResourceName[i]),
role: "primary",
template: udnNADTemplate,
}
udnNAD[i].subnet = cidr[i]
udnNAD[i].createUdnNad(oc)
}
exutil.By("3.1 Set up external BGP router")
suffix := getRandomString()
bgpRouterNamespaceWithSuffix := bgpRouterNamespace + "-" + suffix
defer oc.DeleteSpecifiedNamespaceAsAdmin(bgpRouterNamespaceWithSuffix)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", bgpRouterPodName, "-n", bgpRouterNamespaceWithSuffix).Execute()
o.Expect(setUpExternalFRRRouter(oc, bgpRouterNamespaceWithSuffix, bgpPassword)).To(o.BeTrue())
exutil.By("3.2 Create BGP Peer")
BGPPeerTemplate := filepath.Join(testDataDir, "bgppeer-template.yaml")
BGPPeerCR := bgpPeerResource{
name: "peer-64500-" + testID,
namespace: opNamespace,
holdTime: "30s",
keepAliveTime: "10s",
password: bgpPassword,
myASN: myASN,
peerASN: peerASN,
peerAddress: peerIPAddress,
template: BGPPeerTemplate,
}
defer removeResource(oc, true, true, "bgppeers", BGPPeerCR.name, "-n", BGPPeerCR.namespace)
bgpPeers = append(bgpPeers, BGPPeerCR.name)
o.Expect(createBGPPeerCR(oc, BGPPeerCR)).To(o.BeTrue())
exutil.By("3.3 Check BGP Session between speakers and Router is established")
o.Expect(checkBGPSessions(oc, bgpRouterNamespaceWithSuffix)).To(o.BeTrue())
routerNS = getRouterPodNamespace(oc)
o.Expect(routerNS).NotTo(o.BeEmpty())
exutil.By("4. Create L2 and L3 IP addresspools")
ipAddresspoolTemplate := filepath.Join(testDataDir, "ipaddresspool-template.yaml")
ipAddresspool := ipAddressPoolResource{
name: "ipaddresspool-l2-" + testID,
namespace: opNamespace,
label1: ipAddressPoolLabelKey,
value1: ipAddressPoolLabelVal,
addresses: l2Addresses[0][:],
namespaces: namespaces[:],
priority: 10,
avoidBuggyIPs: true,
autoAssign: true,
serviceLabelKey: serviceSelectorKey,
serviceLabelValue: serviceSelectorValue[0],
serviceSelectorKey: serviceSelectorKey,
serviceSelectorOperator: "In",
serviceSelectorValue: []string{serviceSelectorValue[0], "dummy"},
namespaceLabelKey: namespaceLabelKey,
namespaceLabelValue: namespaceLabelValue[0],
namespaceSelectorKey: namespaceLabelKey,
namespaceSelectorOperator: "In",
namespaceSelectorValue: namespaceLabelValue[:],
template: ipAddresspoolTemplate,
}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
result := createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)
o.Expect(result).To(o.BeTrue())
l2IPAddressPool = append(l2IPAddressPool, ipAddresspool.name)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipAddresspool.name, "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", opNamespace)
exutil.By("SUCCESS - L2 IP Addresspool created")
ipAddresspool.name = "ipaddresspool-l3-" + testID
ipAddresspool.addresses = l3Addresses[0][:]
ipAddresspool.serviceLabelValue = serviceSelectorValue[1]
ipAddresspool.serviceSelectorValue = []string{serviceSelectorValue[1], "dummy"}
defer removeResource(oc, true, true, "ipaddresspools", ipAddresspool.name, "-n", ipAddresspool.namespace)
o.Expect(createIPAddressPoolCR(oc, ipAddresspool, ipAddresspoolTemplate)).To(o.BeTrue())
l3IPAddressPool = append(l3IPAddressPool, ipAddresspool.name)
patchResourceAsAdmin(oc, "ipaddresspools/"+ipAddresspool.name, "{\"spec\":{\"serviceAllocation\": {\"namespaces\": []}}}", opNamespace)
exutil.By("SUCCESS - L3 IP Addresspool created")
exutil.By("5. Create L2 and BGP Advertisement")
l2AdvertisementTemplate := filepath.Join(testDataDir, "l2advertisement-template.yaml")
l2advertisement := l2AdvertisementResource{
name: "l2-adv-" + testID,
namespace: opNamespace,
ipAddressPools: l2IPAddressPool[:],
interfaces: interfaces[:],
nodeSelectorValues: workers[:],
template: l2AdvertisementTemplate,
}
defer removeResource(oc, true, true, "l2advertisements", l2advertisement.name, "-n", l2advertisement.namespace)
o.Expect(createL2AdvertisementCR(oc, l2advertisement, l2AdvertisementTemplate)).To(o.BeTrue())
l2AdvWorkersList, err := json.Marshal(workers)
o.Expect(err).NotTo(o.HaveOccurred())
patchL2Advertisement := fmt.Sprintf("[{\"op\": \"replace\", \"path\": \"/spec/nodeSelectors/0/matchExpressions/0/values\", \"value\":%s}]", l2AdvWorkersList)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", opNamespace, "l2advertisement", l2advertisement.name, "--type=json", "-p", patchL2Advertisement).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
bgpAdvertisementTemplate := filepath.Join(testDataDir, "bgpadvertisement-template.yaml")
bgpAdvertisement := bgpAdvertisementResource{
name: "bgp-adv-" + testID,
namespace: opNamespace,
aggregationLength: 32,
aggregationLengthV6: 128,
communities: bgpCommunties[:],
ipAddressPools: l3IPAddressPool[:],
nodeSelectorsKey: "kubernetes.io/hostname",
nodeSelectorsOperator: "In",
nodeSelectorValues: workers[:],
peer: bgpPeers[:],
template: bgpAdvertisementTemplate,
}
defer removeResource(oc, true, true, "bgpadvertisements", bgpAdvertisement.name, "-n", bgpAdvertisement.namespace)
o.Expect(createBGPAdvertisementCR(oc, bgpAdvertisement)).To(o.BeTrue())
exutil.By("6. Create LoadBalancer services")
loadBalancerServiceTemplate := filepath.Join(testDataDir, "loadbalancer-svc-template.yaml")
svc := loadBalancerServiceResource{
name: "",
namespace: "",
externaltrafficpolicy: "Cluster",
labelKey: serviceSelectorKey,
labelValue: "",
allocateLoadBalancerNodePorts: true,
template: loadBalancerServiceTemplate,
}
for _, ns := range namespaces {
for index, serviceSelector := range serviceSelectorValue {
svc.name = "hello-world-" + testID + "-" + strconv.Itoa(index)
svc.namespace = ns
svc.labelValue = serviceSelector
exutil.By(fmt.Sprintf("6.1 Create LoadBalancer service %s in %s", svc.name, svc.namespace))
o.Expect(createLoadBalancerService(oc, svc, svc.template)).To(o.BeTrue())
err := checkLoadBalancerSvcStatus(oc, svc.namespace, svc.name)
o.Expect(err).NotTo(o.HaveOccurred())
svcIP := getLoadBalancerSvcIP(oc, svc.namespace, svc.name)
//svcClusterIP := getSvcIPv4(oc, svc.namespace, svc.name )
exutil.By(fmt.Sprintf("6.2 Validating service %s using external IP %s", svc.name, svcIP))
svcIPCmd := fmt.Sprintf("curl -s -I --connect-timeout 5 %s:80", svcIP)
o.Eventually(func() bool {
cmdOutput, _ := exutil.RemoteShPodWithBashSpecifyContainer(oc, routerNS, "router-master1", "testcontainer", svcIPCmd)
return strings.Contains(cmdOutput, "200 OK")
}, "120s", "10s").Should(o.BeTrue(), "Service validation failed")
// L3 addresses are not accessible outside cluster
if index == 0 {
exutil.By(fmt.Sprintf("6.3 Validating service %s using external IP %s", svc.name, svcIP))
o.Eventually(func() bool {
return validateService(oc, proxyHost, svcIP)
}, "120s", "10s").Should(o.BeTrue(), "Service validation failed")
}
}
}
})
| |||||
file
|
openshift/openshift-tests-private
|
2cac0008-6e1f-42a3-b190-3c36ee429ecf
|
metallb_util
|
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
package networking
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
type subscriptionResource struct {
name string
namespace string
operatorName string
channel string
catalog string
catalogNamespace string
template string
}
type namespaceResource struct {
name string
template string
}
type operatorGroupResource struct {
name string
namespace string
targetNamespaces string
template string
}
type metalLBCRResource struct {
name string
namespace string
nodeSelectorKey string
nodeSelectorVal string
controllerSelectorKey string
controllerSelectorVal string
template string
}
type loadBalancerServiceResource struct {
name string
namespace string
protocol string
annotationKey string
annotationValue string
labelKey string
labelValue string
externaltrafficpolicy string
allocateLoadBalancerNodePorts bool
template string
}
type ipAddressPoolResource struct {
name string
namespace string
label1 string
value1 string
priority int
avoidBuggyIPs bool
autoAssign bool
addresses []string
namespaces []string
serviceLabelKey string
serviceLabelValue string
serviceSelectorKey string
serviceSelectorOperator string
serviceSelectorValue []string
namespaceLabelKey string
namespaceLabelValue string
namespaceSelectorKey string
namespaceSelectorOperator string
namespaceSelectorValue []string
template string
}
type l2AdvertisementResource struct {
name string
namespace string
interfaces []string
ipAddressPools []string
nodeSelectorsKey string
nodeSelectorsOperator string
nodeSelectorValues []string
template string
}
type bgpPeerResource struct {
name string
namespace string
bfdProfile string
holdTime string
password string
keepAliveTime string
myASN int
peerASN int
peerAddress string
peerPort int
template string
}
type bgpAdvertisementResource struct {
name string
namespace string
communities []string
aggregationLength int
aggregationLengthV6 int
ipAddressPools []string
nodeSelectorsKey string
nodeSelectorsOperator string
nodeSelectorValues []string
peer []string
template string
}
type bfdProfileResource struct {
name string
namespace string
detectMultiplier int
echoMode bool
echoReceiveInterval int
echoTransmitInterval int
minimumTtl int
passiveMode bool
receiveInterval int
transmitInterval int
template string
}
type routerConfigMapResource struct {
name string
namespace string
bgpd_enabled string
bfdd_enabled string
routerIP string
node1IP string
node2IP string
node3IP string
node4IP string
node5IP string
password string
bfdProfile string
template string
}
type routerNADResource struct {
name string
namespace string
interfaceName string
template string
}
type routerPodResource struct {
name string
namespace string
configMapName string
NADName string
routerIP string
masterNodeName string
template string
}
type communityResource struct {
name string
namespace string
communityName string
value1 string
value2 string
template string
}
// struct to be used with node and pod affinity templates
// nodeaffinity template needs param1 as node1, param2 as node2
// pod affinity and anti affinity needs param1 as namespace1 and param2 as namespace1
type metalLBAffinityCRResource struct {
name string
namespace string
param1 string
param2 string
template string
}
var (
snooze time.Duration = 720
bgpRouterIP = "192.168.111.60/24"
bgpRouterConfigMapName = "router-master1-config"
bgpRouterPodName = "router-master1"
bgpRouterNamespace = "router-system"
bgpRouterNADName = "external1"
)
func operatorInstall(oc *exutil.CLI, sub subscriptionResource, ns namespaceResource, og operatorGroupResource) (status bool) {
//Installing Operator
g.By(" (1) INSTALLING Operator in the namespace")
//Applying the config of necessary yaml files from templates to create metallb operator
g.By("(1.1) Applying namespace template")
err0 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ns.template, "-p", "NAME="+ns.name)
if err0 != nil {
e2e.Logf("Error creating namespace %v", err0)
}
g.By("(1.2) Applying operatorgroup yaml")
err0 = applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", og.template, "-p", "NAME="+og.name, "NAMESPACE="+og.namespace, "TARGETNAMESPACES="+og.targetNamespaces)
if err0 != nil {
e2e.Logf("Error creating operator group %v", err0)
}
g.By("(1.3) Creating subscription yaml from template")
// no need to check for an existing subscription
err0 = applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "OPERATORNAME="+sub.operatorName, "SUBSCRIPTIONNAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"CATALOGSOURCE="+sub.catalog, "CATALOGSOURCENAMESPACE="+sub.catalogNamespace)
if err0 != nil {
e2e.Logf("Error creating subscription %v", err0)
}
//confirming operator install
g.By("(1.4) Verify the operator finished subscribing")
errCheck := wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
subState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(subState, "AtLatestKnown") == 0 {
return true, nil
}
// log full status of sub for installation failure debugging
subState, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status}").Output()
e2e.Logf("Status of subscription: %v", subState)
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Subscription %s in namespace %v does not have expected status", sub.name, sub.namespace))
g.By("(1.5) Get csvName")
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
errCheck = wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", sub.namespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(csvState, "Succeeded") == 0 {
e2e.Logf("CSV check complete!!!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("CSV %v in %v namespace does not have expected status", csvName, sub.namespace))
return true
}
func createMetalLBCR(oc *exutil.CLI, metallbcr metalLBCRResource, metalLBCRTemplate string) (status bool) {
g.By("Creating MetalLB CR from template")
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", metallbcr.template, "-p", "NAME="+metallbcr.name, "NAMESPACE="+metallbcr.namespace,
"NODESELECTORKEY="+metallbcr.nodeSelectorKey, "NODESELECTORVAL="+metallbcr.nodeSelectorVal,
"CONTROLLERSELECTORKEY="+metallbcr.controllerSelectorKey, "CONTROLLERSELECTORVAL="+metallbcr.controllerSelectorVal)
if err != nil {
e2e.Logf("Error creating MetalLB CR %v", err)
return false
}
err = waitForPodWithLabelReady(oc, metallbcr.namespace, "component=speaker")
exutil.AssertWaitPollNoErr(err, "The pods with label component=speaker are not ready")
if err != nil {
e2e.Logf("Speaker Pods did not transition to ready state %v", err)
return false
}
err = waitForPodWithLabelReady(oc, metallbcr.namespace, "component=frr-k8s")
exutil.AssertWaitPollNoErr(err, "The pods with label component=frr-k8s are not ready")
if err != nil {
e2e.Logf("FRR k8s Pods did not transition to ready state %v", err)
return false
}
err = waitForPodWithLabelReady(oc, metallbcr.namespace, "component=controller")
exutil.AssertWaitPollNoErr(err, "The pod with label component=controller is not ready")
if err != nil {
e2e.Logf("Controller pod did not transition to ready state %v", err)
return false
}
e2e.Logf("Controller and speaker pods created successfully")
return true
}
func validateAllWorkerNodeMCR(oc *exutil.CLI, namespace string) bool {
var (
podList = []string{}
)
nodeList, err := exutil.GetClusterNodesBy(oc, "worker")
if err != nil {
e2e.Logf("Unable to get nodes to determine if node is worker node %s", err)
return false
}
speakerPodList, errSpeaker := exutil.GetAllPodsWithLabel(oc, namespace, "component=speaker")
if errSpeaker != nil {
e2e.Logf("Unable to get list of speaker pods %s", err)
return false
}
if len(speakerPodList) != len(nodeList) {
e2e.Logf("Speaker pods not scheduled on all worker nodes")
return false
}
frrk8sPodList, errFrrk8s := exutil.GetAllPodsWithLabel(oc, namespace, "component=frr-k8s")
if errFrrk8s != nil {
e2e.Logf("Unable to get list of frr-k8s pods %s", err)
return false
}
if len(frrk8sPodList) != len(nodeList) {
e2e.Logf("K8s FRR pods not scheduled on all worker nodes")
return false
}
podList = append(podList, speakerPodList[:]...)
podList = append(podList, frrk8sPodList[:]...)
// Iterate over the speaker and frr-k8s pods to validate they are scheduled on node that is worker node
for _, pod := range podList {
nodeName, _ := exutil.GetPodNodeName(oc, namespace, pod)
e2e.Logf("Pod %s, node name %s", pod, nodeName)
if isWorkerNode(oc, nodeName, nodeList) == false {
return false
}
}
return true
}
func isWorkerNode(oc *exutil.CLI, nodeName string, nodeList []string) bool {
for i := 0; i <= (len(nodeList) - 1); i++ {
if nodeList[i] == nodeName {
return true
}
}
return false
}
func createLoadBalancerService(oc *exutil.CLI, loadBalancerSvc loadBalancerServiceResource, loadBalancerServiceTemplate string) (status bool) {
var msg, svcFile string
var err error
if strings.Contains(loadBalancerServiceTemplate, "annotated") {
e2e.Logf("Template %s", loadBalancerServiceTemplate)
svcFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", loadBalancerSvc.template, "-p", "NAME="+loadBalancerSvc.name, "NAMESPACE="+loadBalancerSvc.namespace,
"PROTOCOL="+loadBalancerSvc.protocol,
"LABELKEY1="+loadBalancerSvc.labelKey, "LABELVALUE1="+loadBalancerSvc.labelValue,
"ANNOTATIONKEY="+loadBalancerSvc.annotationKey, "ANNOTATIONVALUE="+loadBalancerSvc.annotationValue,
"EXTERNALTRAFFICPOLICY="+loadBalancerSvc.externaltrafficpolicy, "NODEPORTALLOCATION="+strconv.FormatBool(loadBalancerSvc.allocateLoadBalancerNodePorts)).OutputToFile(getRandomString() + "svc.json")
} else {
svcFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", loadBalancerSvc.template, "-p", "NAME="+loadBalancerSvc.name, "NAMESPACE="+loadBalancerSvc.namespace,
"PROTOCOL="+loadBalancerSvc.protocol,
"LABELKEY1="+loadBalancerSvc.labelKey, "LABELVALUE1="+loadBalancerSvc.labelValue,
"EXTERNALTRAFFICPOLICY="+loadBalancerSvc.externaltrafficpolicy, "NODEPORTALLOCATION="+strconv.FormatBool(loadBalancerSvc.allocateLoadBalancerNodePorts)).OutputToFile(getRandomString() + "svc.json")
}
g.By("Creating service file")
if err != nil {
e2e.Logf("Error creating LoadBalancerService %v with %v", err, svcFile)
return false
}
g.By("Applying service file " + svcFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", svcFile, "-n", loadBalancerSvc.namespace).Output()
if err != nil {
e2e.Logf("Could not apply svcFile %v %v", msg, err)
return false
}
return true
}
// statusCheckTime is interval and timeout in seconds e.g. 10 and 30
func checkLoadBalancerSvcStatus(oc *exutil.CLI, namespace string, svcName string, statusCheckTime ...time.Duration) error {
interval := 10 * time.Second
timeout := 300 * time.Second
if len(statusCheckTime) > 0 {
e2e.Logf("Interval %s, Timeout %s", statusCheckTime[0], statusCheckTime[1])
interval = statusCheckTime[0]
timeout = statusCheckTime[1]
}
return wait.Poll(interval, timeout, func() (bool, error) {
e2e.Logf("Checking status of service %s", svcName)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.status.loadBalancer.ingress[0].ip}").Output()
if err != nil {
e2e.Logf("Failed to get service status, error:%v. Trying again", err)
return false, nil
}
if strings.Contains(output, "<pending>") || output == "" {
e2e.Logf("Failed to assign address to service, error:%v. Trying again", err)
return false, nil
}
return true, nil
})
}
func getLoadBalancerSvcIP(oc *exutil.CLI, namespace string, svcName string) string {
svcIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.status.loadBalancer.ingress[0].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("LoadBalancer service %s's, IP is :%s", svcName, svcIP)
return svcIP
}
func validateService(oc *exutil.CLI, curlHost string, svcExternalIP string) bool {
e2e.Logf("Validating service with IP %s", svcExternalIP)
curlHostIP := net.ParseIP(curlHost)
var curlOutput string
var curlErr error
connectTimeout := "5"
if curlHostIP.To4() != nil {
//From test runner with proxy
var cmdOutput []byte
svcChkCmd := fmt.Sprintf("curl -H 'Cache-Control: no-cache' -x 'http://%s:8888' %s --connect-timeout %s", curlHost, svcExternalIP, connectTimeout)
cmdOutput, curlErr = exec.Command("bash", "-c", svcChkCmd).Output()
curlOutput = string(cmdOutput)
} else {
curlOutput, curlErr = exutil.DebugNode(oc, curlHost, "curl", svcExternalIP, "--connect-timeout", connectTimeout)
}
if strings.Contains(curlOutput, "Hello OpenShift!") {
return true
}
if curlErr != nil {
e2e.Logf("Error %s", curlErr)
return false
}
e2e.Logf("Output of curl %s", curlOutput)
return false
}
func obtainMACAddressForIP(oc *exutil.CLI, nodeName string, svcExternalIP string, arpReuests int) (string, bool) {
defInterface, intErr := getDefaultInterface(oc)
o.Expect(intErr).NotTo(o.HaveOccurred())
cmd := fmt.Sprintf("arping -I %s %s -c %d", defInterface, svcExternalIP, arpReuests)
//https://issues.redhat.com/browse/OCPBUGS-10321 DebugNodeWithOptionsAndChroot replaced
output, arpErr := exutil.DebugNodeWithOptions(oc, nodeName, []string{"-q"}, "bin/sh", "-c", cmd)
//CI run the command returns non-zero exit code from debug container
if arpErr != nil {
return "", false
}
e2e.Logf("ARP request response %s", output)
re := regexp.MustCompile(`([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})`)
var macAddress string
if re.MatchString(output) {
submatchall := re.FindAllString(output, -1)
macAddress = submatchall[0]
return macAddress, true
} else {
return "", false
}
}
func getNodeAnnouncingL2Service(oc *exutil.CLI, svcName string, namespace string) string {
fieldSelectorArgs := fmt.Sprintf("reason=nodeAssigned,involvedObject.kind=Service,involvedObject.name=%s", svcName)
var nodeName string
errCheck := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
var allEvents []string
var svcEvents string
svcEvents, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", namespace, "--field-selector", fieldSelectorArgs).Output()
if err != nil {
return false, nil
}
if !strings.Contains(svcEvents, "No resources found") {
for _, index := range strings.Split(svcEvents, "\n") {
if strings.Contains(index, "announcing from node") {
e2e.Logf("Processing event service %s", index)
re := regexp.MustCompile(`"([^\"]+)"`)
event := re.FindString(index)
allEvents = append(allEvents, event)
}
}
nodeName = strings.Trim(allEvents[len(allEvents)-1], "\"")
return true, nil
}
return false, nil
})
o.Expect(nodeName).NotTo(o.BeEmpty())
o.Expect(errCheck).NotTo(o.HaveOccurred())
return nodeName
}
func isPlatformSuitable(oc *exutil.CLI) bool {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1/rdu2 cluster , skip for other envrionment!!!")
}
return true
}
func createIPAddressPoolCR(oc *exutil.CLI, ipAddresspool ipAddressPoolResource, addressPoolTemplate string) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipAddresspool.template, "-p", "NAME="+ipAddresspool.name, "NAMESPACE="+ipAddresspool.namespace, "PRIORITY="+strconv.Itoa(int(ipAddresspool.priority)),
"LABELKEY1="+ipAddresspool.label1, "LABELVALUE1="+ipAddresspool.value1, "AUTOASSIGN="+strconv.FormatBool(ipAddresspool.autoAssign), "AVOIDBUGGYIPS="+strconv.FormatBool(ipAddresspool.avoidBuggyIPs),
"ADDRESS1="+ipAddresspool.addresses[0], "ADDRESS2="+ipAddresspool.addresses[1], "NAMESPACE1="+ipAddresspool.namespaces[0], "NAMESPACE2="+ipAddresspool.namespaces[1],
"MLSERVICEKEY1="+ipAddresspool.serviceLabelKey, "MLSERVICEVALUE1="+ipAddresspool.serviceLabelValue, "MESERVICEKEY1="+ipAddresspool.serviceSelectorKey, "MESERVICEOPERATOR1="+ipAddresspool.serviceSelectorOperator, "MESERVICEKEY1VALUE1="+ipAddresspool.serviceSelectorValue[0],
"MLNAMESPACEKEY1="+ipAddresspool.serviceLabelKey, "MLNAMESPACEVALUE1="+ipAddresspool.serviceLabelValue, "MENAMESPACEKEY1="+ipAddresspool.namespaceSelectorKey, "MENAMESPACEOPERATOR1="+ipAddresspool.namespaceSelectorOperator, "MENAMESPACEKEY1VALUE1="+ipAddresspool.namespaceSelectorValue[0])
if err != nil {
e2e.Logf("Error creating IP Addresspool %v", err)
return false
}
return true
}
func createL2AdvertisementCR(oc *exutil.CLI, l2advertisement l2AdvertisementResource, l2AdvertisementTemplate string) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", l2advertisement.template, "-p", "NAME="+l2advertisement.name, "NAMESPACE="+l2advertisement.namespace,
"IPADDRESSPOOL1="+l2advertisement.ipAddressPools[0], "INTERFACE1="+l2advertisement.interfaces[0], "INTERFACE2="+l2advertisement.interfaces[1], "INTERFACE3="+l2advertisement.interfaces[2],
"WORKER1="+l2advertisement.nodeSelectorValues[0], "WORKER2="+l2advertisement.nodeSelectorValues[1])
if err != nil {
e2e.Logf("Error creating l2advertisement %v", err)
return false
}
return true
}
func getLoadBalancerSvcNodePort(oc *exutil.CLI, namespace string, svcName string) string {
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.ports[0].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return nodePort
}
func createConfigMap(oc *exutil.CLI, testDataDir string, namespace string, cmdArgs ...string) (status bool) {
var bgpPassword string
var bfdEnabled string
var bfdProfile string
//parse cmd arguments
if len(cmdArgs) > 1 {
e2e.Logf("BGP Password %s, BFD Status %s, BFD Profile %s", cmdArgs[0], cmdArgs[1], cmdArgs[2])
bgpPassword = cmdArgs[0]
bfdEnabled = cmdArgs[1]
bfdProfile = cmdArgs[2]
} else if len(cmdArgs) == 1 {
bgpPassword = cmdArgs[0]
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) >= 5).NotTo(o.BeFalse())
var nodeIPs []string
var nodeIP string
for _, node := range nodeList.Items {
nodeIP = getNodeIPv4(oc, namespace, node.Name)
nodeIPs = append(nodeIPs, nodeIP)
}
frrMasterSingleStackConfigMapTemplate := filepath.Join(testDataDir, "frr-master-singlestack-configmap-template.yaml")
frrMasterSingleStackConfigMap := routerConfigMapResource{
name: bgpRouterConfigMapName,
namespace: namespace,
bgpd_enabled: "yes",
bfdd_enabled: bfdEnabled,
routerIP: "192.168.111.60",
node1IP: nodeIPs[0],
node2IP: nodeIPs[1],
node3IP: nodeIPs[2],
node4IP: nodeIPs[3],
node5IP: nodeIPs[4],
password: bgpPassword,
bfdProfile: bfdProfile,
template: frrMasterSingleStackConfigMapTemplate,
}
errTemplate := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", frrMasterSingleStackConfigMap.template, "-p", "NAME="+frrMasterSingleStackConfigMap.name, "NAMESPACE="+frrMasterSingleStackConfigMap.namespace,
"BGPD_ENABLED="+frrMasterSingleStackConfigMap.bgpd_enabled, "BFDD_ENABLED="+frrMasterSingleStackConfigMap.bfdd_enabled, "ROUTER_IP="+frrMasterSingleStackConfigMap.routerIP, "NODE1_IP="+frrMasterSingleStackConfigMap.node1IP,
"NODE2_IP="+frrMasterSingleStackConfigMap.node2IP, "NODE3_IP="+frrMasterSingleStackConfigMap.node3IP, "NODE4_IP="+frrMasterSingleStackConfigMap.node4IP,
"NODE5_IP="+frrMasterSingleStackConfigMap.node5IP, "BFD_PROFILE="+frrMasterSingleStackConfigMap.bfdProfile, "PASSWORD="+frrMasterSingleStackConfigMap.password)
if errTemplate != nil {
e2e.Logf("Error creating config map %v", errTemplate)
return false
}
return true
}
func createNAD(oc *exutil.CLI, testDataDir string, namespace string) (status bool) {
defInterface, intErr := getDefaultInterface(oc)
o.Expect(intErr).NotTo(o.HaveOccurred())
frrMasterSingleStackNADTemplate := filepath.Join(testDataDir, "frr-master-singlestack-nad-template.yaml")
frrMasterSingleStackNAD := routerNADResource{
name: bgpRouterNADName,
namespace: namespace,
interfaceName: defInterface,
template: frrMasterSingleStackNADTemplate,
}
errTemplate := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", frrMasterSingleStackNAD.template, "-p", "NAME="+frrMasterSingleStackNAD.name, "INTERFACE="+frrMasterSingleStackNAD.interfaceName, "NAMESPACE="+frrMasterSingleStackNAD.namespace)
if errTemplate != nil {
e2e.Logf("Error creating network attachment definition %v", errTemplate)
return false
}
return true
}
func createRouterPod(oc *exutil.CLI, testDataDir string, namespace string) (status bool) {
frrMasterSingleStackRouterPodTemplate := filepath.Join(testDataDir, "frr-master-singlestack-router-pod-template.yaml")
NADName, errNAD := oc.AsAdmin().WithoutNamespace().Run("get").Args("network-attachment-definitions", "-n", namespace, "--no-headers", "-o=custom-columns=NAME:.metadata.name").Output()
o.Expect(errNAD).NotTo(o.HaveOccurred())
masterNode, errMaster := exutil.GetFirstMasterNode(oc)
o.Expect(errMaster).NotTo(o.HaveOccurred())
frrMasterSingleStackRouterPod := routerPodResource{
name: bgpRouterPodName,
namespace: namespace,
configMapName: bgpRouterConfigMapName,
NADName: NADName,
routerIP: bgpRouterIP,
masterNodeName: masterNode,
template: frrMasterSingleStackRouterPodTemplate,
}
errTemplate := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", frrMasterSingleStackRouterPod.template, "-p", "NAME="+frrMasterSingleStackRouterPod.name, "NAMESPACE="+frrMasterSingleStackRouterPod.namespace,
"CONFIG_MAP_NAME="+frrMasterSingleStackRouterPod.configMapName, "ROUTER_IP="+frrMasterSingleStackRouterPod.routerIP, "MASTER_NODENAME="+frrMasterSingleStackRouterPod.masterNodeName, "NAD_NAME="+frrMasterSingleStackRouterPod.NADName)
if errTemplate != nil {
e2e.Logf("Error creating router pod %v", errTemplate)
return false
}
err := waitForPodWithLabelReady(oc, namespace, "name=router-pod")
o.Expect(err).NotTo(o.HaveOccurred())
return true
}
func setUpExternalFRRRouter(oc *exutil.CLI, bgpRouterNamespace string, cmdArgs ...string) (status bool) {
testDataDir := exutil.FixturePath("testdata", "networking/metallb")
g.By(" Create namespace")
oc.CreateSpecifiedNamespaceAsAdmin(bgpRouterNamespace)
exutil.SetNamespacePrivileged(oc, bgpRouterNamespace)
g.By(" Create config map")
o.Expect(createConfigMap(oc, testDataDir, bgpRouterNamespace, cmdArgs...)).To(o.BeTrue())
g.By(" Create network attachment defiition")
o.Expect(createNAD(oc, testDataDir, bgpRouterNamespace)).To(o.BeTrue())
g.By(" Create FRR router pod on master")
o.Expect(createRouterPod(oc, testDataDir, bgpRouterNamespace)).To(o.BeTrue())
return true
}
func checkBGPSessions(oc *exutil.CLI, bgpRouterNamespace string, bgpSessionCheckTime ...time.Duration) (status bool) {
timeout := 120 * time.Second
if len(bgpSessionCheckTime) > 0 {
timeout = bgpSessionCheckTime[0]
}
var result bool
cmd := []string{"-n", bgpRouterNamespace, bgpRouterPodName, "--", "vtysh", "-c", "show bgp summary"}
errCheck := wait.Poll(60*time.Second, timeout, func() (bool, error) {
e2e.Logf("Checking status of BGP session")
bgpSummaryOutput, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
if err != nil || bgpSummaryOutput == "" || strings.Contains(bgpSummaryOutput, "Active") || strings.Contains(bgpSummaryOutput, "Connect") {
e2e.Logf("Failed to establish BGP session between router and speakers, Trying again..")
result = false
return result, nil
}
e2e.Logf("BGP session established")
result = true
return result, nil
})
if errCheck != nil {
e2e.Logf("Failed to establish BGP session between router and speakers - Timed out")
}
return result
}
func createBGPPeerCR(oc *exutil.CLI, bgppeer bgpPeerResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bgppeer.template, "-p", "NAME="+bgppeer.name, "NAMESPACE="+bgppeer.namespace,
"PASSWORD="+bgppeer.password, "KEEPALIVETIME="+bgppeer.keepAliveTime, "PEER_PORT="+strconv.Itoa(int(bgppeer.peerPort)),
"HOLDTIME="+bgppeer.holdTime, "MY_ASN="+strconv.Itoa(int(bgppeer.myASN)), "PEER_ASN="+strconv.Itoa(int(bgppeer.peerASN)), "PEER_IPADDRESS="+bgppeer.peerAddress)
if err != nil {
e2e.Logf("Error creating BGP Peer %v", err)
return false
}
return true
}
func createBGPAdvertisementCR(oc *exutil.CLI, bgpAdvertisement bgpAdvertisementResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bgpAdvertisement.template, "-p", "NAME="+bgpAdvertisement.name, "NAMESPACE="+bgpAdvertisement.namespace,
"AGGREGATIONLENGTH="+strconv.Itoa(int(bgpAdvertisement.aggregationLength)), "AGGREGATIONLENGTHV6="+strconv.Itoa(int(bgpAdvertisement.aggregationLengthV6)),
"IPADDRESSPOOL1="+bgpAdvertisement.ipAddressPools[0], "COMMUNITIES="+bgpAdvertisement.communities[0],
"NODESLECTORKEY1="+bgpAdvertisement.nodeSelectorsKey, "NODESELECTOROPERATOR1="+bgpAdvertisement.nodeSelectorsOperator,
"WORKER1="+bgpAdvertisement.nodeSelectorValues[0], "WORKER2="+bgpAdvertisement.nodeSelectorValues[1],
"BGPPEER1="+bgpAdvertisement.peer[0])
if err != nil {
e2e.Logf("Error creating BGP advertisement %v", err)
return false
}
return true
}
func createCommunityCR(oc *exutil.CLI, community communityResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", community.template, "-p", "NAME="+community.name, "NAMESPACE="+community.namespace,
"COMMUNITYNAME="+community.communityName, "VALUE="+community.value1+":"+community.value2)
if err != nil {
e2e.Logf("Error creating Community %v", err)
return false
}
return true
}
func checkServiceEvents(oc *exutil.CLI, svcName string, namespace string, reason string) (bool, string) {
fieldSelectorArgs := fmt.Sprintf("reason=%s,involvedObject.kind=Service,involvedObject.name=%s", reason, svcName)
result := false
message := ""
errCheck := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
var svcEvents string
svcEvents, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", namespace, "--field-selector", fieldSelectorArgs).Output()
if err != nil {
return false, nil
}
if !strings.Contains(svcEvents, "No resources found") {
for _, index := range strings.Split(svcEvents, "\n") {
if strings.Contains(index, reason) {
e2e.Logf("Processing event %s for service", index)
if reason == "AllocationFailed" {
messageString := strings.Split(index, ":")
message = messageString[1]
}
result = true
}
}
return true, nil
}
return false, nil
})
if errCheck != nil {
return result, ""
}
return result, message
}
func checkLogLevelPod(oc *exutil.CLI, component string, opNamespace string, level string) (bool, string) {
var podLogLevelOutput string
var err error
if component == "controller" {
podLogLevelOutput, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("pod", "-n", opNamespace, "-l", "component=controller", "-ojson").Output()
if err != nil {
e2e.Logf("Failed to get pod details due to %v", err)
return false, "Get request to get controller pod failed"
}
} else {
speakerPodList, err := exutil.GetAllPodsWithLabel(oc, opNamespace, "component=speaker")
if err != nil {
e2e.Logf("Failed to get pod %v", err)
return false, "Get request to get speaker pod failed"
}
if len(speakerPodList) == 0 {
return false, "Speaker pod list is empty"
}
podLogLevelOutput, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("pod", speakerPodList[0], "-n", opNamespace, "-ojson").Output()
if err != nil {
e2e.Logf("Failed to get details of pod %s due to %v", speakerPodList[0], err)
return false, "Get request to get log level of speaker pod failed"
}
}
if podLogLevelOutput == "" {
return false, fmt.Sprintf("Failed to get log level of %s pod", component)
}
if strings.Contains(podLogLevelOutput, "--log-level="+level) {
return true, ""
}
return false, fmt.Sprintf("The log level %s not set for %s pod", level, component)
}
func checkPrometheusMetrics(oc *exutil.CLI, interval time.Duration, timeout time.Duration, pollImmediate bool, metrics string, matchExpected bool) (bool, error) {
prometheusURL := "localhost:9090/api/v1/query?query=" + metrics
var metricsOutput string
var err error
metricsErr := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, pollImmediate, func(ctx context.Context) (bool, error) {
metricsOutput, err = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "prometheus-k8s-0", "--", "curl", prometheusURL).Output()
if err != nil {
e2e.Logf("Could not get metrics %s status and trying again, the error is:%v", metrics, err)
return false, nil
}
if matchExpected && !strings.Contains(metricsOutput, metrics) {
return false, nil
}
if !matchExpected && strings.Contains(metricsOutput, metrics) {
return false, nil
}
e2e.Logf("Metrics output %s", metricsOutput)
return true, nil
})
exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Failed to get metric status due to %v", metricsErr))
return true, nil
}
func createBFDProfileCR(oc *exutil.CLI, bfdProfile bfdProfileResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bfdProfile.template, "-p", "NAME="+bfdProfile.name, "NAMESPACE="+bfdProfile.namespace,
"DETECTMULTIPLIER="+strconv.Itoa(int(bfdProfile.detectMultiplier)), "ECHOMODE="+strconv.FormatBool(bfdProfile.echoMode),
"ECHORECEIVEINTERVAL="+strconv.Itoa(int(bfdProfile.echoReceiveInterval)), "ECHOTRANSMITINTERVAL="+strconv.Itoa(int(bfdProfile.transmitInterval)),
"MINIMUMTTL="+strconv.Itoa(int(bfdProfile.minimumTtl)), "PASSIVEMODE="+strconv.FormatBool(bfdProfile.passiveMode),
"RECEIVEINTERVAL="+strconv.Itoa(int(bfdProfile.receiveInterval)), "TRANSMITINTERVAL="+strconv.Itoa(int(bfdProfile.transmitInterval)))
if err != nil {
e2e.Logf(fmt.Sprintf("Error creating BFD profile %v", err))
return false
}
return true
}
func checkBFDSessions(oc *exutil.CLI, ns string) (status bool) {
cmd := []string{"-n", ns, bgpRouterPodName, "--", "vtysh", "-c", "show bfd peers brief"}
errCheck := wait.Poll(60*time.Second, 120*time.Second, func() (bool, error) {
e2e.Logf("Checking status of BFD session")
bfdOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
o.Expect(bfdOutput).NotTo(o.BeEmpty())
if err != nil {
return false, nil
}
if strings.Contains(bfdOutput, "down") {
e2e.Logf("Failed to establish BFD session between router and speakers, Trying again")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Establishing BFD session between router and speakers timed out")
e2e.Logf("BFD session established")
return true
}
func verifyHostPrefixAdvertised(oc *exutil.CLI, ns string, expectedHostPrefixes []string) bool {
e2e.Logf("Checking host prefix")
cmd := []string{"-n", ns, bgpRouterPodName, "--", "vtysh", "-c", "show ip route bgp"}
routeOutput, routeErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
o.Expect(routeErr).NotTo(o.HaveOccurred())
for _, hostPrefix := range expectedHostPrefixes {
if strings.Contains(routeOutput, hostPrefix) {
e2e.Logf("Found host prefix %s", hostPrefix)
} else {
e2e.Logf("Failed to found host prefix %s", hostPrefix)
return false
}
}
return true
}
func checkBGPv4RouteTableEntry(oc *exutil.CLI, ns string, entry string, expectedPaths []string) bool {
cmd := []string{"-n", ns, bgpRouterPodName, "--", "vtysh", "-c", "show bgp ipv4 unicast " + entry}
errCheck := wait.Poll(60*time.Second, 120*time.Second, func() (bool, error) {
e2e.Logf("Checking BGP route table for entry " + entry)
routeOutput, routeErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
o.Expect(routeErr).NotTo(o.HaveOccurred())
if routeErr != nil {
return false, nil
}
for _, path := range expectedPaths {
if strings.Contains(routeOutput, path) {
e2e.Logf("Found expected: %s", path)
} else {
e2e.Logf("Failed to found expected: %s", path)
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Checking BGP route table timed out")
return true
}
func createMetalLBAffinityCR(oc *exutil.CLI, metallbcr metalLBAffinityCRResource) (status bool) {
g.By("Creating MetalLB Affinity CR from template")
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", metallbcr.template, "-p", "NAME="+metallbcr.name, "NAMESPACE="+metallbcr.namespace,
"PARAM1="+metallbcr.param1, "PARAM2="+metallbcr.param2)
if err != nil {
e2e.Logf("Error creating MetalLB CR %v", err)
return false
}
return true
}
func getRouterPodNamespace(oc *exutil.CLI) string {
routerNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-A", "-l", "name=router-pod", "--no-headers", "-o=custom-columns=NAME:.metadata.namespace").Output()
if err != nil {
return ""
}
return routerNS
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
fe996814-6db4-4d2b-b17b-b78ab97b9667
|
operatorInstall
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['subscriptionResource', 'namespaceResource', 'operatorGroupResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func operatorInstall(oc *exutil.CLI, sub subscriptionResource, ns namespaceResource, og operatorGroupResource) (status bool) {
//Installing Operator
g.By(" (1) INSTALLING Operator in the namespace")
//Applying the config of necessary yaml files from templates to create metallb operator
g.By("(1.1) Applying namespace template")
err0 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ns.template, "-p", "NAME="+ns.name)
if err0 != nil {
e2e.Logf("Error creating namespace %v", err0)
}
g.By("(1.2) Applying operatorgroup yaml")
err0 = applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", og.template, "-p", "NAME="+og.name, "NAMESPACE="+og.namespace, "TARGETNAMESPACES="+og.targetNamespaces)
if err0 != nil {
e2e.Logf("Error creating operator group %v", err0)
}
g.By("(1.3) Creating subscription yaml from template")
// no need to check for an existing subscription
err0 = applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "OPERATORNAME="+sub.operatorName, "SUBSCRIPTIONNAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"CATALOGSOURCE="+sub.catalog, "CATALOGSOURCENAMESPACE="+sub.catalogNamespace)
if err0 != nil {
e2e.Logf("Error creating subscription %v", err0)
}
//confirming operator install
g.By("(1.4) Verify the operator finished subscribing")
errCheck := wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
subState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(subState, "AtLatestKnown") == 0 {
return true, nil
}
// log full status of sub for installation failure debugging
subState, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status}").Output()
e2e.Logf("Status of subscription: %v", subState)
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Subscription %s in namespace %v does not have expected status", sub.name, sub.namespace))
g.By("(1.5) Get csvName")
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
errCheck = wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", sub.namespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(csvState, "Succeeded") == 0 {
e2e.Logf("CSV check complete!!!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("CSV %v in %v namespace does not have expected status", csvName, sub.namespace))
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
1aff69ef-23d7-4b64-af40-3fd646af8b15
|
createMetalLBCR
|
['metalLBCRResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createMetalLBCR(oc *exutil.CLI, metallbcr metalLBCRResource, metalLBCRTemplate string) (status bool) {
g.By("Creating MetalLB CR from template")
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", metallbcr.template, "-p", "NAME="+metallbcr.name, "NAMESPACE="+metallbcr.namespace,
"NODESELECTORKEY="+metallbcr.nodeSelectorKey, "NODESELECTORVAL="+metallbcr.nodeSelectorVal,
"CONTROLLERSELECTORKEY="+metallbcr.controllerSelectorKey, "CONTROLLERSELECTORVAL="+metallbcr.controllerSelectorVal)
if err != nil {
e2e.Logf("Error creating MetalLB CR %v", err)
return false
}
err = waitForPodWithLabelReady(oc, metallbcr.namespace, "component=speaker")
exutil.AssertWaitPollNoErr(err, "The pods with label component=speaker are not ready")
if err != nil {
e2e.Logf("Speaker Pods did not transition to ready state %v", err)
return false
}
err = waitForPodWithLabelReady(oc, metallbcr.namespace, "component=frr-k8s")
exutil.AssertWaitPollNoErr(err, "The pods with label component=frr-k8s are not ready")
if err != nil {
e2e.Logf("FRR k8s Pods did not transition to ready state %v", err)
return false
}
err = waitForPodWithLabelReady(oc, metallbcr.namespace, "component=controller")
exutil.AssertWaitPollNoErr(err, "The pod with label component=controller is not ready")
if err != nil {
e2e.Logf("Controller pod did not transition to ready state %v", err)
return false
}
e2e.Logf("Controller and speaker pods created successfully")
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4795fc79-7eab-4f25-b86a-e4655c2a0fd5
|
validateAllWorkerNodeMCR
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func validateAllWorkerNodeMCR(oc *exutil.CLI, namespace string) bool {
var (
podList = []string{}
)
nodeList, err := exutil.GetClusterNodesBy(oc, "worker")
if err != nil {
e2e.Logf("Unable to get nodes to determine if node is worker node %s", err)
return false
}
speakerPodList, errSpeaker := exutil.GetAllPodsWithLabel(oc, namespace, "component=speaker")
if errSpeaker != nil {
e2e.Logf("Unable to get list of speaker pods %s", err)
return false
}
if len(speakerPodList) != len(nodeList) {
e2e.Logf("Speaker pods not scheduled on all worker nodes")
return false
}
frrk8sPodList, errFrrk8s := exutil.GetAllPodsWithLabel(oc, namespace, "component=frr-k8s")
if errFrrk8s != nil {
e2e.Logf("Unable to get list of frr-k8s pods %s", err)
return false
}
if len(frrk8sPodList) != len(nodeList) {
e2e.Logf("K8s FRR pods not scheduled on all worker nodes")
return false
}
podList = append(podList, speakerPodList[:]...)
podList = append(podList, frrk8sPodList[:]...)
// Iterate over the speaker and frr-k8s pods to validate they are scheduled on node that is worker node
for _, pod := range podList {
nodeName, _ := exutil.GetPodNodeName(oc, namespace, pod)
e2e.Logf("Pod %s, node name %s", pod, nodeName)
if isWorkerNode(oc, nodeName, nodeList) == false {
return false
}
}
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7fbfe005-5269-443d-ba1a-453cd33497d7
|
isWorkerNode
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func isWorkerNode(oc *exutil.CLI, nodeName string, nodeList []string) bool {
for i := 0; i <= (len(nodeList) - 1); i++ {
if nodeList[i] == nodeName {
return true
}
}
return false
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
e0be9e77-64c1-4551-843f-4a51d53715df
|
createLoadBalancerService
|
['"strconv"', '"strings"']
|
['loadBalancerServiceResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createLoadBalancerService(oc *exutil.CLI, loadBalancerSvc loadBalancerServiceResource, loadBalancerServiceTemplate string) (status bool) {
var msg, svcFile string
var err error
if strings.Contains(loadBalancerServiceTemplate, "annotated") {
e2e.Logf("Template %s", loadBalancerServiceTemplate)
svcFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", loadBalancerSvc.template, "-p", "NAME="+loadBalancerSvc.name, "NAMESPACE="+loadBalancerSvc.namespace,
"PROTOCOL="+loadBalancerSvc.protocol,
"LABELKEY1="+loadBalancerSvc.labelKey, "LABELVALUE1="+loadBalancerSvc.labelValue,
"ANNOTATIONKEY="+loadBalancerSvc.annotationKey, "ANNOTATIONVALUE="+loadBalancerSvc.annotationValue,
"EXTERNALTRAFFICPOLICY="+loadBalancerSvc.externaltrafficpolicy, "NODEPORTALLOCATION="+strconv.FormatBool(loadBalancerSvc.allocateLoadBalancerNodePorts)).OutputToFile(getRandomString() + "svc.json")
} else {
svcFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", loadBalancerSvc.template, "-p", "NAME="+loadBalancerSvc.name, "NAMESPACE="+loadBalancerSvc.namespace,
"PROTOCOL="+loadBalancerSvc.protocol,
"LABELKEY1="+loadBalancerSvc.labelKey, "LABELVALUE1="+loadBalancerSvc.labelValue,
"EXTERNALTRAFFICPOLICY="+loadBalancerSvc.externaltrafficpolicy, "NODEPORTALLOCATION="+strconv.FormatBool(loadBalancerSvc.allocateLoadBalancerNodePorts)).OutputToFile(getRandomString() + "svc.json")
}
g.By("Creating service file")
if err != nil {
e2e.Logf("Error creating LoadBalancerService %v with %v", err, svcFile)
return false
}
g.By("Applying service file " + svcFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", svcFile, "-n", loadBalancerSvc.namespace).Output()
if err != nil {
e2e.Logf("Could not apply svcFile %v %v", msg, err)
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
392da949-eb09-4c7c-8130-673995fcafc0
|
checkLoadBalancerSvcStatus
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func checkLoadBalancerSvcStatus(oc *exutil.CLI, namespace string, svcName string, statusCheckTime ...time.Duration) error {
interval := 10 * time.Second
timeout := 300 * time.Second
if len(statusCheckTime) > 0 {
e2e.Logf("Interval %s, Timeout %s", statusCheckTime[0], statusCheckTime[1])
interval = statusCheckTime[0]
timeout = statusCheckTime[1]
}
return wait.Poll(interval, timeout, func() (bool, error) {
e2e.Logf("Checking status of service %s", svcName)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.status.loadBalancer.ingress[0].ip}").Output()
if err != nil {
e2e.Logf("Failed to get service status, error:%v. Trying again", err)
return false, nil
}
if strings.Contains(output, "<pending>") || output == "" {
e2e.Logf("Failed to assign address to service, error:%v. Trying again", err)
return false, nil
}
return true, nil
})
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
a4081c5e-bcf1-4f9f-880c-845a8a468400
|
getLoadBalancerSvcIP
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func getLoadBalancerSvcIP(oc *exutil.CLI, namespace string, svcName string) string {
svcIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.status.loadBalancer.ingress[0].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("LoadBalancer service %s's, IP is :%s", svcName, svcIP)
return svcIP
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
d0df50e9-4f9f-4608-bf53-4720b05d0db7
|
validateService
|
['"fmt"', '"net"', '"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func validateService(oc *exutil.CLI, curlHost string, svcExternalIP string) bool {
e2e.Logf("Validating service with IP %s", svcExternalIP)
curlHostIP := net.ParseIP(curlHost)
var curlOutput string
var curlErr error
connectTimeout := "5"
if curlHostIP.To4() != nil {
//From test runner with proxy
var cmdOutput []byte
svcChkCmd := fmt.Sprintf("curl -H 'Cache-Control: no-cache' -x 'http://%s:8888' %s --connect-timeout %s", curlHost, svcExternalIP, connectTimeout)
cmdOutput, curlErr = exec.Command("bash", "-c", svcChkCmd).Output()
curlOutput = string(cmdOutput)
} else {
curlOutput, curlErr = exutil.DebugNode(oc, curlHost, "curl", svcExternalIP, "--connect-timeout", connectTimeout)
}
if strings.Contains(curlOutput, "Hello OpenShift!") {
return true
}
if curlErr != nil {
e2e.Logf("Error %s", curlErr)
return false
}
e2e.Logf("Output of curl %s", curlOutput)
return false
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
ec3a0c65-38aa-49e7-9b97-12c7b2f7d38d
|
obtainMACAddressForIP
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func obtainMACAddressForIP(oc *exutil.CLI, nodeName string, svcExternalIP string, arpReuests int) (string, bool) {
defInterface, intErr := getDefaultInterface(oc)
o.Expect(intErr).NotTo(o.HaveOccurred())
cmd := fmt.Sprintf("arping -I %s %s -c %d", defInterface, svcExternalIP, arpReuests)
//https://issues.redhat.com/browse/OCPBUGS-10321 DebugNodeWithOptionsAndChroot replaced
output, arpErr := exutil.DebugNodeWithOptions(oc, nodeName, []string{"-q"}, "bin/sh", "-c", cmd)
//CI run the command returns non-zero exit code from debug container
if arpErr != nil {
return "", false
}
e2e.Logf("ARP request response %s", output)
re := regexp.MustCompile(`([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})`)
var macAddress string
if re.MatchString(output) {
submatchall := re.FindAllString(output, -1)
macAddress = submatchall[0]
return macAddress, true
} else {
return "", false
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
443b1c17-cb72-4f81-be47-1f1a9e8a6aec
|
getNodeAnnouncingL2Service
|
['"fmt"', '"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func getNodeAnnouncingL2Service(oc *exutil.CLI, svcName string, namespace string) string {
fieldSelectorArgs := fmt.Sprintf("reason=nodeAssigned,involvedObject.kind=Service,involvedObject.name=%s", svcName)
var nodeName string
errCheck := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
var allEvents []string
var svcEvents string
svcEvents, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", namespace, "--field-selector", fieldSelectorArgs).Output()
if err != nil {
return false, nil
}
if !strings.Contains(svcEvents, "No resources found") {
for _, index := range strings.Split(svcEvents, "\n") {
if strings.Contains(index, "announcing from node") {
e2e.Logf("Processing event service %s", index)
re := regexp.MustCompile(`"([^\"]+)"`)
event := re.FindString(index)
allEvents = append(allEvents, event)
}
}
nodeName = strings.Trim(allEvents[len(allEvents)-1], "\"")
return true, nil
}
return false, nil
})
o.Expect(nodeName).NotTo(o.BeEmpty())
o.Expect(errCheck).NotTo(o.HaveOccurred())
return nodeName
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
5e932dce-235a-4367-9951-a7a7bcf372a1
|
isPlatformSuitable
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func isPlatformSuitable(oc *exutil.CLI) bool {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1/rdu2 cluster , skip for other envrionment!!!")
}
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4c3d4d86-0835-46e0-b58b-1fa80034c943
|
createIPAddressPoolCR
|
['"strconv"']
|
['ipAddressPoolResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createIPAddressPoolCR(oc *exutil.CLI, ipAddresspool ipAddressPoolResource, addressPoolTemplate string) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ipAddresspool.template, "-p", "NAME="+ipAddresspool.name, "NAMESPACE="+ipAddresspool.namespace, "PRIORITY="+strconv.Itoa(int(ipAddresspool.priority)),
"LABELKEY1="+ipAddresspool.label1, "LABELVALUE1="+ipAddresspool.value1, "AUTOASSIGN="+strconv.FormatBool(ipAddresspool.autoAssign), "AVOIDBUGGYIPS="+strconv.FormatBool(ipAddresspool.avoidBuggyIPs),
"ADDRESS1="+ipAddresspool.addresses[0], "ADDRESS2="+ipAddresspool.addresses[1], "NAMESPACE1="+ipAddresspool.namespaces[0], "NAMESPACE2="+ipAddresspool.namespaces[1],
"MLSERVICEKEY1="+ipAddresspool.serviceLabelKey, "MLSERVICEVALUE1="+ipAddresspool.serviceLabelValue, "MESERVICEKEY1="+ipAddresspool.serviceSelectorKey, "MESERVICEOPERATOR1="+ipAddresspool.serviceSelectorOperator, "MESERVICEKEY1VALUE1="+ipAddresspool.serviceSelectorValue[0],
"MLNAMESPACEKEY1="+ipAddresspool.serviceLabelKey, "MLNAMESPACEVALUE1="+ipAddresspool.serviceLabelValue, "MENAMESPACEKEY1="+ipAddresspool.namespaceSelectorKey, "MENAMESPACEOPERATOR1="+ipAddresspool.namespaceSelectorOperator, "MENAMESPACEKEY1VALUE1="+ipAddresspool.namespaceSelectorValue[0])
if err != nil {
e2e.Logf("Error creating IP Addresspool %v", err)
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
0e20366b-4ac1-413f-990a-e41c7d128f89
|
createL2AdvertisementCR
|
['l2AdvertisementResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createL2AdvertisementCR(oc *exutil.CLI, l2advertisement l2AdvertisementResource, l2AdvertisementTemplate string) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", l2advertisement.template, "-p", "NAME="+l2advertisement.name, "NAMESPACE="+l2advertisement.namespace,
"IPADDRESSPOOL1="+l2advertisement.ipAddressPools[0], "INTERFACE1="+l2advertisement.interfaces[0], "INTERFACE2="+l2advertisement.interfaces[1], "INTERFACE3="+l2advertisement.interfaces[2],
"WORKER1="+l2advertisement.nodeSelectorValues[0], "WORKER2="+l2advertisement.nodeSelectorValues[1])
if err != nil {
e2e.Logf("Error creating l2advertisement %v", err)
return false
}
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
71a8c09e-2c46-4fb0-a90a-13e5204f7165
|
getLoadBalancerSvcNodePort
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func getLoadBalancerSvcNodePort(oc *exutil.CLI, namespace string, svcName string) string {
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace, svcName, "-o=jsonpath={.spec.ports[0].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return nodePort
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
b0114aab-adb0-4e65-8ebc-75269e15ff26
|
createConfigMap
|
['"context"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
['routerConfigMapResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createConfigMap(oc *exutil.CLI, testDataDir string, namespace string, cmdArgs ...string) (status bool) {
var bgpPassword string
var bfdEnabled string
var bfdProfile string
//parse cmd arguments
if len(cmdArgs) > 1 {
e2e.Logf("BGP Password %s, BFD Status %s, BFD Profile %s", cmdArgs[0], cmdArgs[1], cmdArgs[2])
bgpPassword = cmdArgs[0]
bfdEnabled = cmdArgs[1]
bfdProfile = cmdArgs[2]
} else if len(cmdArgs) == 1 {
bgpPassword = cmdArgs[0]
}
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) >= 5).NotTo(o.BeFalse())
var nodeIPs []string
var nodeIP string
for _, node := range nodeList.Items {
nodeIP = getNodeIPv4(oc, namespace, node.Name)
nodeIPs = append(nodeIPs, nodeIP)
}
frrMasterSingleStackConfigMapTemplate := filepath.Join(testDataDir, "frr-master-singlestack-configmap-template.yaml")
frrMasterSingleStackConfigMap := routerConfigMapResource{
name: bgpRouterConfigMapName,
namespace: namespace,
bgpd_enabled: "yes",
bfdd_enabled: bfdEnabled,
routerIP: "192.168.111.60",
node1IP: nodeIPs[0],
node2IP: nodeIPs[1],
node3IP: nodeIPs[2],
node4IP: nodeIPs[3],
node5IP: nodeIPs[4],
password: bgpPassword,
bfdProfile: bfdProfile,
template: frrMasterSingleStackConfigMapTemplate,
}
errTemplate := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", frrMasterSingleStackConfigMap.template, "-p", "NAME="+frrMasterSingleStackConfigMap.name, "NAMESPACE="+frrMasterSingleStackConfigMap.namespace,
"BGPD_ENABLED="+frrMasterSingleStackConfigMap.bgpd_enabled, "BFDD_ENABLED="+frrMasterSingleStackConfigMap.bfdd_enabled, "ROUTER_IP="+frrMasterSingleStackConfigMap.routerIP, "NODE1_IP="+frrMasterSingleStackConfigMap.node1IP,
"NODE2_IP="+frrMasterSingleStackConfigMap.node2IP, "NODE3_IP="+frrMasterSingleStackConfigMap.node3IP, "NODE4_IP="+frrMasterSingleStackConfigMap.node4IP,
"NODE5_IP="+frrMasterSingleStackConfigMap.node5IP, "BFD_PROFILE="+frrMasterSingleStackConfigMap.bfdProfile, "PASSWORD="+frrMasterSingleStackConfigMap.password)
if errTemplate != nil {
e2e.Logf("Error creating config map %v", errTemplate)
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
ef57b27b-2ac0-4599-a301-d75534e09f2a
|
createNAD
|
['"path/filepath"']
|
['routerNADResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createNAD(oc *exutil.CLI, testDataDir string, namespace string) (status bool) {
defInterface, intErr := getDefaultInterface(oc)
o.Expect(intErr).NotTo(o.HaveOccurred())
frrMasterSingleStackNADTemplate := filepath.Join(testDataDir, "frr-master-singlestack-nad-template.yaml")
frrMasterSingleStackNAD := routerNADResource{
name: bgpRouterNADName,
namespace: namespace,
interfaceName: defInterface,
template: frrMasterSingleStackNADTemplate,
}
errTemplate := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", frrMasterSingleStackNAD.template, "-p", "NAME="+frrMasterSingleStackNAD.name, "INTERFACE="+frrMasterSingleStackNAD.interfaceName, "NAMESPACE="+frrMasterSingleStackNAD.namespace)
if errTemplate != nil {
e2e.Logf("Error creating network attachment definition %v", errTemplate)
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
8ec41195-c970-4e09-8a3b-3195e60e5573
|
createRouterPod
|
['"path/filepath"']
|
['routerPodResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createRouterPod(oc *exutil.CLI, testDataDir string, namespace string) (status bool) {
frrMasterSingleStackRouterPodTemplate := filepath.Join(testDataDir, "frr-master-singlestack-router-pod-template.yaml")
NADName, errNAD := oc.AsAdmin().WithoutNamespace().Run("get").Args("network-attachment-definitions", "-n", namespace, "--no-headers", "-o=custom-columns=NAME:.metadata.name").Output()
o.Expect(errNAD).NotTo(o.HaveOccurred())
masterNode, errMaster := exutil.GetFirstMasterNode(oc)
o.Expect(errMaster).NotTo(o.HaveOccurred())
frrMasterSingleStackRouterPod := routerPodResource{
name: bgpRouterPodName,
namespace: namespace,
configMapName: bgpRouterConfigMapName,
NADName: NADName,
routerIP: bgpRouterIP,
masterNodeName: masterNode,
template: frrMasterSingleStackRouterPodTemplate,
}
errTemplate := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", frrMasterSingleStackRouterPod.template, "-p", "NAME="+frrMasterSingleStackRouterPod.name, "NAMESPACE="+frrMasterSingleStackRouterPod.namespace,
"CONFIG_MAP_NAME="+frrMasterSingleStackRouterPod.configMapName, "ROUTER_IP="+frrMasterSingleStackRouterPod.routerIP, "MASTER_NODENAME="+frrMasterSingleStackRouterPod.masterNodeName, "NAD_NAME="+frrMasterSingleStackRouterPod.NADName)
if errTemplate != nil {
e2e.Logf("Error creating router pod %v", errTemplate)
return false
}
err := waitForPodWithLabelReady(oc, namespace, "name=router-pod")
o.Expect(err).NotTo(o.HaveOccurred())
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
9826dc9c-2e1c-481a-9920-63d94c61be41
|
setUpExternalFRRRouter
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func setUpExternalFRRRouter(oc *exutil.CLI, bgpRouterNamespace string, cmdArgs ...string) (status bool) {
testDataDir := exutil.FixturePath("testdata", "networking/metallb")
g.By(" Create namespace")
oc.CreateSpecifiedNamespaceAsAdmin(bgpRouterNamespace)
exutil.SetNamespacePrivileged(oc, bgpRouterNamespace)
g.By(" Create config map")
o.Expect(createConfigMap(oc, testDataDir, bgpRouterNamespace, cmdArgs...)).To(o.BeTrue())
g.By(" Create network attachment defiition")
o.Expect(createNAD(oc, testDataDir, bgpRouterNamespace)).To(o.BeTrue())
g.By(" Create FRR router pod on master")
o.Expect(createRouterPod(oc, testDataDir, bgpRouterNamespace)).To(o.BeTrue())
return true
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
c03213a2-6897-4a6a-b76b-8972b4edbf32
|
checkBGPSessions
|
['"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func checkBGPSessions(oc *exutil.CLI, bgpRouterNamespace string, bgpSessionCheckTime ...time.Duration) (status bool) {
timeout := 120 * time.Second
if len(bgpSessionCheckTime) > 0 {
timeout = bgpSessionCheckTime[0]
}
var result bool
cmd := []string{"-n", bgpRouterNamespace, bgpRouterPodName, "--", "vtysh", "-c", "show bgp summary"}
errCheck := wait.Poll(60*time.Second, timeout, func() (bool, error) {
e2e.Logf("Checking status of BGP session")
bgpSummaryOutput, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(cmd...).Output()
if err != nil || bgpSummaryOutput == "" || strings.Contains(bgpSummaryOutput, "Active") || strings.Contains(bgpSummaryOutput, "Connect") {
e2e.Logf("Failed to establish BGP session between router and speakers, Trying again..")
result = false
return result, nil
}
e2e.Logf("BGP session established")
result = true
return result, nil
})
if errCheck != nil {
e2e.Logf("Failed to establish BGP session between router and speakers - Timed out")
}
return result
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c83240ca-fe4e-4142-9493-926d7b804524
|
createBGPPeerCR
|
['"strconv"']
|
['bgpPeerResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createBGPPeerCR(oc *exutil.CLI, bgppeer bgpPeerResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bgppeer.template, "-p", "NAME="+bgppeer.name, "NAMESPACE="+bgppeer.namespace,
"PASSWORD="+bgppeer.password, "KEEPALIVETIME="+bgppeer.keepAliveTime, "PEER_PORT="+strconv.Itoa(int(bgppeer.peerPort)),
"HOLDTIME="+bgppeer.holdTime, "MY_ASN="+strconv.Itoa(int(bgppeer.myASN)), "PEER_ASN="+strconv.Itoa(int(bgppeer.peerASN)), "PEER_IPADDRESS="+bgppeer.peerAddress)
if err != nil {
e2e.Logf("Error creating BGP Peer %v", err)
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
d97294b2-36a2-46fe-8ffa-47e3a1a893c1
|
createBGPAdvertisementCR
|
['"strconv"']
|
['bgpAdvertisementResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createBGPAdvertisementCR(oc *exutil.CLI, bgpAdvertisement bgpAdvertisementResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bgpAdvertisement.template, "-p", "NAME="+bgpAdvertisement.name, "NAMESPACE="+bgpAdvertisement.namespace,
"AGGREGATIONLENGTH="+strconv.Itoa(int(bgpAdvertisement.aggregationLength)), "AGGREGATIONLENGTHV6="+strconv.Itoa(int(bgpAdvertisement.aggregationLengthV6)),
"IPADDRESSPOOL1="+bgpAdvertisement.ipAddressPools[0], "COMMUNITIES="+bgpAdvertisement.communities[0],
"NODESLECTORKEY1="+bgpAdvertisement.nodeSelectorsKey, "NODESELECTOROPERATOR1="+bgpAdvertisement.nodeSelectorsOperator,
"WORKER1="+bgpAdvertisement.nodeSelectorValues[0], "WORKER2="+bgpAdvertisement.nodeSelectorValues[1],
"BGPPEER1="+bgpAdvertisement.peer[0])
if err != nil {
e2e.Logf("Error creating BGP advertisement %v", err)
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
59bf5a0e-d449-47e3-8541-94074f9e1069
|
createCommunityCR
|
['communityResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createCommunityCR(oc *exutil.CLI, community communityResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", community.template, "-p", "NAME="+community.name, "NAMESPACE="+community.namespace,
"COMMUNITYNAME="+community.communityName, "VALUE="+community.value1+":"+community.value2)
if err != nil {
e2e.Logf("Error creating Community %v", err)
return false
}
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
dccd61db-08d7-4375-b251-012ec6d7ccad
|
checkServiceEvents
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func checkServiceEvents(oc *exutil.CLI, svcName string, namespace string, reason string) (bool, string) {
fieldSelectorArgs := fmt.Sprintf("reason=%s,involvedObject.kind=Service,involvedObject.name=%s", reason, svcName)
result := false
message := ""
errCheck := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
var svcEvents string
svcEvents, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", namespace, "--field-selector", fieldSelectorArgs).Output()
if err != nil {
return false, nil
}
if !strings.Contains(svcEvents, "No resources found") {
for _, index := range strings.Split(svcEvents, "\n") {
if strings.Contains(index, reason) {
e2e.Logf("Processing event %s for service", index)
if reason == "AllocationFailed" {
messageString := strings.Split(index, ":")
message = messageString[1]
}
result = true
}
}
return true, nil
}
return false, nil
})
if errCheck != nil {
return result, ""
}
return result, message
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9f2be543-76e4-4777-b70e-6141be6ea661
|
checkLogLevelPod
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func checkLogLevelPod(oc *exutil.CLI, component string, opNamespace string, level string) (bool, string) {
var podLogLevelOutput string
var err error
if component == "controller" {
podLogLevelOutput, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("pod", "-n", opNamespace, "-l", "component=controller", "-ojson").Output()
if err != nil {
e2e.Logf("Failed to get pod details due to %v", err)
return false, "Get request to get controller pod failed"
}
} else {
speakerPodList, err := exutil.GetAllPodsWithLabel(oc, opNamespace, "component=speaker")
if err != nil {
e2e.Logf("Failed to get pod %v", err)
return false, "Get request to get speaker pod failed"
}
if len(speakerPodList) == 0 {
return false, "Speaker pod list is empty"
}
podLogLevelOutput, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("pod", speakerPodList[0], "-n", opNamespace, "-ojson").Output()
if err != nil {
e2e.Logf("Failed to get details of pod %s due to %v", speakerPodList[0], err)
return false, "Get request to get log level of speaker pod failed"
}
}
if podLogLevelOutput == "" {
return false, fmt.Sprintf("Failed to get log level of %s pod", component)
}
if strings.Contains(podLogLevelOutput, "--log-level="+level) {
return true, ""
}
return false, fmt.Sprintf("The log level %s not set for %s pod", level, component)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
921c41fc-8158-4607-95a6-da0142a13653
|
checkPrometheusMetrics
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func checkPrometheusMetrics(oc *exutil.CLI, interval time.Duration, timeout time.Duration, pollImmediate bool, metrics string, matchExpected bool) (bool, error) {
prometheusURL := "localhost:9090/api/v1/query?query=" + metrics
var metricsOutput string
var err error
metricsErr := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, pollImmediate, func(ctx context.Context) (bool, error) {
metricsOutput, err = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "prometheus-k8s-0", "--", "curl", prometheusURL).Output()
if err != nil {
e2e.Logf("Could not get metrics %s status and trying again, the error is:%v", metrics, err)
return false, nil
}
if matchExpected && !strings.Contains(metricsOutput, metrics) {
return false, nil
}
if !matchExpected && strings.Contains(metricsOutput, metrics) {
return false, nil
}
e2e.Logf("Metrics output %s", metricsOutput)
return true, nil
})
exutil.AssertWaitPollNoErr(metricsErr, fmt.Sprintf("Failed to get metric status due to %v", metricsErr))
return true, nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4e2668d9-9d7b-412a-9529-e0f318d982cf
|
createBFDProfileCR
|
['"fmt"', '"strconv"']
|
['bfdProfileResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createBFDProfileCR(oc *exutil.CLI, bfdProfile bfdProfileResource) (status bool) {
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", bfdProfile.template, "-p", "NAME="+bfdProfile.name, "NAMESPACE="+bfdProfile.namespace,
"DETECTMULTIPLIER="+strconv.Itoa(int(bfdProfile.detectMultiplier)), "ECHOMODE="+strconv.FormatBool(bfdProfile.echoMode),
"ECHORECEIVEINTERVAL="+strconv.Itoa(int(bfdProfile.echoReceiveInterval)), "ECHOTRANSMITINTERVAL="+strconv.Itoa(int(bfdProfile.transmitInterval)),
"MINIMUMTTL="+strconv.Itoa(int(bfdProfile.minimumTtl)), "PASSIVEMODE="+strconv.FormatBool(bfdProfile.passiveMode),
"RECEIVEINTERVAL="+strconv.Itoa(int(bfdProfile.receiveInterval)), "TRANSMITINTERVAL="+strconv.Itoa(int(bfdProfile.transmitInterval)))
if err != nil {
e2e.Logf(fmt.Sprintf("Error creating BFD profile %v", err))
return false
}
return true
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
822ef344-c571-42bd-9488-3ee500d7e277
|
checkBFDSessions
|
['"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func checkBFDSessions(oc *exutil.CLI, ns string) (status bool) {
cmd := []string{"-n", ns, bgpRouterPodName, "--", "vtysh", "-c", "show bfd peers brief"}
errCheck := wait.Poll(60*time.Second, 120*time.Second, func() (bool, error) {
e2e.Logf("Checking status of BFD session")
bfdOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
o.Expect(bfdOutput).NotTo(o.BeEmpty())
if err != nil {
return false, nil
}
if strings.Contains(bfdOutput, "down") {
e2e.Logf("Failed to establish BFD session between router and speakers, Trying again")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Establishing BFD session between router and speakers timed out")
e2e.Logf("BFD session established")
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
b550f11c-b4cf-48f5-8d9a-a6164616d3d7
|
verifyHostPrefixAdvertised
|
['"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func verifyHostPrefixAdvertised(oc *exutil.CLI, ns string, expectedHostPrefixes []string) bool {
e2e.Logf("Checking host prefix")
cmd := []string{"-n", ns, bgpRouterPodName, "--", "vtysh", "-c", "show ip route bgp"}
routeOutput, routeErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
o.Expect(routeErr).NotTo(o.HaveOccurred())
for _, hostPrefix := range expectedHostPrefixes {
if strings.Contains(routeOutput, hostPrefix) {
e2e.Logf("Found host prefix %s", hostPrefix)
} else {
e2e.Logf("Failed to found host prefix %s", hostPrefix)
return false
}
}
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
8b8cbb7c-9e55-4b44-957e-065a5846c002
|
checkBGPv4RouteTableEntry
|
['"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func checkBGPv4RouteTableEntry(oc *exutil.CLI, ns string, entry string, expectedPaths []string) bool {
cmd := []string{"-n", ns, bgpRouterPodName, "--", "vtysh", "-c", "show bgp ipv4 unicast " + entry}
errCheck := wait.Poll(60*time.Second, 120*time.Second, func() (bool, error) {
e2e.Logf("Checking BGP route table for entry " + entry)
routeOutput, routeErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args(cmd...).Output()
o.Expect(routeErr).NotTo(o.HaveOccurred())
if routeErr != nil {
return false, nil
}
for _, path := range expectedPaths {
if strings.Contains(routeOutput, path) {
e2e.Logf("Found expected: %s", path)
} else {
e2e.Logf("Failed to found expected: %s", path)
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, "Checking BGP route table timed out")
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
eaaab0f9-87cb-45d9-8404-3401a4b7cfbe
|
createMetalLBAffinityCR
|
['metalLBAffinityCRResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func createMetalLBAffinityCR(oc *exutil.CLI, metallbcr metalLBAffinityCRResource) (status bool) {
g.By("Creating MetalLB Affinity CR from template")
err := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", metallbcr.template, "-p", "NAME="+metallbcr.name, "NAMESPACE="+metallbcr.namespace,
"PARAM1="+metallbcr.param1, "PARAM2="+metallbcr.param2)
if err != nil {
e2e.Logf("Error creating MetalLB CR %v", err)
return false
}
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
8427c5d4-985a-4b9e-8e24-2fd27da6aa2f
|
getRouterPodNamespace
|
github.com/openshift/openshift-tests-private/test/extended/networking/metallb_util.go
|
func getRouterPodNamespace(oc *exutil.CLI) string {
routerNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-A", "-l", "name=router-pod", "--no-headers", "-o=custom-columns=NAME:.metadata.namespace").Output()
if err != nil {
return ""
}
return routerNS
}
|
networking
| |||||
test
|
openshift/openshift-tests-private
|
34a699cd-21b5-49f2-b782-83fc8d43aebc
|
microshift
|
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
package networking
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN microshift", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIWithoutNamespace("SDN-microshift")
ipStackType string
)
g.BeforeEach(func() {
ipStackType = checkMicroshiftIPStackType(oc)
e2e.Logf("This cluster is %s microshift", ipStackType)
})
// author: [email protected]
g.It("MicroShiftOnly-Author:anusaxen-Critical-60331-mixed ingress and egress policies can work well", func() {
var (
caseID = "60331"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace1 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
e2eTestNamespace2 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloSdnFile = filepath.Join(buildPruningBaseDir, "hellosdn.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress_49696.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/ingress_49696.yaml")
)
exutil.By("Create 1st namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
exutil.By("create test pods")
createResourceFromFile(oc, e2eTestNamespace1, testPodFile)
createResourceFromFile(oc, e2eTestNamespace1, helloSdnFile)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, e2eTestNamespace1, "name=test-pods"), fmt.Sprintf("this pod with label name=test-pods in ns/%s not ready", e2eTestNamespace1))
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, e2eTestNamespace1, "name=hellosdn"), fmt.Sprintf("this pod with label name=hellosdn in ns/%s not ready", e2eTestNamespace1))
hellosdnPodNameNs1 := getPodName(oc, e2eTestNamespace1, "name=hellosdn")
exutil.By("create egress type networkpolicy in ns1")
createResourceFromFile(oc, e2eTestNamespace1, egressTypeFile)
exutil.By("create ingress type networkpolicy in ns1")
createResourceFromFile(oc, e2eTestNamespace1, ingressTypeFile)
exutil.By("#. Create 2nd namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
exutil.By("create test pods in second namespace")
createResourceFromFile(oc, e2eTestNamespace2, helloSdnFile)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, e2eTestNamespace2, "name=hellosdn"), fmt.Sprintf("this pod with label name=hellosdn in ns/%s not ready", e2eTestNamespace2))
exutil.By("Get IP of the test pods in second namespace.")
hellosdnPodNameNs2 := getPodName(oc, e2eTestNamespace2, "name=hellosdn")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
hellosdnPodIP1Ns2 := getPodIPv4(oc, e2eTestNamespace2, hellosdnPodNameNs2[0])
exutil.By("curl from ns1 hellosdn pod to ns2 pod")
_, err := e2eoutput.RunHostCmd(e2eTestNamespace1, hellosdnPodNameNs1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(hellosdnPodIP1Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
hellosdnPodIP1Ns2_v6 := getPodIPv6(oc, e2eTestNamespace2, hellosdnPodNameNs2[0], ipStackType)
exutil.By("curl from ns1 hellosdn pod to ns2 pod")
_, err := e2eoutput.RunHostCmd(e2eTestNamespace1, hellosdnPodNameNs1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(hellosdnPodIP1Ns2_v6, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:anusaxen-High-60332-Network Policies should work with OVNKubernetes when traffic hairpins back to the same source through a service", func() {
var (
caseID = "60332"
baseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
allowfromsameNS = filepath.Join(baseDir, "networkpolicy/allow-from-same-namespace.yaml")
svcIP string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod1",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 1st hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod1")
pod_pmtrs = map[string]string{
"$podname": "hello-pod2",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 2nd hello pod in same namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod2")
svc_pmtrs := map[string]string{
"$servicename": "test-service",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "Cluster",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "PreferDualStack",
"$selector": "hello-pod",
"$serviceType": "ClusterIP",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By("create allow-from-same-namespace ingress networkpolicy in ns")
createResourceFromFile(oc, e2eTestNamespace, allowfromsameNS)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
exutil.By("Get Pod IPs")
helloPod1IP := getPodIPv4(oc, e2eTestNamespace, "hello-pod1")
helloPod2IP := getPodIPv4(oc, e2eTestNamespace, "hello-pod2")
exutil.By("Get svc IP")
svcIP = getSvcIPv4(oc, e2eTestNamespace, "test-service")
exutil.By("curl hello-pod1 to hello-pod2")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod1IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to hello-pod1")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod2IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
for i := 0; i < 5; i++ {
exutil.By("curl hello-pod1 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
}
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
exutil.By("Get Pod IPs")
helloPod1IP := getPodIPv6(oc, e2eTestNamespace, "hello-pod1", ipStackType)
helloPod2IP := getPodIPv6(oc, e2eTestNamespace, "hello-pod2", ipStackType)
exutil.By("Get svc IP")
if ipStackType == "ipv6single" {
svcIP = getSvcIPv6SingleStack(oc, e2eTestNamespace, "test-service")
} else {
svcIP = getSvcIPv6(oc, e2eTestNamespace, "test-service")
}
exutil.By("curl hello-pod1 to hello-pod2")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod1IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to hello-pod1")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod2IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
for i := 0; i < 5; i++ {
exutil.By("curl hello-pod1 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
}
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:anusaxen-High-60426-podSelector allow-to and allow-from can work together", func() {
var (
caseID = "60426"
baseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace1 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
e2eTestNamespace2 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
ingressTypeFile = filepath.Join(baseDir, "networkpolicy/default-deny-ingress.yaml")
allowfromRed = filepath.Join(baseDir, "microshift/np-allow-from-red.yaml")
allowtoBlue = filepath.Join(baseDir, "microshift/np-allow-to-blue.yaml")
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
exutil.By("create 4 test pods in e2enamespace1")
for i := 0; i < 4; i++ {
pod_pmtrs := map[string]string{
"$podname": "test-pod" + strconv.Itoa(i),
"$namespace": e2eTestNamespace1,
"$label": "test-pod" + strconv.Itoa(i),
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace1, "test-pod"+strconv.Itoa(i))
}
var testPodNS1 [4]string
var testPodIPNS1 [4]string
var testPodIPNS1_v6 [4]string
exutil.By("Get IP of the test pods in e2eTestNamespace1 namespace.")
for i := 0; i < 4; i++ {
testPodNS1[i] = strings.Join(getPodName(oc, e2eTestNamespace1, "name=test-pod"+strconv.Itoa(i)), "")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
testPodIPNS1[i] = getPodIPv4(oc, e2eTestNamespace1, testPodNS1[i])
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
testPodIPNS1_v6[i] = getPodIPv6(oc, e2eTestNamespace1, testPodNS1[i], ipStackType)
}
}
// label pod0 and pod1 with type=red and type=blue respectively in e2eTestNamespace1
err := exutil.LabelPod(oc, e2eTestNamespace1, testPodNS1[0], "type=red")
o.Expect(err).NotTo(o.HaveOccurred())
err = exutil.LabelPod(oc, e2eTestNamespace1, testPodNS1[1], "type=blue")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create 2nd namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
exutil.By("create 2 test pods in e2enamespace1")
for i := 0; i < 2; i++ {
pod_pmtrs := map[string]string{
"$podname": "test-pod" + strconv.Itoa(i),
"$namespace": e2eTestNamespace2,
"$label": "test-pod" + strconv.Itoa(i),
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace2, "test-pod"+strconv.Itoa(i))
}
var testPodNS2 [2]string
var testPodIPNS2 [2]string
var testPodIPNS2_v6 [2]string
exutil.By("Get IP of the test pods in e2eTestNamespace2 namespace.")
for i := 0; i < 2; i++ {
testPodNS2[i] = strings.Join(getPodName(oc, e2eTestNamespace2, "name=test-pod"+strconv.Itoa(i)), "")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
testPodIPNS2[i] = getPodIPv4(oc, e2eTestNamespace2, testPodNS2[i])
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
testPodIPNS2_v6[i] = getPodIPv6(oc, e2eTestNamespace2, testPodNS2[i], ipStackType)
}
}
// label pod0 with type=red in e2eTestNamespace2
err = exutil.LabelPod(oc, e2eTestNamespace2, testPodNS2[0], "type=red")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("create default deny ingress type networkpolicy in 1st namespace")
createResourceFromFile(oc, e2eTestNamespace1, ingressTypeFile)
exutil.By("create allow-from-red and allow-from-blue type networkpolicy in 1st namespace")
createResourceFromFile(oc, e2eTestNamespace1, allowfromRed)
createResourceFromFile(oc, e2eTestNamespace1, allowtoBlue)
exutil.By("Try to access the pod in e2eTestNamespace1 from each pod")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
exutil.By("curl testPodNS10 to testPodNS13")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("Try to access the pod from e2eTestNamespace2 now")
exutil.By("curl testPodNS20 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
exutil.By("curl testPodNS10 to testPodNS13")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("Try to access the pod from e2eTestNamespace2 now")
exutil.By("curl testPodNS20 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:qiowang-High-60290-Idling/Unidling services", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
namespace = "test-60290"
)
exutil.By("create namespace")
defer oc.DeleteSpecifiedNamespaceAsAdmin(namespace)
oc.CreateSpecifiedNamespaceAsAdmin(namespace)
exutil.By("create test pods with rc and service")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", testSvcFile, "-n", namespace).Execute()
createResourceFromFile(oc, namespace, testSvcFile)
waitForPodWithLabelReady(oc, namespace, "name=test-pods")
svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace).Output()
o.Expect(svcErr).NotTo(o.HaveOccurred())
o.Expect(svcOutput).To(o.ContainSubstring("test-service"))
exutil.By("idle test-service")
idleOutput, idleErr := oc.AsAdmin().WithoutNamespace().Run("idle").Args("-n", namespace, "test-service").Output()
o.Expect(idleErr).NotTo(o.HaveOccurred())
o.Expect(idleOutput).To(o.ContainSubstring("The service \"%v/test-service\" has been marked as idled", namespace))
exutil.By("check test pods are terminated")
getPodOutput := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
output, getPodErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace).Output()
o.Expect(getPodErr).NotTo(o.HaveOccurred())
e2e.Logf("pods status: %s", output)
if strings.Contains(output, "No resources found") {
return true, nil
}
e2e.Logf("pods are not terminated, try again")
return false, nil
})
exutil.AssertWaitPollNoErr(getPodOutput, fmt.Sprintf("Fail to terminate pods:%s", getPodOutput))
// for micorshift: unidling is not supported now, and manual re-scaling the replicas is required
// https://issues.redhat.com/browse/USHIFT-503
exutil.By("re-scaling the replicas")
_, rescaleErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("replicationcontroller/test-rc", "-n", namespace, "-p", "{\"spec\":{\"replicas\":2}}", "--type=merge").Output()
o.Expect(rescaleErr).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, namespace, "name=test-pods")
})
// author: [email protected]
g.It("Author:weliang-MicroShiftOnly-Medium-60550-Pod should be accessible via node ip and host port", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "hostport-pod.yaml")
ns = "test-ocp-60550"
)
exutil.By("create a test namespace")
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create a test pod")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", testPodFile, "-n", ns).Execute()
createResourceFromFile(oc, ns, testPodFile)
waitForPodWithLabelReady(oc, ns, "name=hostport-pod")
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
exutil.By("Get the IP address from the worker node")
nodeIPv4 := getNodeIPv4(oc, ns, nodeList.Items[0].Name)
exutil.By("Verify the pod should be accessible via nodeIP:hostport")
ipv4URL := net.JoinHostPort(nodeIPv4, "9500")
curlOutput, err := exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", ipv4URL, "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(curlOutput).To(o.ContainSubstring("Hello Hostport Pod"))
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
exutil.By("Get the IP address from the worker node")
nodeIPv6 := getMicroshiftNodeIPV6(oc)
exutil.By("Verify the pod should be accessible via nodeIP:hostport")
ipv6URL := net.JoinHostPort(nodeIPv6, "9500")
curlOutput, err := exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", ipv6URL, "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(curlOutput).To(o.ContainSubstring("Hello Hostport Pod"))
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:anusaxen-High-60746-Check nodeport service for external/internal traffic policy and via secondary nic works well on Microshift[Disruptive]", func() {
var (
caseID = "60746"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
nodeName string
etp string
itp string
nodeIP string
serviceName string
output string
)
if ipStackType == "ipv6single" {
// can not run on ipv6 as secondary nic doesn't have available ipv6 address.
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
exutil.By("Creating hello pod in namespace")
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
exutil.By("Creating test pod in namespace")
pod_pmtrs = map[string]string{
"$podname": "test-pod",
"$namespace": e2eTestNamespace,
"$label": "test-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "test-pod")
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
secNICip := getSecondaryNICip(oc)
//in first iteration we will create Clustr-Cluter ETP and ITP services and in 2nd iteration it will be Local-Local
for j := 0; j < 2; j++ {
if j == 0 {
itp = ""
etp = ""
exutil.By("Create NodePort service with ETP and ITP as Cluster")
serviceName = "nptest-etp-itp-cluster"
} else {
etp = "Local"
itp = "Local"
exutil.By("Create NodePort service with ETP and ITP as Local")
serviceName = "nptest-etp-itp-local"
}
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": itp,
"$externalTrafficPolicy": etp,
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "NodePort",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By(fmt.Sprintf("Get service port and NodeIP value for service %s", serviceName))
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL := net.JoinHostPort(nodeIP, nodePort)
sec_nic_url := net.JoinHostPort(secNICip, nodePort)
//Check ETP and ITP Cluster and Local type services via debugnode and test pod respectively
// Access service from nodeIP to validate ETP Cluster/Local. Default emty svc_pmtrs will create both ETP and ITP as Cluster in first iteration
exutil.By(fmt.Sprintf("Curl NodePort service %s on node IP", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
//Access service via secondary NIC to simulate ETP Cluster/Local
exutil.By(fmt.Sprintf("Curl NodePort service %s on secondary node IP", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", sec_nic_url, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
// Access service from cluster's pod network to validate ITP Cluster/Local
exutil.By(fmt.Sprintf("Curl NodePort Service %s again from a test pod", serviceName))
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "test-pod", "curl --connect-timeout 5 -s "+svcURL)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
}
//following block of code is to test impact of firewalld reload on any of service created earlier
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "nptest-etp-itp-cluster", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL := net.JoinHostPort(nodeIP, nodePort)
exutil.By("Reload the firewalld and then check nodeport service still can be worked")
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --reload")
o.Expect(err).NotTo(o.HaveOccurred())
firewallState, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --state")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(firewallState).To(o.ContainSubstring("running"))
_, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "10")
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
// modified: [email protected]
g.It("MicroShiftOnly-Author:zzhao-Critical-60968-Check loadbalance service with different external and internal traffic policies works well on Microshift", func() {
var (
caseID = "60968"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
etp string
itp string
nodeName string
serviceName string
output string
nodeIPlist []string
nodeIP string
svcURL string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
exutil.By("Creating pods in namespace")
for j := 0; j < 2; j++ {
pod_pmtrs := map[string]string{
"$podname": "hello-pod-" + strconv.Itoa(j),
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod-"+strconv.Itoa(j))
}
exutil.By("Creating test pod in namespace")
pod_pmtrs := map[string]string{
"$podname": "test-pod",
"$namespace": e2eTestNamespace,
"$label": "test-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "test-pod")
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod-0", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
} else {
nodeIP = getMicroshiftNodeIPV6(oc)
}
e2e.Logf("nodeip list is %v", nodeIPlist)
policy := [2]string{"Cluster", "Local"}
for i := 0; i < 2; i++ {
if i == 0 {
itp = ""
etp = policy[i]
exutil.By(fmt.Sprintf("Create LoadBalance service with ETP and ITP as %s", policy[i]))
serviceName = "lbtest-etp-itp-" + strings.ToLower(etp)
} else {
etp = policy[i]
itp = policy[i]
exutil.By(fmt.Sprintf("Create LoadBalance service with ETP and ITP as %s", policy[i]))
serviceName = "lbtest-etp-itp-" + strings.ToLower(policy[i])
}
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": itp,
"$externalTrafficPolicy": etp,
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By(fmt.Sprintf("Get service port and NodeIP value for service %s", serviceName))
svcPort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].port}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if serviceName == "lbtest-etp-itp-cluster" {
svcURL = net.JoinHostPort(nodeIP, svcPort)
//Access service from host networked pod
exutil.By(fmt.Sprintf("Curl LoadBalance service %s", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Delete lb service %s", serviceName))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", serviceName, "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL = net.JoinHostPort(nodeIP, svcPort)
//firewalld entries are removed when service is deleted
exutil.By(fmt.Sprintf("Curl LoadBalance service %s again", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
} else {
svcURL = net.JoinHostPort(nodeIP, svcPort)
// Access service from within cluster from pod on cluster network
exutil.By(fmt.Sprintf("Curl loadbalance Service %s from within cluster", serviceName))
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "test-pod", "curl --connect-timeout 5 -s "+svcURL)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Delete lb service %s", serviceName))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", serviceName, "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL = net.JoinHostPort(nodeIP, svcPort)
exutil.By(fmt.Sprintf("Curl loadbalance Service %s again from within cluster", serviceName))
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "test-pod", "curl --connect-timeout 5 -s "+svcURL)
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
}
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:zzhao-Medium-61218-only one loadbalance can be located at same time if creating multi loadbalance service with same port[Serial]", func() {
var (
caseID = "61218"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
nodeIP string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
exutil.By("Create one loadbalance service")
svc_pmtrs := map[string]string{
"$servicename": "lbtest",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By("Create second loadbalance service")
svc_pmtrs2 := map[string]string{
"$servicename": "lbtest2",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs2)
exutil.By("Get service port and NodeIP value")
svcPort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest", "-o=jsonpath={.spec.ports[*].port}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
} else {
nodeIP = getMicroshiftNodeIPV6(oc)
}
exutil.By("Check first lb service get node ip")
lbIngressip, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest", "-o=jsonpath={.status.loadBalancer.ingress[*].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lbIngressip).Should(o.ContainSubstring(nodeIP))
exutil.By("Check second lb service should't get node ip")
lbIngressip2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest2", "-o=jsonpath={.status.loadBalancer.ingress[*].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lbIngressip2).ShouldNot(o.ContainSubstring(nodeIP))
svcURL := net.JoinHostPort(nodeIP, svcPort)
exutil.By("curl loadbalance Service")
output, err := exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("Delete lb service")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "lbtest", "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
output1 := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
lbIngressip2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest2", "-o=jsonpath={.status.loadBalancer.ingress[*].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(lbIngressip2, nodeIP) {
return true, nil
}
e2e.Logf("second loadbalance still not get node ip")
return false, nil
})
exutil.AssertWaitPollNoErr(output1, fmt.Sprintf("lbtest2 cannot get the nodeip:%s", output1))
exutil.By("check lbtest2 ingressip can be accessed")
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
})
// author: [email protected]
g.It("MicroShiftOnly-Author:zzhao-Medium-61168-hostnetwork pods and container pods should be able to access kubernets svc api after reboot cluster[Disruptive]", func() {
var (
caseID = "61168"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
)
if ipStackType == "ipv6single" {
// svc api is ipv4 address
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
hellosdnPodName := getPodName(oc, e2eTestNamespace, "name=hello-pod")
exutil.By("using dns resolve as hostnetwork pods for checking")
dnsPodName := getPodName(oc, "openshift-dns", "dns.operator.openshift.io/daemonset-node-resolver=")
exutil.By("Check container pod and hostnetwork can access kubernete api")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("HTTP/2 403"))
output1, err := e2eoutput.RunHostCmd("openshift-dns", dnsPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output1).Should(o.ContainSubstring("HTTP/2 403"))
exutil.By("reboot node")
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
rebootUshiftNode(oc, nodeName)
exutil.By("Check container pod can access kubernete api")
curlOutput := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
if strings.Contains(output, "HTTP/2 403") {
return true, nil
}
e2e.Logf("pods are not ready, try again")
return false, nil
})
exutil.AssertWaitPollNoErr(curlOutput, fmt.Sprintf("Fail to terminate pods:%s", curlOutput))
exutil.By("Check hostnetwork can access kubernete api")
curlHostnetworkOutput := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
output, err = e2eoutput.RunHostCmd("openshift-dns", dnsPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
if strings.Contains(output, "HTTP/2 403") {
return true, nil
}
e2e.Logf("dns pods are not ready, try again")
return false, nil
})
exutil.AssertWaitPollNoErr(curlHostnetworkOutput, fmt.Sprintf("Fail to terminate pods:%s", curlHostnetworkOutput))
})
// author: [email protected]
g.It("MicroShiftOnly-Author:zzhao-Medium-61164-ovn MTU can be updated if it's value is less than default interface mtu[Disruptive]", func() {
var (
caseID = "61164"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
mtu = "1400"
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
hellosdnPodName := getPodName(oc, e2eTestNamespace, "name=hello-pod")
exutil.By("Update the cluster MTU to 1400")
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
setMTU(oc, nodeName, mtu)
defer rollbackMTU(oc, nodeName)
exutil.By("Create one new pods to check the mtu")
pod_pmtrs1 := map[string]string{
"$podname": "hello-pod2",
"$namespace": e2eTestNamespace,
"$label": "hello-pod2",
}
createPingPodforUshift(oc, pod_pmtrs1)
waitPodReady(oc, e2eTestNamespace, "hello-pod2")
hellosdnPodName2 := getPodName(oc, e2eTestNamespace, "name=hello-pod2")
exutil.By("Check new created pod mtu changed")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName2[0], "cat /sys/class/net/eth0/mtu")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(mtu))
exutil.By("check existing pod mtu changed")
output2, err := e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName[0], "cat /sys/class/net/eth0/mtu")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output2).Should(o.ContainSubstring(mtu))
})
// author: [email protected]
g.It("MicroShiftOnly-Author:zzhao-Medium-61161-Expose coredns forward as configurable option[Disruptive][Flaky]", func() {
// need confirm support or not
g.Skip("skip this case")
exutil.By("Check the default coredns config file")
dnsConfigMap, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-dns", "cm", "dns-default", "-o=jsonpath={.data.Corefile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(dnsConfigMap).Should(o.ContainSubstring("forward . /etc/resolv.conf"))
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodeName := nodeList.Items[0].Name
exutil.By("cp the default dns config file to a new path")
cpNewConfig := "mkdir /run/systemd/resolve && cp /etc/resolv.conf /run/systemd/resolve/resolv.conf && systemctl restart microshift"
rmDnsConfig := "rm -fr /run/systemd/resolve && systemctl restart microshift"
defer func() {
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", rmDnsConfig)
output := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
dnsConfigMap, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-dns", "cm", "dns-default", "-o=jsonpath={.data.Corefile}").Output()
if strings.Contains(dnsConfigMap, "/etc/resolv.conf") {
return true, nil
}
e2e.Logf("dns config has not been updated")
return false, nil
})
exutil.AssertWaitPollNoErr(output, fmt.Sprintf("Fail to updated dns configmap:%s", output))
}()
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cpNewConfig)
exutil.By("Check the coredns is consuming the new config file")
output := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
dnsConfigMap, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-dns", "cm", "dns-default", "-o=jsonpath={.data.Corefile}").Output()
if strings.Contains(dnsConfigMap, "/run/systemd/resolve/resolv.conf") {
return true, nil
}
e2e.Logf("dns config has not been updated")
return false, nil
})
exutil.AssertWaitPollNoErr(output, fmt.Sprintf("Fail to updated dns configmap:%s", output))
})
// author: [email protected]
g.It("MicroShiftOnly-Author:huirwang-High-60969-Blocking external access to the NodePort service on a specific host interface. [Disruptive]", func() {
var (
caseID = "60969"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
nodeIP string
svcURL string
ipDropCmd string
)
//only run on ipv4, as no route to cluster ipv6 from external
if ipStackType == "ipv6single" {
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
svc_pmtrs := map[string]string{
"$servicename": "test-service-etp-cluster",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "PreferDualStack",
"$selector": "hello-pod",
"$serviceType": "NodePort",
}
createServiceforUshift(oc, svc_pmtrs)
svc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "test-service-etp-cluster").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(svc).Should(o.ContainSubstring("test-service-etp-cluster"))
exutil.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "test-service-etp-cluster", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
svcURL = net.JoinHostPort(nodeIP, nodePort)
exutil.By("curl NodePort Service")
curlNodeCmd := fmt.Sprintf("curl %s -s --connect-timeout 5", svcURL)
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Insert a new rule in the nat table PREROUTING chain to drop all packets that match the destination port and IP address")
defer removeIPRules(oc, nodePort, nodeIP, nodeName)
ipDropCmd = fmt.Sprintf("nft -a insert rule ip nat PREROUTING tcp dport %v ip daddr %s drop", nodePort, nodeIP)
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", ipDropCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NodePort Service is blocked")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).To(o.HaveOccurred())
exutil.By("Remove the added new rule")
removeIPRules(oc, nodePort, nodeIP, nodeName)
exutil.By("Verify the NodePort service can be accessed again.")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("MicroShiftOnly-Author:asood-High-64753-Check disabling IPv4 forwarding makes the nodeport service inaccessible. [Disruptive]", func() {
var (
caseID = "64753"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
serviceName = "test-service-" + caseID
)
//only run on ipv4
if ipStackType == "ipv6single" {
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "NodePort",
}
createServiceforUshift(oc, svc_pmtrs)
svc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(svc).Should(o.ContainSubstring(serviceName))
exutil.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
nodeIP := getNodeIPv4(oc, e2eTestNamespace, nodeName)
svcURL := net.JoinHostPort(nodeIP, nodePort)
e2e.Logf("Service URL %s", svcURL)
exutil.By("Curl NodePort Service")
curlNodeCmd := fmt.Sprintf("curl %s -s --connect-timeout 5", svcURL)
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Disable IPv4 forwarding")
enableIPv4ForwardingCmd := fmt.Sprintf("sysctl -w net.ipv4.ip_forward=1")
disableIPv4ForwardingCmd := fmt.Sprintf("sysctl -w net.ipv4.ip_forward=0")
defer func() {
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", enableIPv4ForwardingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", disableIPv4ForwardingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NodePort Service is no longer accessible")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).To(o.HaveOccurred())
exutil.By("Enable IPv4 forwarding")
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", enableIPv4ForwardingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify the NodePort service can be accessed again.")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("MicroShiftOnly-Author:huirwang-Medium-61162-Hostname changes should not block ovn. [Disruptive]", func() {
var (
caseID = "61162"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
)
g.Skip("skip this case")
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) > 0).To(o.BeTrue())
nodeName := nodeList.Items[0].Name
exutil.By("Change node hostname")
newHostname := fmt.Sprintf("%v.61162", nodeName)
e2e.Logf("Changing the host name to %v", newHostname)
setHostnameCmd := fmt.Sprintf("hostnamectl set-hostname %v", newHostname)
defer func() {
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "hostnamectl set-hostname \"\" ;hostnamectl set-hostname --transient "+nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
restartMicroshiftService(oc, nodeName)
}()
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", setHostnameCmd)
o.Expect(err).NotTo(o.HaveOccurred())
restartMicroshiftService(oc, nodeName)
exutil.By("Verify the ovn pods running well.")
err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-master")
exutil.AssertWaitPollNoErr(err, "wait for ovnkube-master pods ready timeout")
err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.AssertWaitPollNoErr(err, "wait for ovnkube-node pods ready timeout")
exutil.By("Verify the hostname is new hostname ")
hostnameOutput, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "cat /etc/hostname")
o.Expect(err).NotTo(o.HaveOccurred())
pattern := `dhcp.*\.61162`
re := regexp.MustCompile(pattern)
cuhostname := re.FindString(hostnameOutput)
e2e.Logf("Current hostname is %v,expected hostname is %v", cuhostname, newHostname)
o.Expect(cuhostname == newHostname).To(o.BeTrue())
exutil.By("Verify test pods working well.")
pod_pmtrs := map[string]string{
"$podname": "hello-pod1",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 1st hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod1")
pod_pmtrs = map[string]string{
"$podname": "hello-pod2",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 2nd hello pod in same namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod2")
exutil.By("curl hello-pod2 to hello-pod1")
helloPod1IP := getPodIPv4(oc, e2eTestNamespace, "hello-pod1")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod1IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
})
// author: [email protected]
g.It("MicroShiftOnly-Author:anusaxen-High-64752-Conntrack rule deletion for UDP traffic when NodePort service ep gets deleted", func() {
var (
caseID = "64752"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
udpListenerPod = filepath.Join(buildPruningBaseDir, "udp-listener.yaml")
udpListenerPodIP string
nodeIP string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod client in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
exutil.By("create UDP Listener Pod")
createResourceFromFile(oc, e2eTestNamespace, udpListenerPod)
err := waitForPodWithLabelReady(oc, e2eTestNamespace, "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "this pod with label name=udp-pod not ready")
//expose udp pod to nodeport service
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("pod", "udp-pod", "-n", e2eTestNamespace, "--type=NodePort", "--port=8080", "--protocol=UDP").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
exutil.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "udp-pod", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//send a test packet to udp endpoint which will create an udp conntrack entry on the node
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
udpListenerPodIP = getPodIPv4(oc, e2eTestNamespace, "udp-pod")
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeList.Items[0].Name)
} else {
udpListenerPodIP = getPodIPv6(oc, e2eTestNamespace, "udp-pod", ipStackType)
nodeIP = getMicroshiftNodeIPV6(oc)
}
cmd_traffic := " for n in {1..3}; do echo $n; sleep 1; done > /dev/udp/" + nodeIP + "/" + nodePort
_, err = exutil.RemoteShPodWithBash(oc, e2eTestNamespace, "hello-pod", cmd_traffic)
o.Expect(err).NotTo(o.HaveOccurred())
//make sure the corresponding conntrack entry exists for the udp endpoint
output, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "conntrack", "-L", "-p", "udp", "--dport", "8080")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, udpListenerPodIP)).Should(o.BeTrue())
_, err = oc.WithoutNamespace().Run("delete").Args("pod", "-n", e2eTestNamespace, "udp-pod").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//make sure the corresponding conntrack entry goes away as we deleted udp endpoint above
output, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "conntrack", "-L", "-p", "udp", "--dport", "8080")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, udpListenerPodIP)).ShouldNot(o.BeTrue())
})
// author: [email protected]
g.It("MicroShiftOnly-Author:asood-Medium-63770-Ensure LoadBalancer service serving pods on hostnetwork or cluster network accessible only from primary node IP address and continues to serve after firewalld reload[Disruptive]", func() {
var (
caseID = "63770"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
)
if ipStackType == "ipv6single" {
// can not run on ipv6 as secondary nic doesn't have available ipv6 address.
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
pod_pmtrs := map[string]string{
"$podname": "hello-pod-host",
"$namespace": e2eTestNamespace,
"$label": "hello-pod-host",
"$nodename": nodeList.Items[0].Name,
}
exutil.By("Creating hello pod on host network in namespace")
createHostNetworkedPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod-host")
pod_pmtrs = map[string]string{
"$podname": "hello-pod-cluster",
"$namespace": e2eTestNamespace,
"$label": "hello-pod-cluster",
}
exutil.By("Creating hello pod on cluster network in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod-cluster")
secNICIP := getSecondaryNICip(oc)
podType := [2]string{"host", "cluster"}
for _, svcSuffix := range podType {
exutil.By(fmt.Sprintf("Creating service for hello pod on %s network in namespace", svcSuffix))
serviceName := "test-service-" + svcSuffix
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "PreferDualStack",
"$selector": "hello-pod-" + svcSuffix,
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By(fmt.Sprintf("Construct the URLs for the %s service", serviceName))
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod-"+svcSuffix, "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeIP := getNodeIPv4(oc, e2eTestNamespace, nodeName)
svcPort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].port}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL := net.JoinHostPort(nodeIP, svcPort)
secNICURL := net.JoinHostPort(secNICIP, svcPort)
exutil.By(fmt.Sprintf("Checking service for hello pod on %s network is accessible on primary interface", svcSuffix))
output, err := exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Checking service for hello pod on %s network is not accessible on secondary interface", svcSuffix))
output, err = exutil.DebugNode(oc, nodeName, "curl", secNICURL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("Reload the firewalld")
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --reload")
o.Expect(err).NotTo(o.HaveOccurred())
firewallState, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --state")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(firewallState).To(o.ContainSubstring("running"))
exutil.By(fmt.Sprintf("Checking service for hello pod on %s network is accessible after firewalld reload", svcSuffix))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Delete LB service %s", serviceName))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", serviceName, "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
})
// author: [email protected]
g.It("MicroShiftOnly-Author:jechen-High-65838-br-ex interface should be unmanaged by NetworkManager", func() {
caseID := "65838"
exutil.By("Create a namespace")
e2eTestNamespace := "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
exutil.By("Check if br-ex on the node is unmanaged")
e2e.Logf("Check br-ex on node %v", nodeList.Items[0].Name)
connections, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "nmcli conn show")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(connections, "br-ex")).To(o.BeFalse())
})
// author: [email protected]
g.It("MicroShiftOnly-Author:jechen-High-65840-Killing openvswitch service should reconcile OVN control plane back to normal [Disruptive]", func() {
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
exutil.By("Kill openvswitch on the node")
e2e.Logf("Kill openvswitch on node %v", nodeList.Items[0].Name)
_, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "pkill -9 -f openvswitch")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check ovs-vswitchd and ovsdb-server are back into active running state")
output, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "systemctl status ovs-vswitchd")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("active (running)"))
output, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "systemctl status ovsdb-server")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("active (running)"))
exutil.By("Check all pods in openshift-ovn-kubernetes are back to normal in running state")
statusErr := waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "component=network")
o.Expect(statusErr).NotTo(o.HaveOccurred())
})
g.It("Author:weliang-MicroShiftOnly-Medium-72796-Multus CNI bridge with host-local. [Disruptive]", func() {
var (
nadName = "bridge-host-local"
caseID = "72796"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "bridge-host-local-pod1"
pod2Name = "bridge-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using bridge with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "bridge",
"$mode": " ",
"$ipamtype": "host-local",
"$ipv4range": "192.168.20.0/24",
"$ipv6range": "fd00:dead:beef:20::/64",
"$v4rangestart": "192.168.20.1",
"$v4rangeend": "192.168.20.9",
"$v6rangestart": "fd00:dead:beef:20::1",
"$v6rangeend": "fd00:dead:beef:20::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.20.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:20::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.20.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:20::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
g.It("Author:weliang-MicroShiftOnly-Medium-72797-Multus CNI bridge with static. [Disruptive]", func() {
var (
nadName = "bridge-static"
caseID = "72797"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "bridge-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using bridge with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "bridge",
"$mode": " ",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
g.It("Author:weliang-MicroShiftOnly-Medium-72798-Multus CNI bridge with dhcp. [Disruptive]", func() {
var (
nadName = "bridge-dhcp"
caseID = "72798"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "bridge-dhcp-pod1"
pod2Name = "bridge-dhcp-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-DHCP.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) > 0).To(o.BeTrue())
nodeName := nodeList.Items[0].Name
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
defer disableDHCPforCNI(oc, nodeName)
enableDHCPforCNI(oc, nodeName)
exutil.By("Configuring a NetworkAttachmentDefinition using bridge with dhcp")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "bridge",
"$mode": " ",
"$bridgename": "testbr1",
"$ipamtype": "dhcp",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Configuring first pod to get additional network")
pod1_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod1_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "88.8.8.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10:")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "88.8.8.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10:")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
g.It("Author:weliang-MicroShiftOnly-Medium-72799-Multus CNI macvlan/bridge with host-local. [Disruptive]", func() {
var (
nadName = "macvlan-bridge-host-local"
caseID = "72799"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-bridge-host-local-pod1"
pod2Name = "macvlan-bridge-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/bridge with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "bridge",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
g.It("Author:weliang-MicroShiftOnly-Medium-72904-Multus CNI macvlan/bridge with static. [Disruptive]", func() {
var (
nadName = "macvlan-bridge-static"
caseID = "72904"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-bridge-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/bridge with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "bridge",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
g.It("Author:weliang-MicroShiftOnly-Medium-73082-Multus CNI macvlan/private with host-local. [Disruptive]", func() {
var (
nadName = "macvlan-private-host-local"
caseID = "73082"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-private-host-local-pod1"
pod2Name = "macvlan-private-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/private with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "private",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
})
g.It("Author:weliang-MicroShiftOnly-Medium-73083-Multus CNI macvlan/private with static. [Disruptive]", func() {
var (
nadName = "macvlan-private-static"
caseID = "73083"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-private-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/private with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "private",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
g.It("Author:weliang-MicroShiftOnly-Medium-73084-Multus CNI macvlan/vepa with static. [Disruptive]", func() {
var (
nadName = "macvlan-vepa-static"
caseID = "73084"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-vepa-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/vepa with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "vepa",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
g.It("Author:weliang-MicroShiftOnly-Medium-73085-Multus CNI macvlan/vepa with host-local. [Disruptive]", func() {
var (
nadName = "macvlan-vepa-host-local"
caseID = "73085"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-vepa-host-local-pod1"
pod2Name = "macvlan-vepa-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/vepa with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "vepa",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
})
g.It("Author:weliang-MicroShiftOnly-Medium-73086-Multus CNI ipvlan/l2 with static. [Disruptive]", func() {
var (
nadName = "ipvlan-l2-static"
caseID = "73086"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l2-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l2 with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l2",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
exutil.By("Checking if the IPs from pod1's secondary interface are assigned the static addresses")
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
g.It("Author:weliang-MicroShiftOnly-Medium-73087-Multus CNI ipvlan/l2 with host-local. [Disruptive]", func() {
var (
nadName = "ipvlan-l2-host-local"
caseID = "73087"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l2-host-local-pod1"
pod2Name = "ipvlan-l2-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l2 with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l2",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
g.It("Author:weliang-MicroShiftOnly-Medium-73098-Multus CNI ipvlan/l3 with host-local. [Disruptive]", func() {
var (
nadName = "ipvlan-l3-host-local"
caseID = "73098"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l3-host-local-pod1"
pod2Name = "ipvlan-l3-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l3 with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l3",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
g.It("Author:weliang-MicroShiftOnly-Medium-73099-Multus CNI ipvlan/l3 with static. [Disruptive]", func() {
var (
nadName = "ipvlan-l3-static"
caseID = "73099"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l3-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l3 with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l3",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
exutil.By("Checking if the IPs from pod1's secondary interface are assigned the static addresses")
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
})
|
package networking
| ||||
test case
|
openshift/openshift-tests-private
|
c3239cfb-364c-4f45-afd7-a84d3419ef85
|
MicroShiftOnly-Author:anusaxen-Critical-60331-mixed ingress and egress policies can work well
|
['"fmt"', '"net"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:anusaxen-Critical-60331-mixed ingress and egress policies can work well", func() {
var (
caseID = "60331"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace1 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
e2eTestNamespace2 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
testPodFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
helloSdnFile = filepath.Join(buildPruningBaseDir, "hellosdn.yaml")
egressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/egress_49696.yaml")
ingressTypeFile = filepath.Join(buildPruningBaseDir, "networkpolicy/ingress_49696.yaml")
)
exutil.By("Create 1st namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
exutil.By("create test pods")
createResourceFromFile(oc, e2eTestNamespace1, testPodFile)
createResourceFromFile(oc, e2eTestNamespace1, helloSdnFile)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, e2eTestNamespace1, "name=test-pods"), fmt.Sprintf("this pod with label name=test-pods in ns/%s not ready", e2eTestNamespace1))
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, e2eTestNamespace1, "name=hellosdn"), fmt.Sprintf("this pod with label name=hellosdn in ns/%s not ready", e2eTestNamespace1))
hellosdnPodNameNs1 := getPodName(oc, e2eTestNamespace1, "name=hellosdn")
exutil.By("create egress type networkpolicy in ns1")
createResourceFromFile(oc, e2eTestNamespace1, egressTypeFile)
exutil.By("create ingress type networkpolicy in ns1")
createResourceFromFile(oc, e2eTestNamespace1, ingressTypeFile)
exutil.By("#. Create 2nd namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
exutil.By("create test pods in second namespace")
createResourceFromFile(oc, e2eTestNamespace2, helloSdnFile)
exutil.AssertWaitPollNoErr(waitForPodWithLabelReady(oc, e2eTestNamespace2, "name=hellosdn"), fmt.Sprintf("this pod with label name=hellosdn in ns/%s not ready", e2eTestNamespace2))
exutil.By("Get IP of the test pods in second namespace.")
hellosdnPodNameNs2 := getPodName(oc, e2eTestNamespace2, "name=hellosdn")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
hellosdnPodIP1Ns2 := getPodIPv4(oc, e2eTestNamespace2, hellosdnPodNameNs2[0])
exutil.By("curl from ns1 hellosdn pod to ns2 pod")
_, err := e2eoutput.RunHostCmd(e2eTestNamespace1, hellosdnPodNameNs1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(hellosdnPodIP1Ns2, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
hellosdnPodIP1Ns2_v6 := getPodIPv6(oc, e2eTestNamespace2, hellosdnPodNameNs2[0], ipStackType)
exutil.By("curl from ns1 hellosdn pod to ns2 pod")
_, err := e2eoutput.RunHostCmd(e2eTestNamespace1, hellosdnPodNameNs1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(hellosdnPodIP1Ns2_v6, "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(err.Error()).Should(o.ContainSubstring("exit status 28"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
c94494c9-0644-4c99-a785-2903b301fa38
|
MicroShiftOnly-Author:anusaxen-High-60332-Network Policies should work with OVNKubernetes when traffic hairpins back to the same source through a service
|
['"net"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:anusaxen-High-60332-Network Policies should work with OVNKubernetes when traffic hairpins back to the same source through a service", func() {
var (
caseID = "60332"
baseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
allowfromsameNS = filepath.Join(baseDir, "networkpolicy/allow-from-same-namespace.yaml")
svcIP string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod1",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 1st hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod1")
pod_pmtrs = map[string]string{
"$podname": "hello-pod2",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 2nd hello pod in same namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod2")
svc_pmtrs := map[string]string{
"$servicename": "test-service",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "Cluster",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "PreferDualStack",
"$selector": "hello-pod",
"$serviceType": "ClusterIP",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By("create allow-from-same-namespace ingress networkpolicy in ns")
createResourceFromFile(oc, e2eTestNamespace, allowfromsameNS)
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
exutil.By("Get Pod IPs")
helloPod1IP := getPodIPv4(oc, e2eTestNamespace, "hello-pod1")
helloPod2IP := getPodIPv4(oc, e2eTestNamespace, "hello-pod2")
exutil.By("Get svc IP")
svcIP = getSvcIPv4(oc, e2eTestNamespace, "test-service")
exutil.By("curl hello-pod1 to hello-pod2")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod1IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to hello-pod1")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod2IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
for i := 0; i < 5; i++ {
exutil.By("curl hello-pod1 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
}
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
exutil.By("Get Pod IPs")
helloPod1IP := getPodIPv6(oc, e2eTestNamespace, "hello-pod1", ipStackType)
helloPod2IP := getPodIPv6(oc, e2eTestNamespace, "hello-pod2", ipStackType)
exutil.By("Get svc IP")
if ipStackType == "ipv6single" {
svcIP = getSvcIPv6SingleStack(oc, e2eTestNamespace, "test-service")
} else {
svcIP = getSvcIPv6(oc, e2eTestNamespace, "test-service")
}
exutil.By("curl hello-pod1 to hello-pod2")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod1IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to hello-pod1")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod2IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
for i := 0; i < 5; i++ {
exutil.By("curl hello-pod1 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod1", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl hello-pod2 to service:port")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(svcIP, "27017"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
5620563e-e44c-4584-9476-721c7a509e76
|
MicroShiftOnly-Author:anusaxen-High-60426-podSelector allow-to and allow-from can work together
|
['"net"', '"path/filepath"', '"strconv"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:anusaxen-High-60426-podSelector allow-to and allow-from can work together", func() {
var (
caseID = "60426"
baseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace1 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
e2eTestNamespace2 = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
ingressTypeFile = filepath.Join(baseDir, "networkpolicy/default-deny-ingress.yaml")
allowfromRed = filepath.Join(baseDir, "microshift/np-allow-from-red.yaml")
allowtoBlue = filepath.Join(baseDir, "microshift/np-allow-to-blue.yaml")
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace1)
exutil.By("create 4 test pods in e2enamespace1")
for i := 0; i < 4; i++ {
pod_pmtrs := map[string]string{
"$podname": "test-pod" + strconv.Itoa(i),
"$namespace": e2eTestNamespace1,
"$label": "test-pod" + strconv.Itoa(i),
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace1, "test-pod"+strconv.Itoa(i))
}
var testPodNS1 [4]string
var testPodIPNS1 [4]string
var testPodIPNS1_v6 [4]string
exutil.By("Get IP of the test pods in e2eTestNamespace1 namespace.")
for i := 0; i < 4; i++ {
testPodNS1[i] = strings.Join(getPodName(oc, e2eTestNamespace1, "name=test-pod"+strconv.Itoa(i)), "")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
testPodIPNS1[i] = getPodIPv4(oc, e2eTestNamespace1, testPodNS1[i])
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
testPodIPNS1_v6[i] = getPodIPv6(oc, e2eTestNamespace1, testPodNS1[i], ipStackType)
}
}
// label pod0 and pod1 with type=red and type=blue respectively in e2eTestNamespace1
err := exutil.LabelPod(oc, e2eTestNamespace1, testPodNS1[0], "type=red")
o.Expect(err).NotTo(o.HaveOccurred())
err = exutil.LabelPod(oc, e2eTestNamespace1, testPodNS1[1], "type=blue")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create 2nd namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace2)
exutil.By("create 2 test pods in e2enamespace1")
for i := 0; i < 2; i++ {
pod_pmtrs := map[string]string{
"$podname": "test-pod" + strconv.Itoa(i),
"$namespace": e2eTestNamespace2,
"$label": "test-pod" + strconv.Itoa(i),
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace2, "test-pod"+strconv.Itoa(i))
}
var testPodNS2 [2]string
var testPodIPNS2 [2]string
var testPodIPNS2_v6 [2]string
exutil.By("Get IP of the test pods in e2eTestNamespace2 namespace.")
for i := 0; i < 2; i++ {
testPodNS2[i] = strings.Join(getPodName(oc, e2eTestNamespace2, "name=test-pod"+strconv.Itoa(i)), "")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
testPodIPNS2[i] = getPodIPv4(oc, e2eTestNamespace2, testPodNS2[i])
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
testPodIPNS2_v6[i] = getPodIPv6(oc, e2eTestNamespace2, testPodNS2[i], ipStackType)
}
}
// label pod0 with type=red in e2eTestNamespace2
err = exutil.LabelPod(oc, e2eTestNamespace2, testPodNS2[0], "type=red")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("create default deny ingress type networkpolicy in 1st namespace")
createResourceFromFile(oc, e2eTestNamespace1, ingressTypeFile)
exutil.By("create allow-from-red and allow-from-blue type networkpolicy in 1st namespace")
createResourceFromFile(oc, e2eTestNamespace1, allowfromRed)
createResourceFromFile(oc, e2eTestNamespace1, allowtoBlue)
exutil.By("Try to access the pod in e2eTestNamespace1 from each pod")
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
exutil.By("curl testPodNS10 to testPodNS13")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("Try to access the pod from e2eTestNamespace2 now")
exutil.By("curl testPodNS20 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
exutil.By("curl testPodNS10 to testPodNS13")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[0], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS12 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace1, testPodNS1[2], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("Try to access the pod from e2eTestNamespace2 now")
exutil.By("curl testPodNS20 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS11")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[1], "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("curl testPodNS21 to testPodNS13")
output, err = e2eoutput.RunHostCmd(e2eTestNamespace2, testPodNS2[1], "curl --connect-timeout 5 -s "+net.JoinHostPort(testPodIPNS1_v6[3], "8080"))
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
907b2005-ff4e-462e-9a6a-0e38b2861c72
|
MicroShiftOnly-Author:qiowang-High-60290-Idling/Unidling services
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:qiowang-High-60290-Idling/Unidling services", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testSvcFile = filepath.Join(buildPruningBaseDir, "testpod.yaml")
namespace = "test-60290"
)
exutil.By("create namespace")
defer oc.DeleteSpecifiedNamespaceAsAdmin(namespace)
oc.CreateSpecifiedNamespaceAsAdmin(namespace)
exutil.By("create test pods with rc and service")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", testSvcFile, "-n", namespace).Execute()
createResourceFromFile(oc, namespace, testSvcFile)
waitForPodWithLabelReady(oc, namespace, "name=test-pods")
svcOutput, svcErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", namespace).Output()
o.Expect(svcErr).NotTo(o.HaveOccurred())
o.Expect(svcOutput).To(o.ContainSubstring("test-service"))
exutil.By("idle test-service")
idleOutput, idleErr := oc.AsAdmin().WithoutNamespace().Run("idle").Args("-n", namespace, "test-service").Output()
o.Expect(idleErr).NotTo(o.HaveOccurred())
o.Expect(idleOutput).To(o.ContainSubstring("The service \"%v/test-service\" has been marked as idled", namespace))
exutil.By("check test pods are terminated")
getPodOutput := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
output, getPodErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace).Output()
o.Expect(getPodErr).NotTo(o.HaveOccurred())
e2e.Logf("pods status: %s", output)
if strings.Contains(output, "No resources found") {
return true, nil
}
e2e.Logf("pods are not terminated, try again")
return false, nil
})
exutil.AssertWaitPollNoErr(getPodOutput, fmt.Sprintf("Fail to terminate pods:%s", getPodOutput))
// for micorshift: unidling is not supported now, and manual re-scaling the replicas is required
// https://issues.redhat.com/browse/USHIFT-503
exutil.By("re-scaling the replicas")
_, rescaleErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("replicationcontroller/test-rc", "-n", namespace, "-p", "{\"spec\":{\"replicas\":2}}", "--type=merge").Output()
o.Expect(rescaleErr).NotTo(o.HaveOccurred())
waitForPodWithLabelReady(oc, namespace, "name=test-pods")
})
| |||||
test case
|
openshift/openshift-tests-private
|
ea32971a-9353-41f7-817f-bd021a27d713
|
Author:weliang-MicroShiftOnly-Medium-60550-Pod should be accessible via node ip and host port
|
['"context"', '"net"', '"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-60550-Pod should be accessible via node ip and host port", func() {
var (
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
testPodFile = filepath.Join(buildPruningBaseDir, "hostport-pod.yaml")
ns = "test-ocp-60550"
)
exutil.By("create a test namespace")
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
defer exutil.RecoverNamespaceRestricted(oc, ns)
exutil.SetNamespacePrivileged(oc, ns)
exutil.By("create a test pod")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", testPodFile, "-n", ns).Execute()
createResourceFromFile(oc, ns, testPodFile)
waitForPodWithLabelReady(oc, ns, "name=hostport-pod")
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
exutil.By("Get the IP address from the worker node")
nodeIPv4 := getNodeIPv4(oc, ns, nodeList.Items[0].Name)
exutil.By("Verify the pod should be accessible via nodeIP:hostport")
ipv4URL := net.JoinHostPort(nodeIPv4, "9500")
curlOutput, err := exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", ipv4URL, "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(curlOutput).To(o.ContainSubstring("Hello Hostport Pod"))
}
if ipStackType == "ipv6single" || ipStackType == "dualstack" {
exutil.By("Get the IP address from the worker node")
nodeIPv6 := getMicroshiftNodeIPV6(oc)
exutil.By("Verify the pod should be accessible via nodeIP:hostport")
ipv6URL := net.JoinHostPort(nodeIPv6, "9500")
curlOutput, err := exutil.DebugNode(oc, nodeList.Items[0].Name, "curl", ipv6URL, "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(curlOutput).To(o.ContainSubstring("Hello Hostport Pod"))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
931c2112-9b61-40c3-90a1-d3baa60c1a31
|
MicroShiftOnly-Author:anusaxen-High-60746-Check nodeport service for external/internal traffic policy and via secondary nic works well on Microshift[Disruptive]
|
['"fmt"', '"net"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:anusaxen-High-60746-Check nodeport service for external/internal traffic policy and via secondary nic works well on Microshift[Disruptive]", func() {
var (
caseID = "60746"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
nodeName string
etp string
itp string
nodeIP string
serviceName string
output string
)
if ipStackType == "ipv6single" {
// can not run on ipv6 as secondary nic doesn't have available ipv6 address.
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
exutil.By("Creating hello pod in namespace")
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
exutil.By("Creating test pod in namespace")
pod_pmtrs = map[string]string{
"$podname": "test-pod",
"$namespace": e2eTestNamespace,
"$label": "test-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "test-pod")
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
secNICip := getSecondaryNICip(oc)
//in first iteration we will create Clustr-Cluter ETP and ITP services and in 2nd iteration it will be Local-Local
for j := 0; j < 2; j++ {
if j == 0 {
itp = ""
etp = ""
exutil.By("Create NodePort service with ETP and ITP as Cluster")
serviceName = "nptest-etp-itp-cluster"
} else {
etp = "Local"
itp = "Local"
exutil.By("Create NodePort service with ETP and ITP as Local")
serviceName = "nptest-etp-itp-local"
}
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": itp,
"$externalTrafficPolicy": etp,
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "NodePort",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By(fmt.Sprintf("Get service port and NodeIP value for service %s", serviceName))
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL := net.JoinHostPort(nodeIP, nodePort)
sec_nic_url := net.JoinHostPort(secNICip, nodePort)
//Check ETP and ITP Cluster and Local type services via debugnode and test pod respectively
// Access service from nodeIP to validate ETP Cluster/Local. Default emty svc_pmtrs will create both ETP and ITP as Cluster in first iteration
exutil.By(fmt.Sprintf("Curl NodePort service %s on node IP", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
//Access service via secondary NIC to simulate ETP Cluster/Local
exutil.By(fmt.Sprintf("Curl NodePort service %s on secondary node IP", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", sec_nic_url, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
// Access service from cluster's pod network to validate ITP Cluster/Local
exutil.By(fmt.Sprintf("Curl NodePort Service %s again from a test pod", serviceName))
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "test-pod", "curl --connect-timeout 5 -s "+svcURL)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
}
//following block of code is to test impact of firewalld reload on any of service created earlier
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "nptest-etp-itp-cluster", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL := net.JoinHostPort(nodeIP, nodePort)
exutil.By("Reload the firewalld and then check nodeport service still can be worked")
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --reload")
o.Expect(err).NotTo(o.HaveOccurred())
firewallState, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --state")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(firewallState).To(o.ContainSubstring("running"))
_, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "10")
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
193b24af-fe2c-457e-96aa-ebd917457550
|
MicroShiftOnly-Author:zzhao-Critical-60968-Check loadbalance service with different external and internal traffic policies works well on Microshift
|
['"fmt"', '"net"', '"strconv"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:zzhao-Critical-60968-Check loadbalance service with different external and internal traffic policies works well on Microshift", func() {
var (
caseID = "60968"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
etp string
itp string
nodeName string
serviceName string
output string
nodeIPlist []string
nodeIP string
svcURL string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
exutil.By("Creating pods in namespace")
for j := 0; j < 2; j++ {
pod_pmtrs := map[string]string{
"$podname": "hello-pod-" + strconv.Itoa(j),
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod-"+strconv.Itoa(j))
}
exutil.By("Creating test pod in namespace")
pod_pmtrs := map[string]string{
"$podname": "test-pod",
"$namespace": e2eTestNamespace,
"$label": "test-pod",
}
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "test-pod")
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod-0", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
} else {
nodeIP = getMicroshiftNodeIPV6(oc)
}
e2e.Logf("nodeip list is %v", nodeIPlist)
policy := [2]string{"Cluster", "Local"}
for i := 0; i < 2; i++ {
if i == 0 {
itp = ""
etp = policy[i]
exutil.By(fmt.Sprintf("Create LoadBalance service with ETP and ITP as %s", policy[i]))
serviceName = "lbtest-etp-itp-" + strings.ToLower(etp)
} else {
etp = policy[i]
itp = policy[i]
exutil.By(fmt.Sprintf("Create LoadBalance service with ETP and ITP as %s", policy[i]))
serviceName = "lbtest-etp-itp-" + strings.ToLower(policy[i])
}
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": itp,
"$externalTrafficPolicy": etp,
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By(fmt.Sprintf("Get service port and NodeIP value for service %s", serviceName))
svcPort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].port}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if serviceName == "lbtest-etp-itp-cluster" {
svcURL = net.JoinHostPort(nodeIP, svcPort)
//Access service from host networked pod
exutil.By(fmt.Sprintf("Curl LoadBalance service %s", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Delete lb service %s", serviceName))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", serviceName, "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL = net.JoinHostPort(nodeIP, svcPort)
//firewalld entries are removed when service is deleted
exutil.By(fmt.Sprintf("Curl LoadBalance service %s again", serviceName))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
} else {
svcURL = net.JoinHostPort(nodeIP, svcPort)
// Access service from within cluster from pod on cluster network
exutil.By(fmt.Sprintf("Curl loadbalance Service %s from within cluster", serviceName))
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "test-pod", "curl --connect-timeout 5 -s "+svcURL)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Delete lb service %s", serviceName))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", serviceName, "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL = net.JoinHostPort(nodeIP, svcPort)
exutil.By(fmt.Sprintf("Curl loadbalance Service %s again from within cluster", serviceName))
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, "test-pod", "curl --connect-timeout 5 -s "+svcURL)
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
9d443666-5371-4700-a534-91b352e3def2
|
MicroShiftOnly-Author:zzhao-Medium-61218-only one loadbalance can be located at same time if creating multi loadbalance service with same port[Serial]
|
['"fmt"', '"net"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:zzhao-Medium-61218-only one loadbalance can be located at same time if creating multi loadbalance service with same port[Serial]", func() {
var (
caseID = "61218"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
nodeIP string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
exutil.By("Create one loadbalance service")
svc_pmtrs := map[string]string{
"$servicename": "lbtest",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By("Create second loadbalance service")
svc_pmtrs2 := map[string]string{
"$servicename": "lbtest2",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs2)
exutil.By("Get service port and NodeIP value")
svcPort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest", "-o=jsonpath={.spec.ports[*].port}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
} else {
nodeIP = getMicroshiftNodeIPV6(oc)
}
exutil.By("Check first lb service get node ip")
lbIngressip, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest", "-o=jsonpath={.status.loadBalancer.ingress[*].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lbIngressip).Should(o.ContainSubstring(nodeIP))
exutil.By("Check second lb service should't get node ip")
lbIngressip2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest2", "-o=jsonpath={.status.loadBalancer.ingress[*].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lbIngressip2).ShouldNot(o.ContainSubstring(nodeIP))
svcURL := net.JoinHostPort(nodeIP, svcPort)
exutil.By("curl loadbalance Service")
output, err := exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By("Delete lb service")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "lbtest", "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
output1 := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
lbIngressip2, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "lbtest2", "-o=jsonpath={.status.loadBalancer.ingress[*].ip}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(lbIngressip2, nodeIP) {
return true, nil
}
e2e.Logf("second loadbalance still not get node ip")
return false, nil
})
exutil.AssertWaitPollNoErr(output1, fmt.Sprintf("lbtest2 cannot get the nodeip:%s", output1))
exutil.By("check lbtest2 ingressip can be accessed")
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
6cd3c359-9c23-4a1f-916b-1df50a6da866
|
MicroShiftOnly-Author:zzhao-Medium-61168-hostnetwork pods and container pods should be able to access kubernets svc api after reboot cluster[Disruptive]
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:zzhao-Medium-61168-hostnetwork pods and container pods should be able to access kubernets svc api after reboot cluster[Disruptive]", func() {
var (
caseID = "61168"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
)
if ipStackType == "ipv6single" {
// svc api is ipv4 address
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
hellosdnPodName := getPodName(oc, e2eTestNamespace, "name=hello-pod")
exutil.By("using dns resolve as hostnetwork pods for checking")
dnsPodName := getPodName(oc, "openshift-dns", "dns.operator.openshift.io/daemonset-node-resolver=")
exutil.By("Check container pod and hostnetwork can access kubernete api")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("HTTP/2 403"))
output1, err := e2eoutput.RunHostCmd("openshift-dns", dnsPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output1).Should(o.ContainSubstring("HTTP/2 403"))
exutil.By("reboot node")
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
rebootUshiftNode(oc, nodeName)
exutil.By("Check container pod can access kubernete api")
curlOutput := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
output, err = e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
if strings.Contains(output, "HTTP/2 403") {
return true, nil
}
e2e.Logf("pods are not ready, try again")
return false, nil
})
exutil.AssertWaitPollNoErr(curlOutput, fmt.Sprintf("Fail to terminate pods:%s", curlOutput))
exutil.By("Check hostnetwork can access kubernete api")
curlHostnetworkOutput := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
output, err = e2eoutput.RunHostCmd("openshift-dns", dnsPodName[0], "curl -I --connect-timeout 5 https://10.43.0.1:443 -k")
if strings.Contains(output, "HTTP/2 403") {
return true, nil
}
e2e.Logf("dns pods are not ready, try again")
return false, nil
})
exutil.AssertWaitPollNoErr(curlHostnetworkOutput, fmt.Sprintf("Fail to terminate pods:%s", curlHostnetworkOutput))
})
| |||||
test case
|
openshift/openshift-tests-private
|
a39b027e-5a4d-41b4-b525-21db8276a632
|
MicroShiftOnly-Author:zzhao-Medium-61164-ovn MTU can be updated if it's value is less than default interface mtu[Disruptive]
|
['"net"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:zzhao-Medium-61164-ovn MTU can be updated if it's value is less than default interface mtu[Disruptive]", func() {
var (
caseID = "61164"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
mtu = "1400"
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
hellosdnPodName := getPodName(oc, e2eTestNamespace, "name=hello-pod")
exutil.By("Update the cluster MTU to 1400")
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
setMTU(oc, nodeName, mtu)
defer rollbackMTU(oc, nodeName)
exutil.By("Create one new pods to check the mtu")
pod_pmtrs1 := map[string]string{
"$podname": "hello-pod2",
"$namespace": e2eTestNamespace,
"$label": "hello-pod2",
}
createPingPodforUshift(oc, pod_pmtrs1)
waitPodReady(oc, e2eTestNamespace, "hello-pod2")
hellosdnPodName2 := getPodName(oc, e2eTestNamespace, "name=hello-pod2")
exutil.By("Check new created pod mtu changed")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName2[0], "cat /sys/class/net/eth0/mtu")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring(mtu))
exutil.By("check existing pod mtu changed")
output2, err := e2eoutput.RunHostCmd(e2eTestNamespace, hellosdnPodName[0], "cat /sys/class/net/eth0/mtu")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output2).Should(o.ContainSubstring(mtu))
})
| |||||
test case
|
openshift/openshift-tests-private
|
53955346-dbac-4aa4-926e-6639801941cb
|
MicroShiftOnly-Author:zzhao-Medium-61161-Expose coredns forward as configurable option[Disruptive][Flaky]
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:zzhao-Medium-61161-Expose coredns forward as configurable option[Disruptive][Flaky]", func() {
// need confirm support or not
g.Skip("skip this case")
exutil.By("Check the default coredns config file")
dnsConfigMap, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-dns", "cm", "dns-default", "-o=jsonpath={.data.Corefile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(dnsConfigMap).Should(o.ContainSubstring("forward . /etc/resolv.conf"))
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodeName := nodeList.Items[0].Name
exutil.By("cp the default dns config file to a new path")
cpNewConfig := "mkdir /run/systemd/resolve && cp /etc/resolv.conf /run/systemd/resolve/resolv.conf && systemctl restart microshift"
rmDnsConfig := "rm -fr /run/systemd/resolve && systemctl restart microshift"
defer func() {
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", rmDnsConfig)
output := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
dnsConfigMap, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-dns", "cm", "dns-default", "-o=jsonpath={.data.Corefile}").Output()
if strings.Contains(dnsConfigMap, "/etc/resolv.conf") {
return true, nil
}
e2e.Logf("dns config has not been updated")
return false, nil
})
exutil.AssertWaitPollNoErr(output, fmt.Sprintf("Fail to updated dns configmap:%s", output))
}()
exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", cpNewConfig)
exutil.By("Check the coredns is consuming the new config file")
output := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
dnsConfigMap, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-dns", "cm", "dns-default", "-o=jsonpath={.data.Corefile}").Output()
if strings.Contains(dnsConfigMap, "/run/systemd/resolve/resolv.conf") {
return true, nil
}
e2e.Logf("dns config has not been updated")
return false, nil
})
exutil.AssertWaitPollNoErr(output, fmt.Sprintf("Fail to updated dns configmap:%s", output))
})
| |||||
test case
|
openshift/openshift-tests-private
|
7b451aaa-8cca-412d-898e-2eb167442b96
|
MicroShiftOnly-Author:huirwang-High-60969-Blocking external access to the NodePort service on a specific host interface. [Disruptive]
|
['"fmt"', '"net"', '"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:huirwang-High-60969-Blocking external access to the NodePort service on a specific host interface. [Disruptive]", func() {
var (
caseID = "60969"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
nodeIP string
svcURL string
ipDropCmd string
)
//only run on ipv4, as no route to cluster ipv6 from external
if ipStackType == "ipv6single" {
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
svc_pmtrs := map[string]string{
"$servicename": "test-service-etp-cluster",
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "PreferDualStack",
"$selector": "hello-pod",
"$serviceType": "NodePort",
}
createServiceforUshift(oc, svc_pmtrs)
svc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "test-service-etp-cluster").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(svc).Should(o.ContainSubstring("test-service-etp-cluster"))
exutil.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "test-service-etp-cluster", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeName)
svcURL = net.JoinHostPort(nodeIP, nodePort)
exutil.By("curl NodePort Service")
curlNodeCmd := fmt.Sprintf("curl %s -s --connect-timeout 5", svcURL)
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Insert a new rule in the nat table PREROUTING chain to drop all packets that match the destination port and IP address")
defer removeIPRules(oc, nodePort, nodeIP, nodeName)
ipDropCmd = fmt.Sprintf("nft -a insert rule ip nat PREROUTING tcp dport %v ip daddr %s drop", nodePort, nodeIP)
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", ipDropCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NodePort Service is blocked")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).To(o.HaveOccurred())
exutil.By("Remove the added new rule")
removeIPRules(oc, nodePort, nodeIP, nodeName)
exutil.By("Verify the NodePort service can be accessed again.")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
ed4155cc-420a-4ead-ad2c-d0e673122ebe
|
MicroShiftOnly-Author:asood-High-64753-Check disabling IPv4 forwarding makes the nodeport service inaccessible. [Disruptive]
|
['"fmt"', '"net"', '"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:asood-High-64753-Check disabling IPv4 forwarding makes the nodeport service inaccessible. [Disruptive]", func() {
var (
caseID = "64753"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
serviceName = "test-service-" + caseID
)
//only run on ipv4
if ipStackType == "ipv6single" {
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "",
"$selector": "hello-pod",
"$serviceType": "NodePort",
}
createServiceforUshift(oc, svc_pmtrs)
svc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(svc).Should(o.ContainSubstring(serviceName))
exutil.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod", "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(podErr).NotTo(o.HaveOccurred())
nodeIP := getNodeIPv4(oc, e2eTestNamespace, nodeName)
svcURL := net.JoinHostPort(nodeIP, nodePort)
e2e.Logf("Service URL %s", svcURL)
exutil.By("Curl NodePort Service")
curlNodeCmd := fmt.Sprintf("curl %s -s --connect-timeout 5", svcURL)
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Disable IPv4 forwarding")
enableIPv4ForwardingCmd := fmt.Sprintf("sysctl -w net.ipv4.ip_forward=1")
disableIPv4ForwardingCmd := fmt.Sprintf("sysctl -w net.ipv4.ip_forward=0")
defer func() {
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", enableIPv4ForwardingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", disableIPv4ForwardingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify NodePort Service is no longer accessible")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).To(o.HaveOccurred())
exutil.By("Enable IPv4 forwarding")
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", enableIPv4ForwardingCmd)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify the NodePort service can be accessed again.")
_, err = exec.Command("bash", "-c", curlNodeCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
288bb088-b85d-45f6-85ce-ec1dc654d284
|
MicroShiftOnly-Author:huirwang-Medium-61162-Hostname changes should not block ovn. [Disruptive]
|
['"context"', '"fmt"', '"net"', '"regexp"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:huirwang-Medium-61162-Hostname changes should not block ovn. [Disruptive]", func() {
var (
caseID = "61162"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
)
g.Skip("skip this case")
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) > 0).To(o.BeTrue())
nodeName := nodeList.Items[0].Name
exutil.By("Change node hostname")
newHostname := fmt.Sprintf("%v.61162", nodeName)
e2e.Logf("Changing the host name to %v", newHostname)
setHostnameCmd := fmt.Sprintf("hostnamectl set-hostname %v", newHostname)
defer func() {
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "hostnamectl set-hostname \"\" ;hostnamectl set-hostname --transient "+nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
restartMicroshiftService(oc, nodeName)
}()
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", setHostnameCmd)
o.Expect(err).NotTo(o.HaveOccurred())
restartMicroshiftService(oc, nodeName)
exutil.By("Verify the ovn pods running well.")
err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-master")
exutil.AssertWaitPollNoErr(err, "wait for ovnkube-master pods ready timeout")
err = waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "app=ovnkube-node")
exutil.AssertWaitPollNoErr(err, "wait for ovnkube-node pods ready timeout")
exutil.By("Verify the hostname is new hostname ")
hostnameOutput, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "cat /etc/hostname")
o.Expect(err).NotTo(o.HaveOccurred())
pattern := `dhcp.*\.61162`
re := regexp.MustCompile(pattern)
cuhostname := re.FindString(hostnameOutput)
e2e.Logf("Current hostname is %v,expected hostname is %v", cuhostname, newHostname)
o.Expect(cuhostname == newHostname).To(o.BeTrue())
exutil.By("Verify test pods working well.")
pod_pmtrs := map[string]string{
"$podname": "hello-pod1",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 1st hello pod in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod1")
pod_pmtrs = map[string]string{
"$podname": "hello-pod2",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("create 2nd hello pod in same namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod2")
exutil.By("curl hello-pod2 to hello-pod1")
helloPod1IP := getPodIPv4(oc, e2eTestNamespace, "hello-pod1")
output, err := e2eoutput.RunHostCmd(e2eTestNamespace, "hello-pod2", "curl --connect-timeout 5 -s "+net.JoinHostPort(helloPod1IP, "8080"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
645c439c-9f99-489e-9998-5ca2391e6885
|
MicroShiftOnly-Author:anusaxen-High-64752-Conntrack rule deletion for UDP traffic when NodePort service ep gets deleted
|
['"context"', '"path/filepath"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:anusaxen-High-64752-Conntrack rule deletion for UDP traffic when NodePort service ep gets deleted", func() {
var (
caseID = "64752"
buildPruningBaseDir = exutil.FixturePath("testdata", "networking")
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
udpListenerPod = filepath.Join(buildPruningBaseDir, "udp-listener.yaml")
udpListenerPodIP string
nodeIP string
)
exutil.By("Create a namespace for the scenario")
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
pod_pmtrs := map[string]string{
"$podname": "hello-pod",
"$namespace": e2eTestNamespace,
"$label": "hello-pod",
}
exutil.By("creating hello pod client in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod")
exutil.By("create UDP Listener Pod")
createResourceFromFile(oc, e2eTestNamespace, udpListenerPod)
err := waitForPodWithLabelReady(oc, e2eTestNamespace, "name=udp-pod")
exutil.AssertWaitPollNoErr(err, "this pod with label name=udp-pod not ready")
//expose udp pod to nodeport service
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("pod", "udp-pod", "-n", e2eTestNamespace, "--type=NodePort", "--port=8080", "--protocol=UDP").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
exutil.By("Get service NodePort and NodeIP value")
nodePort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, "udp-pod", "-o=jsonpath={.spec.ports[*].nodePort}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//send a test packet to udp endpoint which will create an udp conntrack entry on the node
if ipStackType == "ipv4single" || ipStackType == "dualstack" {
udpListenerPodIP = getPodIPv4(oc, e2eTestNamespace, "udp-pod")
nodeIP = getNodeIPv4(oc, e2eTestNamespace, nodeList.Items[0].Name)
} else {
udpListenerPodIP = getPodIPv6(oc, e2eTestNamespace, "udp-pod", ipStackType)
nodeIP = getMicroshiftNodeIPV6(oc)
}
cmd_traffic := " for n in {1..3}; do echo $n; sleep 1; done > /dev/udp/" + nodeIP + "/" + nodePort
_, err = exutil.RemoteShPodWithBash(oc, e2eTestNamespace, "hello-pod", cmd_traffic)
o.Expect(err).NotTo(o.HaveOccurred())
//make sure the corresponding conntrack entry exists for the udp endpoint
output, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "conntrack", "-L", "-p", "udp", "--dport", "8080")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, udpListenerPodIP)).Should(o.BeTrue())
_, err = oc.WithoutNamespace().Run("delete").Args("pod", "-n", e2eTestNamespace, "udp-pod").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//make sure the corresponding conntrack entry goes away as we deleted udp endpoint above
output, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "conntrack", "-L", "-p", "udp", "--dport", "8080")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, udpListenerPodIP)).ShouldNot(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
ce985b64-899a-4925-8232-ce0197a35c96
|
MicroShiftOnly-Author:asood-Medium-63770-Ensure LoadBalancer service serving pods on hostnetwork or cluster network accessible only from primary node IP address and continues to serve after firewalld reload[Disruptive]
|
['"context"', '"fmt"', '"net"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:asood-Medium-63770-Ensure LoadBalancer service serving pods on hostnetwork or cluster network accessible only from primary node IP address and continues to serve after firewalld reload[Disruptive]", func() {
var (
caseID = "63770"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
)
if ipStackType == "ipv6single" {
// can not run on ipv6 as secondary nic doesn't have available ipv6 address.
g.Skip("this case can only run on ipv4")
}
exutil.By("Create a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
pod_pmtrs := map[string]string{
"$podname": "hello-pod-host",
"$namespace": e2eTestNamespace,
"$label": "hello-pod-host",
"$nodename": nodeList.Items[0].Name,
}
exutil.By("Creating hello pod on host network in namespace")
createHostNetworkedPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod-host")
pod_pmtrs = map[string]string{
"$podname": "hello-pod-cluster",
"$namespace": e2eTestNamespace,
"$label": "hello-pod-cluster",
}
exutil.By("Creating hello pod on cluster network in namespace")
createPingPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, "hello-pod-cluster")
secNICIP := getSecondaryNICip(oc)
podType := [2]string{"host", "cluster"}
for _, svcSuffix := range podType {
exutil.By(fmt.Sprintf("Creating service for hello pod on %s network in namespace", svcSuffix))
serviceName := "test-service-" + svcSuffix
svc_pmtrs := map[string]string{
"$servicename": serviceName,
"$namespace": e2eTestNamespace,
"$label": "test-service",
"$internalTrafficPolicy": "",
"$externalTrafficPolicy": "",
"$ipFamilyPolicy": "PreferDualStack",
"$selector": "hello-pod-" + svcSuffix,
"$serviceType": "LoadBalancer",
}
createServiceforUshift(oc, svc_pmtrs)
exutil.By(fmt.Sprintf("Construct the URLs for the %s service", serviceName))
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", e2eTestNamespace, "pod", "hello-pod-"+svcSuffix, "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeIP := getNodeIPv4(oc, e2eTestNamespace, nodeName)
svcPort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "-n", e2eTestNamespace, serviceName, "-o=jsonpath={.spec.ports[*].port}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcURL := net.JoinHostPort(nodeIP, svcPort)
secNICURL := net.JoinHostPort(secNICIP, svcPort)
exutil.By(fmt.Sprintf("Checking service for hello pod on %s network is accessible on primary interface", svcSuffix))
output, err := exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Checking service for hello pod on %s network is not accessible on secondary interface", svcSuffix))
output, err = exutil.DebugNode(oc, nodeName, "curl", secNICURL, "-s", "--connect-timeout", "5")
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).ShouldNot(o.ContainSubstring("Hello OpenShift"))
exutil.By("Reload the firewalld")
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --reload")
o.Expect(err).NotTo(o.HaveOccurred())
firewallState, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "firewall-cmd --state")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(firewallState).To(o.ContainSubstring("running"))
exutil.By(fmt.Sprintf("Checking service for hello pod on %s network is accessible after firewalld reload", svcSuffix))
output, err = exutil.DebugNode(oc, nodeName, "curl", svcURL, "-s", "--connect-timeout", "5")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("Hello OpenShift"))
exutil.By(fmt.Sprintf("Delete LB service %s", serviceName))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", serviceName, "-n", e2eTestNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
3f55bb47-8607-4a67-921e-fc43b37aef1f
|
MicroShiftOnly-Author:jechen-High-65838-br-ex interface should be unmanaged by NetworkManager
|
['"context"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:jechen-High-65838-br-ex interface should be unmanaged by NetworkManager", func() {
caseID := "65838"
exutil.By("Create a namespace")
e2eTestNamespace := "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
exutil.By("Check if br-ex on the node is unmanaged")
e2e.Logf("Check br-ex on node %v", nodeList.Items[0].Name)
connections, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "nmcli conn show")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(connections, "br-ex")).To(o.BeFalse())
})
| |||||
test case
|
openshift/openshift-tests-private
|
6436f4ec-c6d2-4292-ab00-dd31f2ec64a4
|
MicroShiftOnly-Author:jechen-High-65840-Killing openvswitch service should reconcile OVN control plane back to normal [Disruptive]
|
['"context"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("MicroShiftOnly-Author:jechen-High-65840-Killing openvswitch service should reconcile OVN control plane back to normal [Disruptive]", func() {
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
exutil.By("Kill openvswitch on the node")
e2e.Logf("Kill openvswitch on node %v", nodeList.Items[0].Name)
_, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "pkill -9 -f openvswitch")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check ovs-vswitchd and ovsdb-server are back into active running state")
output, err := exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "systemctl status ovs-vswitchd")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("active (running)"))
output, err = exutil.DebugNodeWithChroot(oc, nodeList.Items[0].Name, "bash", "-c", "systemctl status ovsdb-server")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).Should(o.ContainSubstring("active (running)"))
exutil.By("Check all pods in openshift-ovn-kubernetes are back to normal in running state")
statusErr := waitForPodWithLabelReady(oc, "openshift-ovn-kubernetes", "component=network")
o.Expect(statusErr).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
79e6a9e7-df8f-4717-b372-448ddd02b479
|
Author:weliang-MicroShiftOnly-Medium-72796-Multus CNI bridge with host-local. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-72796-Multus CNI bridge with host-local. [Disruptive]", func() {
var (
nadName = "bridge-host-local"
caseID = "72796"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "bridge-host-local-pod1"
pod2Name = "bridge-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using bridge with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "bridge",
"$mode": " ",
"$ipamtype": "host-local",
"$ipv4range": "192.168.20.0/24",
"$ipv6range": "fd00:dead:beef:20::/64",
"$v4rangestart": "192.168.20.1",
"$v4rangeend": "192.168.20.9",
"$v6rangestart": "fd00:dead:beef:20::1",
"$v6rangeend": "fd00:dead:beef:20::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.20.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:20::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.20.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:20::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
b78287cf-b9fd-47da-a694-42b458459908
|
Author:weliang-MicroShiftOnly-Medium-72797-Multus CNI bridge with static. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-72797-Multus CNI bridge with static. [Disruptive]", func() {
var (
nadName = "bridge-static"
caseID = "72797"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "bridge-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using bridge with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "bridge",
"$mode": " ",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
7e370447-701e-47b6-afd7-0dcdab000c2d
|
Author:weliang-MicroShiftOnly-Medium-72798-Multus CNI bridge with dhcp. [Disruptive]
|
['"context"', '"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-72798-Multus CNI bridge with dhcp. [Disruptive]", func() {
var (
nadName = "bridge-dhcp"
caseID = "72798"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "bridge-dhcp-pod1"
pod2Name = "bridge-dhcp-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-DHCP.yaml")
)
exutil.By("Get the ready-schedulable worker nodes")
nodeList, nodeErr := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(nodeErr).NotTo(o.HaveOccurred())
o.Expect(len(nodeList.Items) > 0).To(o.BeTrue())
nodeName := nodeList.Items[0].Name
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
defer disableDHCPforCNI(oc, nodeName)
enableDHCPforCNI(oc, nodeName)
exutil.By("Configuring a NetworkAttachmentDefinition using bridge with dhcp")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "bridge",
"$mode": " ",
"$bridgename": "testbr1",
"$ipamtype": "dhcp",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Configuring first pod to get additional network")
pod1_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod1_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "88.8.8.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10:")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "88.8.8.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10:")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
df35fbd0-aff3-4699-b5ba-90ea7fa97c34
|
Author:weliang-MicroShiftOnly-Medium-72799-Multus CNI macvlan/bridge with host-local. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-72799-Multus CNI macvlan/bridge with host-local. [Disruptive]", func() {
var (
nadName = "macvlan-bridge-host-local"
caseID = "72799"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-bridge-host-local-pod1"
pod2Name = "macvlan-bridge-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/bridge with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "bridge",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
601cf074-bf39-4599-b566-ef27085298ce
|
Author:weliang-MicroShiftOnly-Medium-72904-Multus CNI macvlan/bridge with static. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-72904-Multus CNI macvlan/bridge with static. [Disruptive]", func() {
var (
nadName = "macvlan-bridge-static"
caseID = "72904"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-bridge-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/bridge with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "bridge",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
0e34243d-1a97-44e2-aec0-00f5b6997a67
|
Author:weliang-MicroShiftOnly-Medium-73082-Multus CNI macvlan/private with host-local. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73082-Multus CNI macvlan/private with host-local. [Disruptive]", func() {
var (
nadName = "macvlan-private-host-local"
caseID = "73082"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-private-host-local-pod1"
pod2Name = "macvlan-private-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/private with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "private",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
6e3db2e1-0202-4c46-8879-a3c10403afd5
|
Author:weliang-MicroShiftOnly-Medium-73083-Multus CNI macvlan/private with static. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73083-Multus CNI macvlan/private with static. [Disruptive]", func() {
var (
nadName = "macvlan-private-static"
caseID = "73083"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-private-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/private with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "private",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
6c0f3fec-2df3-4e39-8c5e-3d339a987bcb
|
Author:weliang-MicroShiftOnly-Medium-73084-Multus CNI macvlan/vepa with static. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73084-Multus CNI macvlan/vepa with static. [Disruptive]", func() {
var (
nadName = "macvlan-vepa-static"
caseID = "73084"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-vepa-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/vepa with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "vepa",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
e05cb097-f6e0-4358-b0bf-63168993dff5
|
Author:weliang-MicroShiftOnly-Medium-73085-Multus CNI macvlan/vepa with host-local. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73085-Multus CNI macvlan/vepa with host-local. [Disruptive]", func() {
var (
nadName = "macvlan-vepa-host-local"
caseID = "73085"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "macvlan-vepa-host-local-pod1"
pod2Name = "macvlan-vepa-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using macvlan/vepa with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "macvlan",
"$mode": "vepa",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
283bad04-e8e2-4ed3-ae09-278393e2c3ae
|
Author:weliang-MicroShiftOnly-Medium-73086-Multus CNI ipvlan/l2 with static. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73086-Multus CNI ipvlan/l2 with static. [Disruptive]", func() {
var (
nadName = "ipvlan-l2-static"
caseID = "73086"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l2-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l2 with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l2",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
exutil.By("Checking if the IPs from pod1's secondary interface are assigned the static addresses")
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
a3f4bc9d-1dc3-4c9f-8a98-ba4ebbafaf9a
|
Author:weliang-MicroShiftOnly-Medium-73087-Multus CNI ipvlan/l2 with host-local. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73087-Multus CNI ipvlan/l2 with host-local. [Disruptive]", func() {
var (
nadName = "ipvlan-l2-host-local"
caseID = "73087"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l2-host-local-pod1"
pod2Name = "ipvlan-l2-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l2 with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l2",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getMicroshiftPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
a2f5b0e7-3ba6-4ae3-bd36-4627d23b6837
|
Author:weliang-MicroShiftOnly-Medium-73098-Multus CNI ipvlan/l3 with host-local. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73098-Multus CNI ipvlan/l3 with host-local. [Disruptive]", func() {
var (
nadName = "ipvlan-l3-host-local"
caseID = "73098"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l3-host-local-pod1"
pod2Name = "ipvlan-l3-host-local-pod2"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-hostlocal.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l3 with host-local")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l3",
"$ipamtype": "host-local",
"$ipv4range": "192.168.10.0/24",
"$ipv6range": "fd00:dead:beef:10::/64",
"$v4rangestart": "192.168.10.1",
"$v4rangeend": "192.168.10.9",
"$v6rangestart": "fd00:dead:beef:10::1",
"$v6rangeend": "fd00:dead:beef:10::9",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring first pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Configuring second pod to get additional network")
pod2_pmtrs := map[string]string{
"$podname": pod2Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod2Name,
"$nadname": nadName,
"$podenvname": pod2Name,
}
defer removeResource(oc, true, true, "pod", pod2Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod2_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod2Name)
exutil.By("Get IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Get IPs from pod2's secondary interface")
pod2Net1IPv4, pod2Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod2Name, interfaceName)
e2e.Logf("The v4 address of pod2's net1 is: %v", pod2Net1IPv4)
e2e.Logf("The v6 address of pod2's net1 is: %v", pod2Net1IPv6)
o.Expect(strings.HasPrefix(pod2Net1IPv4, "192.168.10.")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod2Net1IPv6, "fd00:dead:beef:10::")).Should(o.BeTrue())
exutil.By("Checking the connectivity from pod 1 to pod 2 over secondary interface - net1")
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv4, interfaceName, pod2Name)
CurlMultusPod2PodPass(oc, e2eTestNamespace, pod1Name, pod2Net1IPv6, interfaceName, pod2Name)
})
| |||||
test case
|
openshift/openshift-tests-private
|
97efd729-936b-4a12-874d-d91ee7370d0b
|
Author:weliang-MicroShiftOnly-Medium-73099-Multus CNI ipvlan/l3 with static. [Disruptive]
|
['"net"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift.go
|
g.It("Author:weliang-MicroShiftOnly-Medium-73099-Multus CNI ipvlan/l3 with static. [Disruptive]", func() {
var (
nadName = "ipvlan-l3-static"
caseID = "73099"
e2eTestNamespace = "e2e-ushift-sdn-" + caseID + "-" + getRandomString()
pod1Name = "ipvlan-l3-static-pod1"
interfaceName = "net1"
MultusNADGenericYaml = getFileContentforUshift("microshift", "multus-NAD-static.yaml")
)
exutil.By("Creating a namespace for the scenario")
defer oc.DeleteSpecifiedNamespaceAsAdmin(e2eTestNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(e2eTestNamespace)
err := exutil.SetNamespacePrivileged(oc, e2eTestNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Configuring a NetworkAttachmentDefinition using ipvlan/l3 with static")
NAD_pmtrs := map[string]string{
"$nadname": nadName,
"$namespace": e2eTestNamespace,
"$plugintype": "ipvlan",
"$mode": "l3",
"$ipamtype": "static",
"$ipv4add": "192.168.10.100/24",
"$ipv6add": "fd00:dead:beef:10::100/64",
}
defer removeResource(oc, true, true, "net-attach-def", nadName, "-n", e2eTestNamespace)
createMultusNADforUshift(oc, NAD_pmtrs, MultusNADGenericYaml)
exutil.By("Verifying the configued NetworkAttachmentDefinition")
if checkNAD(oc, e2eTestNamespace, nadName) {
e2e.Logf("The correct network-attach-defintion: %v is created!", nadName)
} else {
e2e.Failf("The correct network-attach-defintion: %v is not created!", nadName)
}
exutil.By("Configuring a pod to get additional network")
pod_pmtrs := map[string]string{
"$podname": pod1Name,
"$namespace": e2eTestNamespace,
"$podlabel": pod1Name,
"$nadname": nadName,
"$podenvname": pod1Name,
}
defer removeResource(oc, true, true, "pod", pod1Name, "-n", e2eTestNamespace)
createMultusPodforUshift(oc, pod_pmtrs)
waitPodReady(oc, e2eTestNamespace, pod1Name)
exutil.By("Getting IPs from pod1's secondary interface")
pod1Net1IPv4, pod1Net1IPv6 := getPodMultiNetworks(oc, e2eTestNamespace, pod1Name, interfaceName)
e2e.Logf("The v4 address of pod1's net1 is: %v", pod1Net1IPv4)
e2e.Logf("The v6 address of pod1's net1 is: %v", pod1Net1IPv6)
exutil.By("Checking if the IPs from pod1's secondary interface are assigned the static addresses")
o.Expect(strings.HasPrefix(pod1Net1IPv4, "192.168.10.100")).Should(o.BeTrue())
o.Expect(strings.HasPrefix(pod1Net1IPv6, "fd00:dead:beef:10::100")).Should(o.BeTrue())
})
| |||||
file
|
openshift/openshift-tests-private
|
4533a6c5-9b67-4002-8f1c-d4c2d2d22614
|
microshift_utils
|
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
netutils "k8s.io/utils/net"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
package networking
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
netutils "k8s.io/utils/net"
)
// get file contents to be modified for Ushift
func getFileContentforUshift(baseDir string, name string) (fileContent string) {
filePath := filepath.Join(exutil.FixturePath("testdata", "networking", baseDir), name)
fileOpen, err := os.Open(filePath)
defer fileOpen.Close()
if err != nil {
e2e.Failf("Failed to open file: %s", filePath)
}
fileRead, _ := io.ReadAll(fileOpen)
if err != nil {
e2e.Failf("Failed to read file: %s", filePath)
}
return string(fileRead)
}
// get service yaml file, replace variables as per requirements in ushift and create service post that
func createServiceforUshift(oc *exutil.CLI, svc_pmtrs map[string]string) (err error) {
e2e.Logf("Getting filecontent")
ServiceGenericYaml := getFileContentforUshift("microshift", "service-generic.yaml")
//replace all variables as per createServiceforUshift() arguements
for rep, value := range svc_pmtrs {
ServiceGenericYaml = strings.ReplaceAll(ServiceGenericYaml, rep, value)
}
svcFileName := "temp-service-" + getRandomString() + ".yaml"
defer os.Remove(svcFileName)
os.WriteFile(svcFileName, []byte(ServiceGenericYaml), 0644)
// create service for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", svcFileName).Output()
return err
}
// get generic pod yaml file, replace varibles as per requirements in ushift and create pod post that
func createPingPodforUshift(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) {
PodGenericYaml := getFileContentforUshift("microshift", "ping-for-pod-generic.yaml")
//replace all variables as per createPodforUshift() arguements
for rep, value := range pod_pmtrs {
PodGenericYaml = strings.ReplaceAll(PodGenericYaml, rep, value)
}
podFileName := "temp-ping-pod-" + getRandomString() + ".yaml"
defer os.Remove(podFileName)
os.WriteFile(podFileName, []byte(PodGenericYaml), 0644)
// create ping pod for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", podFileName).Output()
return err
}
// get pod yaml file, replace varibles as per requirements in ushift and create pod on host network
func createHostNetworkedPodforUshift(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) {
PodHostYaml := getFileContentforUshift("microshift", "pod-specific-host.yaml")
//replace all variables as per createPodforUshift() arguements
for rep, value := range pod_pmtrs {
PodHostYaml = strings.ReplaceAll(PodHostYaml, rep, value)
}
podFileName := "temp-pod-host" + getRandomString() + ".yaml"
defer os.Remove(podFileName)
os.WriteFile(podFileName, []byte(PodHostYaml), 0644)
// create ping pod on the host network for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", podFileName).Output()
return err
}
func rebootUshiftNode(oc *exutil.CLI, nodeName string) {
rebootNode(oc, nodeName)
exec.Command("bash", "-c", "sleep 120").Output()
checkNodeStatus(oc, nodeName, "Ready")
}
func setMTU(oc *exutil.CLI, nodeName string, mtu string) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "cd /etc/microshift && cp ovn.yaml.default ovn.yaml && echo mtu: "+mtu+" >> ovn.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("reboot node")
rebootUshiftNode(oc, nodeName)
}
func rollbackMTU(oc *exutil.CLI, nodeName string) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "rm -f /etc/microshift/ovn.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("reboot node")
rebootUshiftNode(oc, nodeName)
}
func removeIPRules(oc *exutil.CLI, nodePort, nodeIP, nodeName string) {
ipRuleList := fmt.Sprintf("nft -a list chain ip nat PREROUTING")
rulesOutput, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", ipRuleList)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The iprules out put is :\n%s", rulesOutput)
if checkIPrules(oc, nodePort, nodeIP, rulesOutput) {
regexText := fmt.Sprintf("tcp dport %v ip daddr %v drop # handle (\\d+)", nodePort, nodeIP)
re := regexp.MustCompile(regexText)
match := re.FindStringSubmatch(rulesOutput)
o.Expect(len(match) > 1).To(o.BeTrue())
handleNumber := match[1]
removeRuleCmd := fmt.Sprintf("nft -a delete rule ip nat PREROUTING handle %v", handleNumber)
e2e.Logf("The remove rule command: %s\n", removeRuleCmd)
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", removeRuleCmd)
o.Expect(err).NotTo(o.HaveOccurred())
rulesOutput, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", ipRuleList)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkIPrules(oc, nodePort, nodeIP, rulesOutput)).Should(o.BeFalse())
}
}
func checkIPrules(oc *exutil.CLI, nodePort, nodeIP, iprules string) bool {
regexText := fmt.Sprintf("tcp dport %v ip daddr %v drop", nodePort, nodeIP)
re := regexp.MustCompile(regexText)
found := re.MatchString(iprules)
if found {
e2e.Logf("%s --Line found.", regexText)
return true
} else {
e2e.Logf("%s --Line not found.", regexText)
return false
}
}
func checkIPv6rules(oc *exutil.CLI, nodePort, nodeIP, iprules string) bool {
regexText := fmt.Sprintf("tcp dport %v ip6 daddr %v drop", nodePort, nodeIP)
re := regexp.MustCompile(regexText)
found := re.MatchString(iprules)
if found {
e2e.Logf("%s --Line found.", regexText)
return true
} else {
e2e.Logf("%s --Line not found.", regexText)
return false
}
}
func restartMicroshiftService(oc *exutil.CLI, nodeName string) {
// As restart the microshift service, the debug node pod will quit with error
exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "systemctl restart microshift")
exec.Command("bash", "-c", "sleep 60").Output()
checkNodeStatus(oc, nodeName, "Ready")
}
func getSecondaryNICip(oc *exutil.CLI) string {
masterPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-ovn-kubernetes", "-l", "app=ovnkube-master", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//primary nic will have lowest metric of 100 followed by higher metric of secondary nic. So we will look for 2nd default route line on iproute and grep its src ip which will be 2nd nic
//nic names keep changing so relying on metric logic
cmd := "ip route | sed -n '/metric 101/p' | grep -oE '\\b([0-9]{1,3}\\.){3}[0-9]{1,3}\\b' | sed -n '2p'"
sec_int, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", masterPodName, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
re := regexp.MustCompile(`\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b`)
sec_int = re.FindAllString(sec_int, -1)[0]
e2e.Logf("Secondary Interface IP is : %s", sec_int)
return sec_int
}
// get generic multus NAD yaml file, replace varibles as per requirements in ushift and create NAD with DHCP
func createMultusNADforUshift(oc *exutil.CLI, pod_pmtrs map[string]string, MultusNADGenericYaml string) (err error) {
for rep, value := range pod_pmtrs {
MultusNADGenericYaml = strings.ReplaceAll(MultusNADGenericYaml, rep, value)
}
MultusNADFileName := "MultusNAD-" + getRandomString() + ".yaml"
defer os.Remove(MultusNADFileName)
os.WriteFile(MultusNADFileName, []byte(MultusNADGenericYaml), 0644)
// create multus NAD for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", MultusNADFileName).Output()
return err
}
// get generic MultusPod yaml file, replace varibles as per requirements in ushift and create Multus Pod
func createMultusPodforUshift(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) {
MultusPodGenericYaml := getFileContentforUshift("microshift", "multus-pod-generic.yaml")
//replace all variables as per createMultusPodforUshift() arguements
for rep, value := range pod_pmtrs {
MultusPodGenericYaml = strings.ReplaceAll(MultusPodGenericYaml, rep, value)
}
MultusPodFileName := "MultusPod-" + getRandomString() + ".yaml"
defer os.Remove(MultusPodFileName)
os.WriteFile(MultusPodFileName, []byte(MultusPodGenericYaml), 0644)
// create MultusPod for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", MultusPodFileName).Output()
return err
}
// configure DHCP pool from dnsmasq for CNI IPAM DHCP testing
func enableDHCPforCNI(oc *exutil.CLI, nodeName string) {
cmdAddlink := "ip link add testbr1 type bridge"
_, cmdAddlinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdAddlink)
o.Expect(cmdAddlinkErr).NotTo(o.HaveOccurred())
cmdAddIPv4 := "ip address add 88.8.8.2/24 dev testbr1"
_, cmdAddIPv4Err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdAddIPv4)
o.Expect(cmdAddIPv4Err).NotTo(o.HaveOccurred())
cmdAddIPv6 := "ip address add fd00:dead:beef:10::2/64 dev testbr1"
_, cmdAddIPv6Err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdAddIPv6)
o.Expect(cmdAddIPv6Err).NotTo(o.HaveOccurred())
cmdUplink := "ip link set up testbr1"
_, cmdUplinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdUplink)
o.Expect(cmdUplinkErr).NotTo(o.HaveOccurred())
cmdShowIP := "ip add show testbr1"
cmdShowIPOutput, cmdShowIPErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdShowIP)
o.Expect(cmdShowIPErr).NotTo(o.HaveOccurred())
o.Expect(cmdShowIPOutput).To(o.ContainSubstring("88.8.8.2"))
dnsmasqFile := "/etc/dnsmasq.conf"
cmdConfigdnsmasq := fmt.Sprintf(`cat > %v << EOF
no-resolv
expand-hosts
bogus-priv
domain=mydomain.net
local=/mydomain.net/
interface=testbr1
dhcp-range=88.8.8.10,88.8.8.250,24h
enable-ra
dhcp-range=tag:testbr1,::1,constructor:testbr1,ra-names,12h
bind-interfaces`, dnsmasqFile)
_, cmdConfigdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdConfigdnsmasq)
o.Expect(cmdConfigdnsmasqErr).NotTo(o.HaveOccurred())
cmdRestartdnsmasq := "systemctl restart dnsmasq --now"
_, cmdRestartdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdRestartdnsmasq)
o.Expect(cmdRestartdnsmasqErr).NotTo(o.HaveOccurred())
cmdCheckdnsmasq := "systemctl status dnsmasq"
cmdCheckdnsmasqOutput, cmdCheckdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdCheckdnsmasq)
o.Expect(cmdCheckdnsmasqErr).NotTo(o.HaveOccurred())
o.Expect(cmdCheckdnsmasqOutput).To(o.ContainSubstring("active (running)"))
addDHCPFirewall := "firewall-cmd --add-service=dhcp"
_, addDHCPFirewallErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", addDHCPFirewall)
o.Expect(addDHCPFirewallErr).NotTo(o.HaveOccurred())
}
// disable dnsmasq for CNI IPAM DHCP testing
func disableDHCPforCNI(oc *exutil.CLI, nodeName string) {
cmdDelIP := "ip address del 88.8.8.2/24 dev testbr1"
_, cmdDelIPErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDelIP)
o.Expect(cmdDelIPErr).NotTo(o.HaveOccurred())
cmdDelIPv6 := "ip address del fd00:dead:beef:10::2/64 dev testbr1"
_, cmdDelIPv6Err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDelIPv6)
o.Expect(cmdDelIPv6Err).NotTo(o.HaveOccurred())
cmdDownlink := "ip link set down testbr1"
_, cmdDownlinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDownlink)
o.Expect(cmdDownlinkErr).NotTo(o.HaveOccurred())
cmdDellink := "ip link delete testbr1"
_, cmdDellinkErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDellink)
o.Expect(cmdDellinkErr).NotTo(o.HaveOccurred())
cmdStopdnsmasq := "systemctl stop dnsmasq --now"
_, cmdStopdnsmasqErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdStopdnsmasq)
o.Expect(cmdStopdnsmasqErr).NotTo(o.HaveOccurred())
cmdDeldnsmasqFile := "rm /etc/dnsmasq.conf"
_, cmdDeldnsmasqFileErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", cmdDeldnsmasqFile)
o.Expect(cmdDeldnsmasqFileErr).NotTo(o.HaveOccurred())
remDHCPFirewall := "firewall-cmd --remove-service=dhcp"
_, remDHCPFirewallErr := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", remDHCPFirewall)
o.Expect(remDHCPFirewallErr).NotTo(o.HaveOccurred())
}
// Using getMicroshiftPodMultiNetworks for microshift pod when NAD using macvlan and ipvlan
func getMicroshiftPodMultiNetworks(oc *exutil.CLI, namespace string, podName string, netName string) (string, string) {
cmd1 := "ip a sho " + netName + " | awk 'NR==3{print $2}' |grep -Eo '((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])'"
cmd2 := "ip a sho " + netName + " | awk 'NR==7{print $2}' |grep -Eo '([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}'"
podv4Output, err := e2eoutput.RunHostCmd(namespace, podName, cmd1)
o.Expect(err).NotTo(o.HaveOccurred())
podIPv4 := strings.TrimSpace(podv4Output)
podv6Output, err1 := e2eoutput.RunHostCmd(namespace, podName, cmd2)
o.Expect(err1).NotTo(o.HaveOccurred())
podIPv6 := strings.TrimSpace(podv6Output)
return podIPv4, podIPv6
}
func checkMicroshiftIPStackType(oc *exutil.CLI) string {
podNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("pod", "-n", "openshift-dns", "-l", "dns.operator.openshift.io/daemonset-node-resolver",
"-o=jsonpath='{ .items[*].status.podIPs[*].ip }'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("pod network is %v", podNetwork)
if strings.Count(podNetwork, ":") >= 2 && strings.Count(podNetwork, ".") >= 2 {
return "dualstack"
} else if strings.Count(podNetwork, ":") >= 2 {
return "ipv6single"
} else if strings.Count(podNetwork, ".") >= 2 {
return "ipv4single"
}
return ""
}
// Return IPv6
func getMicroshiftNodeIPV6(oc *exutil.CLI) string {
ipStack := checkMicroshiftIPStackType(oc)
o.Expect(ipStack).ShouldNot(o.BeEmpty())
o.Expect(ipStack).NotTo(o.Equal("ipv4single"))
nodeName := getMicroshiftNodeName(oc)
if ipStack == "ipv6single" {
e2e.Logf("Its a Single Stack Cluster, either IPv4 or IPv6")
InternalIP, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[?(@.type==\"InternalIP\")].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's Internal IP is %q", InternalIP)
return InternalIP
}
if ipStack == "dualstack" {
e2e.Logf("Its a Dual Stack Cluster")
InternalIP1, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[0].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 1st Internal IP is %q", InternalIP1)
InternalIP2, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[1].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 2nd Internal IP is %q", InternalIP2)
if netutils.IsIPv6String(InternalIP1) {
return InternalIP1
}
return InternalIP2
}
return ""
}
// Return IPv6 and IPv4 in vars respectively for Dual Stack and IPv4/IPv6 in 2nd var for single stack Clusters, and var1 will be nil in those cases
func getMicroshiftNodeIP(oc *exutil.CLI, nodeName string) (string, string) {
ipStack := checkMicroshiftIPStackType(oc)
if (ipStack == "ipv6single") || (ipStack == "ipv4single") {
e2e.Logf("Its a Single Stack Cluster, either IPv4 or IPv6")
InternalIP, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[?(@.type==\"InternalIP\")].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's Internal IP is %q", InternalIP)
return "", InternalIP
}
e2e.Logf("Its a Dual Stack Cluster")
InternalIP1, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[0].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 1st Internal IP is %q", InternalIP1)
InternalIP2, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.status.addresses[1].address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The node's 2nd Internal IP is %q", InternalIP2)
if netutils.IsIPv6String(InternalIP1) {
return InternalIP1, InternalIP2
}
return InternalIP2, InternalIP1
}
func getMicroshiftNodeName(oc *exutil.CLI) string {
nodeName, err := oc.AsAdmin().Run("get").Args("nodes", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return nodeName
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
72430fbd-f2ce-4495-8fe1-c1a107d6af68
|
getFileContentforUshift
|
['"io"', '"os"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func getFileContentforUshift(baseDir string, name string) (fileContent string) {
filePath := filepath.Join(exutil.FixturePath("testdata", "networking", baseDir), name)
fileOpen, err := os.Open(filePath)
defer fileOpen.Close()
if err != nil {
e2e.Failf("Failed to open file: %s", filePath)
}
fileRead, _ := io.ReadAll(fileOpen)
if err != nil {
e2e.Failf("Failed to read file: %s", filePath)
}
return string(fileRead)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
db34cbdf-7b4f-4ffe-9692-cc395c49438f
|
createServiceforUshift
|
['"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func createServiceforUshift(oc *exutil.CLI, svc_pmtrs map[string]string) (err error) {
e2e.Logf("Getting filecontent")
ServiceGenericYaml := getFileContentforUshift("microshift", "service-generic.yaml")
//replace all variables as per createServiceforUshift() arguements
for rep, value := range svc_pmtrs {
ServiceGenericYaml = strings.ReplaceAll(ServiceGenericYaml, rep, value)
}
svcFileName := "temp-service-" + getRandomString() + ".yaml"
defer os.Remove(svcFileName)
os.WriteFile(svcFileName, []byte(ServiceGenericYaml), 0644)
// create service for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", svcFileName).Output()
return err
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
2848f7f6-d2ec-4d58-ad33-29cd4454321c
|
createPingPodforUshift
|
['"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func createPingPodforUshift(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) {
PodGenericYaml := getFileContentforUshift("microshift", "ping-for-pod-generic.yaml")
//replace all variables as per createPodforUshift() arguements
for rep, value := range pod_pmtrs {
PodGenericYaml = strings.ReplaceAll(PodGenericYaml, rep, value)
}
podFileName := "temp-ping-pod-" + getRandomString() + ".yaml"
defer os.Remove(podFileName)
os.WriteFile(podFileName, []byte(PodGenericYaml), 0644)
// create ping pod for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", podFileName).Output()
return err
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
adfa57a8-1096-47d8-b9fb-0f349a58001b
|
createHostNetworkedPodforUshift
|
['"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func createHostNetworkedPodforUshift(oc *exutil.CLI, pod_pmtrs map[string]string) (err error) {
PodHostYaml := getFileContentforUshift("microshift", "pod-specific-host.yaml")
//replace all variables as per createPodforUshift() arguements
for rep, value := range pod_pmtrs {
PodHostYaml = strings.ReplaceAll(PodHostYaml, rep, value)
}
podFileName := "temp-pod-host" + getRandomString() + ".yaml"
defer os.Remove(podFileName)
os.WriteFile(podFileName, []byte(PodHostYaml), 0644)
// create ping pod on the host network for Microshift
_, err = oc.WithoutNamespace().Run("create").Args("-f", podFileName).Output()
return err
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9de1e053-0157-4385-b4ed-4bb0da189a6a
|
rebootUshiftNode
|
['"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func rebootUshiftNode(oc *exutil.CLI, nodeName string) {
rebootNode(oc, nodeName)
exec.Command("bash", "-c", "sleep 120").Output()
checkNodeStatus(oc, nodeName, "Ready")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9aafa45f-5bac-4278-a134-5d6d3a5b1a76
|
setMTU
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func setMTU(oc *exutil.CLI, nodeName string, mtu string) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "cd /etc/microshift && cp ovn.yaml.default ovn.yaml && echo mtu: "+mtu+" >> ovn.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("reboot node")
rebootUshiftNode(oc, nodeName)
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
0b3afb95-4a80-4010-998e-a7ce27dbcc0f
|
rollbackMTU
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func rollbackMTU(oc *exutil.CLI, nodeName string) {
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "rm -f /etc/microshift/ovn.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("reboot node")
rebootUshiftNode(oc, nodeName)
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
3c5b77a0-3ca4-4626-ad1e-f5e2f4eef0e1
|
removeIPRules
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func removeIPRules(oc *exutil.CLI, nodePort, nodeIP, nodeName string) {
ipRuleList := fmt.Sprintf("nft -a list chain ip nat PREROUTING")
rulesOutput, err := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", ipRuleList)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The iprules out put is :\n%s", rulesOutput)
if checkIPrules(oc, nodePort, nodeIP, rulesOutput) {
regexText := fmt.Sprintf("tcp dport %v ip daddr %v drop # handle (\\d+)", nodePort, nodeIP)
re := regexp.MustCompile(regexText)
match := re.FindStringSubmatch(rulesOutput)
o.Expect(len(match) > 1).To(o.BeTrue())
handleNumber := match[1]
removeRuleCmd := fmt.Sprintf("nft -a delete rule ip nat PREROUTING handle %v", handleNumber)
e2e.Logf("The remove rule command: %s\n", removeRuleCmd)
_, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", removeRuleCmd)
o.Expect(err).NotTo(o.HaveOccurred())
rulesOutput, err = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", ipRuleList)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkIPrules(oc, nodePort, nodeIP, rulesOutput)).Should(o.BeFalse())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
747ab03a-fb65-44fb-bc54-7b87c245d403
|
checkIPrules
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func checkIPrules(oc *exutil.CLI, nodePort, nodeIP, iprules string) bool {
regexText := fmt.Sprintf("tcp dport %v ip daddr %v drop", nodePort, nodeIP)
re := regexp.MustCompile(regexText)
found := re.MatchString(iprules)
if found {
e2e.Logf("%s --Line found.", regexText)
return true
} else {
e2e.Logf("%s --Line not found.", regexText)
return false
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
8b7212b7-bf03-415a-9926-3bef6fd012db
|
checkIPv6rules
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func checkIPv6rules(oc *exutil.CLI, nodePort, nodeIP, iprules string) bool {
regexText := fmt.Sprintf("tcp dport %v ip6 daddr %v drop", nodePort, nodeIP)
re := regexp.MustCompile(regexText)
found := re.MatchString(iprules)
if found {
e2e.Logf("%s --Line found.", regexText)
return true
} else {
e2e.Logf("%s --Line not found.", regexText)
return false
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
5ca1aa5b-f440-4db8-91fd-9bed731c312d
|
restartMicroshiftService
|
['"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func restartMicroshiftService(oc *exutil.CLI, nodeName string) {
// As restart the microshift service, the debug node pod will quit with error
exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "systemctl restart microshift")
exec.Command("bash", "-c", "sleep 60").Output()
checkNodeStatus(oc, nodeName, "Ready")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c52c07c7-bb30-4992-9ab5-16f93498dee4
|
getSecondaryNICip
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/microshift_utils.go
|
func getSecondaryNICip(oc *exutil.CLI) string {
masterPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-ovn-kubernetes", "-l", "app=ovnkube-master", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//primary nic will have lowest metric of 100 followed by higher metric of secondary nic. So we will look for 2nd default route line on iproute and grep its src ip which will be 2nd nic
//nic names keep changing so relying on metric logic
cmd := "ip route | sed -n '/metric 101/p' | grep -oE '\\b([0-9]{1,3}\\.){3}[0-9]{1,3}\\b' | sed -n '2p'"
sec_int, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", masterPodName, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
re := regexp.MustCompile(`\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b`)
sec_int = re.FindAllString(sec_int, -1)[0]
e2e.Logf("Secondary Interface IP is : %s", sec_int)
return sec_int
}
|
networking
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.